[ https://issues.apache.org/jira/browse/AIRFLOW-1353?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16625000#comment-16625000 ]

ASF GitHub Bot commented on AIRFLOW-1353:
-----------------------------------------

Fokko closed pull request #2404: [AIRFLOW-1353] Standardizing string type in comments
URL: https://github.com/apache/incubator-airflow/pull/2404

This is a PR merged from a forked repository. As GitHub hides the original
diff on merge, it is reproduced below for the sake of provenance. The change is
a documentation-only cleanup: Sphinx type annotations in the docstrings
(':type ...: string', ':rtype: string' and similar) are standardized to use the
built-in type name 'str'.
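
For illustration, here is what the target convention looks like in a docstring.
This is a minimal, hypothetical helper written for this comment (its name and
parameters are invented, not taken from the patch):

    def download_object(bucket, object_name, filename=None):
        """
        Download an object from Google Cloud Storage (illustrative only).

        :param bucket: The bucket to fetch from.
        :type bucket: str
        :param object_name: The object to fetch.
        :type object_name: str
        :param filename: If set, a local file path to write the object to.
        :type filename: str
        """
        # No implementation needed; only the docstring annotation style matters.
        pass

Before the patch the same annotations would have read ':type bucket: string'
and so on; no runtime behaviour changes. The full diff follows: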

diff --git a/airflow/contrib/hooks/bigquery_hook.py b/airflow/contrib/hooks/bigquery_hook.py
index cc769532e7..5cd0699962 100644
--- a/airflow/contrib/hooks/bigquery_hook.py
+++ b/airflow/contrib/hooks/bigquery_hook.py
@@ -84,11 +84,11 @@ def get_pandas_df(self, bql, parameters=None, 
dialect='legacy'):
         https://github.com/pydata/pandas/issues/6900
 
         :param bql: The BigQuery SQL to execute.
-        :type bql: string
+        :type bql: str
         :param parameters: The parameters to render the SQL query with (not 
used, leave to override superclass method)
         :type parameters: mapping or iterable
         :param dialect: Dialect of BigQuery SQL – legacy SQL or standard SQL
-        :type dialect: string in {'legacy', 'standard'}, default 'legacy'
+        :type dialect: str in {'legacy', 'standard'}, default 'legacy'
         """
         service = self.get_service()
         project = self._get_field('project')
@@ -111,12 +111,12 @@ def table_exists(self, project_id, dataset_id, table_id):
 
         :param project_id: The Google cloud project in which to look for the 
table. The connection supplied to the hook
         must provide access to the specified project.
-        :type project_id: string
+        :type project_id: str
         :param dataset_id: The name of the dataset in which to look for the 
table.
             storage bucket.
-        :type dataset_id: string
+        :type dataset_id: str
         :param table_id: The name of the table to check the existence of.
-        :type table_id: string
+        :type table_id: str
         """
         service = self.get_service()
         try:
@@ -204,7 +204,7 @@ def run_query(
         For more details about these parameters.
 
         :param bql: The BigQuery SQL to execute.
-        :type bql: string
+        :type bql: str
         :param destination_dataset_table: The dotted <dataset>.<table>
             BigQuery table to save the query results.
         :param write_disposition: What to do if the table already exists in
@@ -262,18 +262,18 @@ def run_extract(  # noqa
 
         :param source_project_dataset_table: The dotted <dataset>.<table>
             BigQuery table to use as the source data.
-        :type source_project_dataset_table: string
+        :type source_project_dataset_table: str
         :param destination_cloud_storage_uris: The destination Google Cloud
             Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
             convention defined here:
             
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
         :type destination_cloud_storage_uris: list
         :param compression: Type of compression to use.
-        :type compression: string
+        :type compression: str
         :param export_format: File format to export.
-        :type export_format: string
+        :type export_format: str
         :param field_delimiter: The delimiter to use when extracting to a CSV.
-        :type field_delimiter: string
+        :type field_delimiter: str
         :param print_header: Whether to print a header for a CSV file extract.
         :type print_header: boolean
         """
@@ -325,11 +325,11 @@ def run_copy(self,
         :type source_project_dataset_tables: list|string
         :param destination_project_dataset_table: The destination BigQuery
             table. Format is: (project:|project.)<dataset>.<table>
-        :type destination_project_dataset_table: string
+        :type destination_project_dataset_table: str
         :param write_disposition: The write disposition if the table already 
exists.
-        :type write_disposition: string
+        :type write_disposition: str
         :param create_disposition: The create disposition if the table doesn't 
exist.
-        :type create_disposition: string
+        :type create_disposition: str
         """
         source_project_dataset_tables = (
             [source_project_dataset_tables]
@@ -388,7 +388,7 @@ def run_load(self,
             The dotted (<project>.|<project>:)<dataset>.<table> BigQuery table 
to load
             data into. If <project> is not included, project will be the 
project defined
             in the connection json.
-        :type destination_project_dataset_table: string
+        :type destination_project_dataset_table: str
         :param schema_fields: The schema field list as defined here:
             
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
         :type schema_fields: list
@@ -397,15 +397,15 @@ def run_load(self,
             per-object name can be used.
         :type source_uris: list
         :param source_format: File format to export.
-        :type source_format: string
+        :type source_format: str
         :param create_disposition: The create disposition if the table doesn't 
exist.
-        :type create_disposition: string
+        :type create_disposition: str
         :param skip_leading_rows: Number of rows to skip when loading from a 
CSV.
         :type skip_leading_rows: int
         :param write_disposition: The write disposition if the table already 
exists.
-        :type write_disposition: string
+        :type write_disposition: str
         :param field_delimiter: The delimiter to use when loading from a CSV.
-        :type field_delimiter: string
+        :type field_delimiter: str
         :param max_bad_records: The maximum number of bad records that 
BigQuery can
             ignore when running the job.
         :type max_bad_records: int
@@ -756,7 +756,7 @@ def execute(self, operation, parameters=None):
         Executes a BigQuery query, and returns the job ID.
 
         :param operation: The query to execute.
-        :type operation: string
+        :type operation: str
         :param parameters: Parameters to substitute into the query.
         :type parameters: dict
         """
@@ -768,7 +768,7 @@ def executemany(self, operation, seq_of_parameters):
         Execute a BigQuery query multiple times with different parameters.
 
         :param operation: The query to execute.
-        :type operation: string
+        :type operation: str
         :param parameters: List of dictionary parameters to substitute into the
             query.
         :type parameters: list
diff --git a/airflow/contrib/hooks/databricks_hook.py b/airflow/contrib/hooks/databricks_hook.py
index 0cd5d0fb19..5ba01fbe67 100644
--- a/airflow/contrib/hooks/databricks_hook.py
+++ b/airflow/contrib/hooks/databricks_hook.py
@@ -45,7 +45,7 @@ def __init__(
             retry_limit=3):
         """
         :param databricks_conn_id: The name of the databricks connection to 
use.
-        :type databricks_conn_id: string
+        :type databricks_conn_id: str
         :param timeout_seconds: The amount of time in seconds the requests 
library
             will wait before timing-out.
         :type timeout_seconds: int
@@ -136,7 +136,7 @@ def submit_run(self, json):
         :param json: The data used in the body of the request to the 
``submit`` endpoint.
         :type json: dict
         :return: the run_id as a string
-        :rtype: string
+        :rtype: str
         """
         response = self._do_api_call(SUBMIT_RUN_ENDPOINT, json)
         return response['run_id']
diff --git a/airflow/contrib/hooks/datadog_hook.py b/airflow/contrib/hooks/datadog_hook.py
index 212570142d..dc3ade0097 100644
--- a/airflow/contrib/hooks/datadog_hook.py
+++ b/airflow/contrib/hooks/datadog_hook.py
@@ -30,7 +30,7 @@ class DatadogHook(BaseHook):
     Airflow runs.
 
     :param datadog_conn_id: The connection to datadog, containing metadata for 
api keys.
-    :param datadog_conn_id: string
+    :param datadog_conn_id: str
     """
 
     def __init__(self, datadog_conn_id='datadog_default'):
@@ -65,7 +65,7 @@ def send_metric(self, metric_name, datapoint, tags=None):
         Sends a single datapoint metric to DataDog
 
         :param metric_name: The name of the metric
-        :type metric_name: string
+        :type metric_name: str
         :param datapoint: A single integer or float related to the metric
         :type datapoint: integer or float
         :param tags: A list of tags associated with the metric
@@ -89,7 +89,7 @@ def query_metric(self,
         and returns the results.
 
         :param query: The datadog query to execute (see datadog docs)
-        :type query: string
+        :type query: str
         :param from_seconds_ago: How many seconds ago to start querying for.
         :type from_seconds_ago: int
         :param to_seconds_ago: Up to how many seconds ago to query for.
@@ -112,16 +112,16 @@ def post_event(self, title, text, tags=None, 
alert_type=None, aggregation_key=No
         itself.
 
         :param title: The title of the event
-        :type title: string
+        :type title: str
         :param text: The body of the event (more information)
-        :type text: string
+        :type text: str
         :param tags: List of string tags to apply to the event
         :type tags: list
         :param alert_type: The alert type for the event, one of
             ["error", "warning", "info", "success"]
-        :type alert_type: string
+        :type alert_type: str
         :param aggregation_key: Key that can be used to aggregate this event 
in a stream
-        :type aggregation_key: string
+        :type aggregation_key: str
         """
         response = api.Event.create(
             title=title,
diff --git a/airflow/contrib/hooks/gcp_api_base_hook.py b/airflow/contrib/hooks/gcp_api_base_hook.py
index 2260e7b0e4..d88f865b0f 100644
--- a/airflow/contrib/hooks/gcp_api_base_hook.py
+++ b/airflow/contrib/hooks/gcp_api_base_hook.py
@@ -47,11 +47,11 @@ class GoogleCloudBaseHook(BaseHook):
     def __init__(self, conn_id, delegate_to=None):
         """
         :param conn_id: The connection ID to use when fetching connection info.
-        :type conn_id: string
+        :type conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have
             domain-wide delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         self.conn_id = conn_id
         self.delegate_to = delegate_to
diff --git a/airflow/contrib/hooks/gcs_hook.py b/airflow/contrib/hooks/gcs_hook.py
index b5f3edc845..1abe4b1883 100644
--- a/airflow/contrib/hooks/gcs_hook.py
+++ b/airflow/contrib/hooks/gcs_hook.py
@@ -54,12 +54,12 @@ def copy(self, source_bucket, source_object, 
destination_bucket=None,
         source bucket/object is used, but not both.
 
         :param bucket: The bucket of the object to copy from.
-        :type bucket: string
+        :type bucket: str
         :param object: The object to copy.
-        :type object: string
+        :type object: str
         :param destination_bucket: The destination of the object to copied to.
             Can be omitted; then the same bucket is used.
-        :type destination_bucket: string
+        :type destination_bucket: str
         :param destination_object: The (renamed) path of the object if given.
             Can be omitted; then the same name is used.
         """
@@ -95,11 +95,11 @@ def download(self, bucket, object, filename=False):
         Get a file from Google Cloud Storage.
 
         :param bucket: The bucket to fetch from.
-        :type bucket: string
+        :type bucket: str
         :param object: The object to fetch.
-        :type object: string
+        :type object: str
         :param filename: If set, a local file path where the file should be 
written to.
-        :type filename: string
+        :type filename: str
         """
         service = self.get_conn()
         downloaded_file_bytes = service \
@@ -121,13 +121,13 @@ def upload(self, bucket, object, filename, 
mime_type='application/octet-stream')
         Uploads a local file to Google Cloud Storage.
 
         :param bucket: The bucket to upload to.
-        :type bucket: string
+        :type bucket: str
         :param object: The object name to set when uploading the local file.
-        :type object: string
+        :type object: str
         :param filename: The local file path to the file to be uploaded.
-        :type filename: string
+        :type filename: str
         :param mime_type: The MIME type to set when uploading the file.
-        :type mime_type: string
+        :type mime_type: str
         """
         service = self.get_conn()
         media = MediaFileUpload(filename, mime_type)
@@ -142,10 +142,10 @@ def exists(self, bucket, object):
         Checks for the existence of a file in Google Cloud Storage.
 
         :param bucket: The Google cloud storage bucket where the object is.
-        :type bucket: string
+        :type bucket: str
         :param object: The name of the object to check in the Google cloud
             storage bucket.
-        :type object: string
+        :type object: str
         """
         service = self.get_conn()
         try:
@@ -165,10 +165,10 @@ def is_updated_after(self, bucket, object, ts):
         Checks if an object is updated in Google Cloud Storage.
 
         :param bucket: The Google cloud storage bucket where the object is.
-        :type bucket: string
+        :type bucket: str
         :param object: The name of the object to check in the Google cloud
             storage bucket.
-        :type object: string
+        :type object: str
         :param ts: The timestamp to check against.
         :type ts: datetime
         """
@@ -205,11 +205,11 @@ def delete(self, bucket, object, generation=None):
         parameter is used.
 
         :param bucket: name of the bucket, where the object resides
-        :type bucket: string
+        :type bucket: str
         :param object: name of the object to delete
-        :type object: string
+        :type object: str
         :param generation: if present, permanently delete the object of this 
generation
-        :type generation: string
+        :type generation: str
         :return: True if succeeded
         """
         service = self.get_conn()
@@ -230,13 +230,13 @@ def list(self, bucket, versions=None, maxResults=None, 
prefix=None):
         List all objects from the bucket with the give string prefix in name
 
         :param bucket: bucket name
-        :type bucket: string
+        :type bucket: str
         :param versions: if true, list all versions of the objects
         :type versions: boolean
         :param maxResults: max count of items to return in a single page of 
responses
         :type maxResults: integer
         :param prefix: prefix string which filters objects whose name begin 
with this prefix
-        :type prefix: string
+        :type prefix: str
         :return: a stream of object names matching the filtering criteria
         """
         service = self.get_conn()
diff --git a/airflow/contrib/hooks/jira_hook.py b/airflow/contrib/hooks/jira_hook.py
index 148101b72f..8ada15c1b4 100644
--- a/airflow/contrib/hooks/jira_hook.py
+++ b/airflow/contrib/hooks/jira_hook.py
@@ -26,7 +26,7 @@ class JiraHook(BaseHook):
     Jira interaction hook, a Wrapper around JIRA Python SDK.
 
     :param jira_conn_id: reference to a pre-defined Jira Connection
-    :type jira_conn_id: string
+    :type jira_conn_id: str
     """
 
     def __init__(self,
diff --git a/airflow/contrib/hooks/redis_hook.py b/airflow/contrib/hooks/redis_hook.py
index 936eff87a1..3bfcdfad19 100644
--- a/airflow/contrib/hooks/redis_hook.py
+++ b/airflow/contrib/hooks/redis_hook.py
@@ -87,6 +87,6 @@ def key_exists(self, key):
         Checks if a key exists in Redis database
 
         :param key: The key to check the existence.
-        :type key: string
+        :type key: str
         """
         return self.get_conn().exists(key)
diff --git a/airflow/contrib/operators/bigquery_check_operator.py b/airflow/contrib/operators/bigquery_check_operator.py
index 1ff803198b..7ce2bb7e0a 100644
--- a/airflow/contrib/operators/bigquery_check_operator.py
+++ b/airflow/contrib/operators/bigquery_check_operator.py
@@ -46,9 +46,9 @@ class BigQueryCheckOperator(CheckOperator):
     without stopping the progress of the DAG.
 
     :param sql: the sql to be executed
-    :type sql: string
+    :type sql: str
     :param bigquery_conn_id: reference to the BigQuery database
-    :type presto_conn_id: string
+    :type presto_conn_id: str
     """
 
     @apply_defaults
@@ -71,7 +71,7 @@ class BigQueryValueCheckOperator(ValueCheckOperator):
     Performs a simple value check using sql code.
 
     :param sql: the sql to be executed
-    :type sql: string
+    :type sql: str
     """
 
     @apply_defaults
diff --git a/airflow/contrib/operators/bigquery_operator.py b/airflow/contrib/operators/bigquery_operator.py
index 2f3abe7de7..a385b1b362 100644
--- a/airflow/contrib/operators/bigquery_operator.py
+++ b/airflow/contrib/operators/bigquery_operator.py
@@ -30,13 +30,13 @@ class BigQueryOperator(BaseOperator):
     :param destination_dataset_table: A dotted
         (<project>.|<project>:)<dataset>.<table> that, if set, will store the 
results
         of the query.
-    :type destination_dataset_table: string
+    :type destination_dataset_table: str
     :param bigquery_conn_id: reference to a specific BigQuery hook.
-    :type bigquery_conn_id: string
+    :type bigquery_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have 
domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param udf_config: The User Defined Function configuration for the query.
         See https://cloud.google.com/bigquery/user-defined-functions for 
details.
     :type udf_config: list
diff --git a/airflow/contrib/operators/bigquery_table_delete_operator.py b/airflow/contrib/operators/bigquery_table_delete_operator.py
index cd0c9dcf27..b9925ca811 100644
--- a/airflow/contrib/operators/bigquery_table_delete_operator.py
+++ b/airflow/contrib/operators/bigquery_table_delete_operator.py
@@ -26,13 +26,13 @@ class BigQueryTableDeleteOperator(BaseOperator):
     :param deletion_dataset_table: A dotted
         (<project>.|<project>:)<dataset>.<table> that indicates which table
         will be deleted.
-    :type deletion_dataset_table: string
+    :type deletion_dataset_table: str
     :param bigquery_conn_id: reference to a specific BigQuery hook.
-    :type bigquery_conn_id: string
+    :type bigquery_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have 
domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param ignore_if_missing: if True, then return success even if the
         requested table does not exist.
     :type ignore_if_missing: boolean
diff --git a/airflow/contrib/operators/bigquery_to_bigquery.py b/airflow/contrib/operators/bigquery_to_bigquery.py
index 6f4843cb94..57ba636340 100644
--- a/airflow/contrib/operators/bigquery_to_bigquery.py
+++ b/airflow/contrib/operators/bigquery_to_bigquery.py
@@ -34,17 +34,17 @@ class BigQueryToBigQueryOperator(BaseOperator):
     :type source_project_dataset_tables: list|string
     :param destination_project_dataset_table: The destination BigQuery
         table. Format is: (project:|project.)<dataset>.<table>
-    :type destination_project_dataset_table: string
+    :type destination_project_dataset_table: str
     :param write_disposition: The write disposition if the table already 
exists.
-    :type write_disposition: string
+    :type write_disposition: str
     :param create_disposition: The create disposition if the table doesn't 
exist.
-    :type create_disposition: string
+    :type create_disposition: str
     :param bigquery_conn_id: reference to a specific BigQuery hook.
-    :type bigquery_conn_id: string
+    :type bigquery_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have 
domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     """
     template_fields = ('source_project_dataset_tables',
                        'destination_project_dataset_table')
diff --git a/airflow/contrib/operators/bigquery_to_gcs.py b/airflow/contrib/operators/bigquery_to_gcs.py
index aaff462ba9..fd7acb9fdd 100644
--- a/airflow/contrib/operators/bigquery_to_gcs.py
+++ b/airflow/contrib/operators/bigquery_to_gcs.py
@@ -33,26 +33,26 @@ class BigQueryToCloudStorageOperator(BaseOperator):
         (<project>.|<project>:)<dataset>.<table> BigQuery table to use as the 
source
         data. If <project> is not included, project will be the project 
defined in
         the connection json.
-    :type source_project_dataset_table: string
+    :type source_project_dataset_table: str
     :param destination_cloud_storage_uris: The destination Google Cloud
         Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
         convention defined here:
         
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
     :type destination_cloud_storage_uris: list
     :param compression: Type of compression to use.
-    :type compression: string
+    :type compression: str
     :param export_format: File format to export.
-    :type field_delimiter: string
+    :type field_delimiter: str
     :param field_delimiter: The delimiter to use when extracting to a CSV.
-    :type field_delimiter: string
+    :type field_delimiter: str
     :param print_header: Whether to print a header for a CSV file extract.
     :type print_header: boolean
     :param bigquery_conn_id: reference to a specific BigQuery hook.
-    :type bigquery_conn_id: string
+    :type bigquery_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have 
domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     """
     template_fields = ('source_project_dataset_table', 
'destination_cloud_storage_uris')
     template_ext = ('.sql',)
diff --git a/airflow/contrib/operators/cloudml_operator.py b/airflow/contrib/operators/cloudml_operator.py
index b0b6e91a17..805027d508 100644
--- a/airflow/contrib/operators/cloudml_operator.py
+++ b/airflow/contrib/operators/cloudml_operator.py
@@ -30,11 +30,11 @@ class CloudMLVersionOperator(BaseOperator):
 
     :param model_name: The name of the Google Cloud ML model that the version
         belongs to.
-    :type model_name: string
+    :type model_name: str
 
     :param project_name: The Google Cloud project name to which CloudML
         model belongs.
-    :type project_name: string
+    :type project_name: str
 
     :param version: A dictionary containing the information about the version.
         If the `operation` is `create`, `version` should contain all the
@@ -45,7 +45,7 @@ class CloudMLVersionOperator(BaseOperator):
     :type version: dict
 
     :param gcp_conn_id: The connection ID to use when fetching connection info.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
 
     :param operation: The operation to perform. Available operations are:
         'create': Creates a new version in the model specified by `model_name`,
@@ -64,12 +64,12 @@ class CloudMLVersionOperator(BaseOperator):
             model specified by `model_name`).
             The name of the version should be specified in the `version`
             parameter.
-     :type operation: string
+     :type operation: str
 
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have
         domain-wide delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     """
 
 
@@ -132,10 +132,10 @@ class CloudMLModelOperator(BaseOperator):
 
     :param project_name: The Google Cloud project name to which CloudML
         model belongs.
-    :type project_name: string
+    :type project_name: str
 
     :param gcp_conn_id: The connection ID to use when fetching connection info.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
 
     :param operation: The operation to perform. Available operations are:
         'create': Creates a new model as provided by the `model` parameter.
@@ -144,7 +144,7 @@ class CloudMLModelOperator(BaseOperator):
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have
         domain-wide delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     """
 
     template_fields = [
diff --git a/airflow/contrib/operators/databricks_operator.py b/airflow/contrib/operators/databricks_operator.py
index 9c995df76d..b9ab1e3bde 100644
--- a/airflow/contrib/operators/databricks_operator.py
+++ b/airflow/contrib/operators/databricks_operator.py
@@ -114,7 +114,7 @@ class DatabricksSubmitRunOperator(BaseOperator):
     :param existing_cluster_id: ID for existing cluster on which to run this 
task.
         *EITHER* ``new_cluster`` *OR* ``existing_cluster_id`` should be 
specified.
         This field will be templated.
-    :type existing_cluster_id: string
+    :type existing_cluster_id: str
     :param libraries: Libraries which this run will use.
         This field will be templated.
 
@@ -125,14 +125,14 @@ class DatabricksSubmitRunOperator(BaseOperator):
         By default this will be set to the Airflow ``task_id``. This 
``task_id`` is a
         required parameter of the superclass ``BaseOperator``.
         This field will be templated.
-    :type run_name: string
+    :type run_name: str
     :param timeout_seconds: The timeout for this run. By default a value of 0 
is used
         which means to have no timeout.
         This field will be templated.
     :type timeout_seconds: int32
     :param databricks_conn_id: The name of the Airflow connection to use.
         By default and in the common case this will be ``databricks_default``.
-    :type databricks_conn_id: string
+    :type databricks_conn_id: str
     :param polling_period_seconds: Controls the rate which we poll for the 
result of
         this run. By default the operator will poll every 30 seconds.
     :type polling_period_seconds: int
diff --git a/airflow/contrib/operators/dataflow_operator.py b/airflow/contrib/operators/dataflow_operator.py
index c1dca246f0..78f60a47ab 100644
--- a/airflow/contrib/operators/dataflow_operator.py
+++ b/airflow/contrib/operators/dataflow_operator.py
@@ -84,18 +84,18 @@ def __init__(
         https://cloud.google.com/dataflow/pipelines/specifying-exec-params
 
         :param jar: The reference to a self executing DataFlow jar.
-        :type jar: string
+        :type jar: str
         :param dataflow_default_options: Map of default job options.
         :type dataflow_default_options: dict
         :param options: Map of job specific options.
         :type options: dict
         :param gcp_conn_id: The connection ID to use connecting to Google Cloud
         Platform.
-        :type gcp_conn_id: string
+        :type gcp_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have
             domain-wide delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(DataFlowJavaOperator, self).__init__(*args, **kwargs)
 
@@ -147,7 +147,7 @@ def __init__(
 
         :param py_file: Reference to the python dataflow pipleline file, e.g.,
             /some/local/file/path/to/your/python/pipeline/file.py.
-        :type py_file: string
+        :type py_file: str
         :param py_options: Additional python options.
         :type pyt_options: list of strings, e.g., ["-m", "-v"].
         :param dataflow_default_options: Map of default job options.
@@ -156,11 +156,11 @@ def __init__(
         :type options: dict
         :param gcp_conn_id: The connection ID to use connecting to Google Cloud
             Platform.
-        :type gcp_conn_id: string
+        :type gcp_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have
             domain-wide  delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(DataFlowPythonOperator, self).__init__(*args, **kwargs)
 
@@ -207,9 +207,9 @@ def google_cloud_to_local(self, file_name):
         will be returned immediately.
 
         :param file_name: The full path of input file.
-        :type file_name: string
+        :type file_name: str
         :return: The full path of local file.
-        :type: string
+        :type: str
         """
         if not file_name.startswith('gs://'):
             return file_name
diff --git a/airflow/contrib/operators/dataproc_operator.py b/airflow/contrib/operators/dataproc_operator.py
index 3e006ac110..c5d324d53d 100644
--- a/airflow/contrib/operators/dataproc_operator.py
+++ b/airflow/contrib/operators/dataproc_operator.py
@@ -68,15 +68,15 @@ def __init__(self,
         
https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters
 
         :param cluster_name: The name of the cluster to create
-        :type cluster_name: string
+        :type cluster_name: str
         :param project_id: The ID of the google cloud project in which
             to create the cluster
-        :type project_id: string
+        :type project_id: str
         :param num_workers: The # of workers to spin up
         :type num_workers: int
         :param storage_bucket: The storage bucket to use, setting to None lets 
dataproc
             generate a custom one for you
-        :type storage_bucket: string
+        :type storage_bucket: str
         :param init_actions_uris: List of GCS uri's containing
             dataproc initialization scripts
         :type init_actions_uris: list[string]
@@ -84,18 +84,18 @@ def __init__(self,
             to add to all instances
         :type metadata: dict
         :param image_version: the version of software inside the Dataproc 
cluster
-        :type image_version: string
+        :type image_version: str
         :param properties: dict of properties to set on
             config files (e.g. spark-defaults.conf), see
             https://cloud.google.com/dataproc/docs/reference/rest/v1/ \
             projects.regions.clusters#SoftwareConfig
         :type properties: dict
         :param master_machine_type: Compute engine machine type to use for the 
master node
-        :type master_machine_type: string
+        :type master_machine_type: str
         :param master_disk_size: Disk size for the master node
         :type int
         :param worker_machine_type:Compute engine machine type to use for the 
worker nodes
-        :type worker_machine_type: string
+        :type worker_machine_type: str
         :param worker_disk_size: Disk size for the worker nodes
         :type worker_disk_size: int
         :param num_preemptible_workers: The # of preemptible worker nodes to 
spin up
@@ -103,14 +103,14 @@ def __init__(self,
         :param labels: dict of labels to add to the cluster
         :type labels: dict
         :param zone: The zone where the cluster will be located
-        :type zone: string
+        :type zone: str
         :param region: leave as 'global', might become relevant in the future
         :param google_cloud_conn_id: The connection id to use when connecting 
to dataproc
-        :type google_cloud_conn_id: string
+        :type google_cloud_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have 
domain-wide
             delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(DataprocClusterCreateOperator, self).__init__(*args, **kwargs)
         self.google_cloud_conn_id = google_cloud_conn_id
@@ -302,18 +302,18 @@ def __init__(self,
         Delete a cluster on Google Cloud Dataproc.
 
         :param cluster_name: The name of the cluster to create
-        :type cluster_name: string
+        :type cluster_name: str
         :param project_id: The ID of the google cloud project in which
             the cluster runs
-        :type project_id: string
+        :type project_id: str
         :param region: leave as 'global', might become relevant in the future
-        :type region: string
+        :type region: str
         :param google_cloud_conn_id: The connection id to use when connecting 
to dataproc
-        :type google_cloud_conn_id: string
+        :type google_cloud_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have 
domain-wide
             delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(DataprocClusterDeleteOperator, self).__init__(*args, **kwargs)
         self.google_cloud_conn_id = google_cloud_conn_id
@@ -409,17 +409,17 @@ def __init__(
         
https://cloud.google.com/dataproc/reference/rest/v1/projects.regions.jobs
 
         :param query: The query or reference to the query file (pg or pig 
extension).
-        :type query: string
+        :type query: str
         :param query_uri: The uri of a pig script on Cloud Storage.
-        :type query_uri: string
+        :type query_uri: str
         :param variables: Map of named parameters for the query.
         :type variables: dict
         :param job_name: The job name used in the DataProc cluster. This name 
by default
             is the task_id appended with the execution data, but can be 
templated. The
             name will always be appended with a random number to avoid name 
clashes.
-        :type job_name: string
+        :type job_name: str
         :param dataproc_cluster: The id of the DataProc cluster.
-        :type dataproc_cluster: string
+        :type dataproc_cluster: str
         :param dataproc_pig_properties: Map for the Pig properties. Ideal to 
put in
             default arguments
         :type dataproc_pig_properties: dict
@@ -427,11 +427,11 @@ def __init__(
             UDFs and libs) and are ideal to put in default arguments.
         :type dataproc_pig_jars: list
         :param gcp_conn_id: The connection ID to use connecting to Google 
Cloud Platform.
-        :type gcp_conn_id: string
+        :type gcp_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have 
domain-wide
             delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(DataProcPigOperator, self).__init__(*args, **kwargs)
         self.gcp_conn_id = gcp_conn_id
@@ -487,17 +487,17 @@ def __init__(
         Create a new DataProcHiveOperator.
 
         :param query: The query or reference to the query file (q extension).
-        :type query: string
+        :type query: str
         :param query_uri: The uri of a hive script on Cloud Storage.
-        :type query_uri: string
+        :type query_uri: str
         :param variables: Map of named parameters for the query.
         :type variables: dict
         :param job_name: The job name used in the DataProc cluster. This name 
by default
             is the task_id appended with the execution data, but can be 
templated. The
             name will always be appended with a random number to avoid name 
clashes.
-        :type job_name: string
+        :type job_name: str
         :param dataproc_cluster: The id of the DataProc cluster.
-        :type dataproc_cluster: string
+        :type dataproc_cluster: str
         :param dataproc_hive_properties: Map for the Pig properties. Ideal to 
put in
             default arguments
         :type dataproc_hive_properties: dict
@@ -505,11 +505,11 @@ def __init__(
             UDFs and libs) and are ideal to put in default arguments.
         :type dataproc_hive_jars: list
         :param gcp_conn_id: The connection ID to use connecting to Google 
Cloud Platform.
-        :type gcp_conn_id: string
+        :type gcp_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have 
domain-wide
             delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(DataProcHiveOperator, self).__init__(*args, **kwargs)
         self.gcp_conn_id = gcp_conn_id
@@ -566,17 +566,17 @@ def __init__(
         Create a new DataProcSparkSqlOperator.
 
         :param query: The query or reference to the query file (q extension).
-        :type query: string
+        :type query: str
         :param query_uri: The uri of a spark sql script on Cloud Storage.
-        :type query_uri: string
+        :type query_uri: str
         :param variables: Map of named parameters for the query.
         :type variables: dict
         :param job_name: The job name used in the DataProc cluster. This name 
by default
             is the task_id appended with the execution data, but can be 
templated. The
             name will always be appended with a random number to avoid name 
clashes.
-        :type job_name: string
+        :type job_name: str
         :param dataproc_cluster: The id of the DataProc cluster.
-        :type dataproc_cluster: string
+        :type dataproc_cluster: str
         :param dataproc_spark_properties: Map for the Pig properties. Ideal to 
put in
             default arguments
         :type dataproc_spark_properties: dict
@@ -584,11 +584,11 @@ def __init__(
             for UDFs and libs) and are ideal to put in default arguments.
         :type dataproc_spark_jars: list
         :param gcp_conn_id: The connection ID to use connecting to Google 
Cloud Platform.
-        :type gcp_conn_id: string
+        :type gcp_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have 
domain-wide
             delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(DataProcSparkSqlOperator, self).__init__(*args, **kwargs)
         self.gcp_conn_id = gcp_conn_id
@@ -648,10 +648,10 @@ def __init__(
 
         :param main_jar: URI of the job jar provisioned on Cloud Storage. (use 
this or
             the main_class, not both together).
-        :type main_jar: string
+        :type main_jar: str
         :param main_class: Name of the job class. (use this or the main_jar, 
not both
             together).
-        :type main_class: string
+        :type main_class: str
         :param arguments: Arguments for the job.
         :type arguments: list
         :param archives: List of archived files that will be unpacked in the 
work
@@ -662,9 +662,9 @@ def __init__(
         :param job_name: The job name used in the DataProc cluster. This name 
by default
             is the task_id appended with the execution data, but can be 
templated. The
             name will always be appended with a random number to avoid name 
clashes.
-        :type job_name: string
+        :type job_name: str
         :param dataproc_cluster: The id of the DataProc cluster.
-        :type dataproc_cluster: string
+        :type dataproc_cluster: str
         :param dataproc_spark_properties: Map for the Pig properties. Ideal to 
put in
             default arguments
         :type dataproc_spark_properties: dict
@@ -672,11 +672,11 @@ def __init__(
             for UDFs and libs) and are ideal to put in default arguments.
         :type dataproc_spark_jars: list
         :param gcp_conn_id: The connection ID to use connecting to Google 
Cloud Platform.
-        :type gcp_conn_id: string
+        :type gcp_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have 
domain-wide
             delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(DataProcSparkOperator, self).__init__(*args, **kwargs)
         self.gcp_conn_id = gcp_conn_id
@@ -736,10 +736,10 @@ def __init__(
 
         :param main_jar: URI of the job jar provisioned on Cloud Storage. (use 
this or
             the main_class, not both together).
-        :type main_jar: string
+        :type main_jar: str
         :param main_class: Name of the job class. (use this or the main_jar, 
not both
             together).
-        :type main_class: string
+        :type main_class: str
         :param arguments: Arguments for the job.
         :type arguments: list
         :param archives: List of archived files that will be unpacked in the 
work
@@ -750,9 +750,9 @@ def __init__(
         :param job_name: The job name used in the DataProc cluster. This name 
by default
             is the task_id appended with the execution data, but can be 
templated. The
             name will always be appended with a random number to avoid name 
clashes.
-        :type job_name: string
+        :type job_name: str
         :param dataproc_cluster: The id of the DataProc cluster.
-        :type dataproc_cluster: string
+        :type dataproc_cluster: str
         :param dataproc_hadoop_properties: Map for the Pig properties. Ideal 
to put in
             default arguments
         :type dataproc_hadoop_properties: dict
@@ -760,11 +760,11 @@ def __init__(
             for UDFs and libs) and are ideal to put in default arguments.
         :type dataproc_hadoop_jars: list
         :param gcp_conn_id: The connection ID to use connecting to Google 
Cloud Platform.
-        :type gcp_conn_id: string
+        :type gcp_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have 
domain-wide
             delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(DataProcHadoopOperator, self).__init__(*args, **kwargs)
         self.gcp_conn_id = gcp_conn_id
@@ -824,7 +824,7 @@ def __init__(
 
         :param main: [Required] The Hadoop Compatible Filesystem (HCFS) URI of 
the main
             Python file to use as the driver. Must be a .py file.
-        :type main: string
+        :type main: str
         :param arguments: Arguments for the job.
         :type arguments: list
         :param archives: List of archived files that will be unpacked in the 
work
@@ -838,9 +838,9 @@ def __init__(
         :param job_name: The job name used in the DataProc cluster. This name 
by default
             is the task_id appended with the execution data, but can be 
templated. The
             name will always be appended with a random number to avoid name 
clashes.
-        :type job_name: string
+        :type job_name: str
         :param dataproc_cluster: The id of the DataProc cluster.
-        :type dataproc_cluster: string
+        :type dataproc_cluster: str
         :param dataproc_pyspark_properties: Map for the Pig properties. Ideal 
to put in
             default arguments
         :type dataproc_pyspark_properties: dict
@@ -848,11 +848,11 @@ def __init__(
             for UDFs and libs) and are ideal to put in default arguments.
         :type dataproc_pyspark_jars: list
         :param gcp_conn_id: The connection ID to use connecting to Google 
Cloud Platform.
-        :type gcp_conn_id: string
+        :type gcp_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have
             domain-wide delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
          """
         super(DataProcPySparkOperator, self).__init__(*args, **kwargs)
         self.gcp_conn_id = gcp_conn_id
diff --git a/airflow/contrib/operators/file_to_gcs.py b/airflow/contrib/operators/file_to_gcs.py
index 61497a57dd..67c331ca0b 100644
--- a/airflow/contrib/operators/file_to_gcs.py
+++ b/airflow/contrib/operators/file_to_gcs.py
@@ -23,17 +23,17 @@ class FileToGoogleCloudStorageOperator(BaseOperator):
     Uploads a file to Google Cloud Storage
 
     :param src: Path to the local file
-    :type src: string
+    :type src: str
     :param dst: Destination path within the specified bucket
-    :type dst: string
+    :type dst: str
     :param bucket: The bucket to upload to
-    :type bucket: string
+    :type bucket: str
     :param google_cloud_storage_conn_id: The Airflow connection ID to upload 
with
-    :type google_cloud_storage_conn_id: string
+    :type google_cloud_storage_conn_id: str
     :param mime_type: The mime-type string
-    :type mime_type: string
+    :type mime_type: str
     :param delegate_to: The account to impersonate, if any
-    :type delegate_to: string
+    :type delegate_to: str
     """
 
     @apply_defaults
diff --git a/airflow/contrib/operators/fs_operator.py b/airflow/contrib/operators/fs_operator.py
index 259648709d..6d4bffd5eb 100644
--- a/airflow/contrib/operators/fs_operator.py
+++ b/airflow/contrib/operators/fs_operator.py
@@ -27,10 +27,10 @@ class FileSensor(BaseSensorOperator):
 
     :param fs_conn_id: reference to the File (path)
         connection id
-    :type fs_conn_id: string
+    :type fs_conn_id: str
     :param filepath: File or folder name (relative to
         the base path set within the connection)
-    :type fs_conn_id: string
+    :type fs_conn_id: str
     """
     template_fields = ('filepath',)
     ui_color = '#91818a'
diff --git a/airflow/contrib/operators/gcs_download_operator.py b/airflow/contrib/operators/gcs_download_operator.py
index c17f774c36..b71b629c2b 100644
--- a/airflow/contrib/operators/gcs_download_operator.py
+++ b/airflow/contrib/operators/gcs_download_operator.py
@@ -25,25 +25,25 @@ class GoogleCloudStorageDownloadOperator(BaseOperator):
     Downloads a file from Google Cloud Storage.
 
     :param bucket: The Google cloud storage bucket where the object is.
-    :type bucket: string
+    :type bucket: str
     :param object: The name of the object to download in the Google cloud
         storage bucket.
-    :type object: string
+    :type object: str
     :param filename: The file path on the local file system (where the
         operator is being executed) that the file should be downloaded to.
         If false, the downloaded data will not be stored on the local file
         system.
-    :type filename: string
+    :type filename: str
     :param store_to_xcom_key: If this param is set, the operator will push
         the contents of the downloaded file to XCom with the key set in this
         parameter. If false, the downloaded data will not be pushed to XCom.
-    :type store_to_xcom_key: string
+    :type store_to_xcom_key: str
     :param google_cloud_storage_conn_id: The connection ID to use when
         connecting to Google cloud storage.
-    :type google_cloud_storage_conn_id: string
+    :type google_cloud_storage_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have 
domain-wide delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     """
     template_fields = ('bucket', 'object', 'filename', 'store_to_xcom_key',)
     ui_color = '#f0eee4'
diff --git a/airflow/contrib/operators/gcs_to_bq.py b/airflow/contrib/operators/gcs_to_bq.py
index 44cf7b64f6..d68e1bcd73 100644
--- a/airflow/contrib/operators/gcs_to_bq.py
+++ b/airflow/contrib/operators/gcs_to_bq.py
@@ -58,29 +58,29 @@ def __init__(
         Google cloud storage must be a JSON file with the schema fields in it.
 
         :param bucket: The bucket to load from.
-        :type bucket: string
+        :type bucket: str
         :param source_objects: List of Google cloud storage URIs to load from.
         :type object: list
         :param destination_project_dataset_table: The dotted 
(<project>.)<dataset>.<table>
             BigQuery table to load data into. If <project> is not included, 
project will
             be the project defined in the connection json.
-        :type destination_project_dataset_table: string
+        :type destination_project_dataset_table: str
         :param schema_fields: If set, the schema field list as defined here:
             
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
         :type schema_fields: list
         :param schema_object: If set, a GCS object path pointing to a .json 
file that
             contains the schema for the table.
-        :param schema_object: string
+        :param schema_object: str
         :param source_format: File format to export.
-        :type source_format: string
+        :type source_format: str
         :param create_disposition: The create disposition if the table doesn't 
exist.
-        :type create_disposition: string
+        :type create_disposition: str
         :param skip_leading_rows: Number of rows to skip when loading from a 
CSV.
         :type skip_leading_rows: int
         :param write_disposition: The write disposition if the table already 
exists.
-        :type write_disposition: string
+        :type write_disposition: str
         :param field_delimiter: The delimiter to use when loading from a CSV.
-        :type field_delimiter: string
+        :type field_delimiter: str
         :param max_bad_records: The maximum number of bad records that 
BigQuery can
             ignore when running the job.
         :type max_bad_records: int
@@ -90,16 +90,16 @@ def __init__(
             execute() command, which in turn gets stored in XCom for future
             operators to use. This can be helpful with incremental 
loads--during
             future executions, you can pick up from the max ID.
-        :type max_id_key: string
+        :type max_id_key: str
         :param bigquery_conn_id: Reference to a specific BigQuery hook.
-        :type bigquery_conn_id: string
+        :type bigquery_conn_id: str
         :param google_cloud_storage_conn_id: Reference to a specific Google
             cloud storage hook.
-        :type google_cloud_storage_conn_id: string
+        :type google_cloud_storage_conn_id: str
         :param delegate_to: The account to impersonate, if any. For this to
             work, the service account making the request must have domain-wide
             delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         :param schema_update_options: Allows the schema of the desitination 
             table to be updated as a side effect of the load job.
         :type schema_update_options: list
diff --git a/airflow/contrib/operators/mysql_to_gcs.py b/airflow/contrib/operators/mysql_to_gcs.py
index 1a2312f414..6b6a574ae2 100644
--- a/airflow/contrib/operators/mysql_to_gcs.py
+++ b/airflow/contrib/operators/mysql_to_gcs.py
@@ -49,18 +49,18 @@ def __init__(self,
                  **kwargs):
         """
         :param sql: The SQL to execute on the MySQL table.
-        :type sql: string
+        :type sql: str
         :param bucket: The bucket to upload to.
-        :type bucket: string
+        :type bucket: str
         :param filename: The filename to use as the object name when uploading
             to Google cloud storage. A {} should be specified in the filename
             to allow the operator to inject file numbers in cases where the
             file is split due to size.
-        :type filename: string
+        :type filename: str
         :param schema_filename: If set, the filename to use as the object name
             when uploading a .json file containing the BigQuery schema fields
             for the table that was dumped from MySQL.
-        :type schema_filename: string
+        :type schema_filename: str
         :param approx_max_file_size_bytes: This operator supports the ability
             to split large table dumps into multiple files (see notes in the
             filenamed param docs above). Google cloud storage allows for files
@@ -68,10 +68,10 @@ def __init__(self,
             file size of the splits.
         :type approx_max_file_size_bytes: long
         :param mysql_conn_id: Reference to a specific MySQL hook.
-        :type mysql_conn_id: string
+        :type mysql_conn_id: str
         :param google_cloud_storage_conn_id: Reference to a specific Google
             cloud storage hook.
-        :type google_cloud_storage_conn_id: string
+        :type google_cloud_storage_conn_id: str
         :param delegate_to: The account to impersonate, if any. For this to
             work, the service account making the request must have domain-wide
             delegation enabled.
diff --git a/airflow/contrib/operators/ssh_execute_operator.py b/airflow/contrib/operators/ssh_execute_operator.py
index 3bd8f09fdf..ac5aeb9145 100644
--- a/airflow/contrib/operators/ssh_execute_operator.py
+++ b/airflow/contrib/operators/ssh_execute_operator.py
@@ -39,9 +39,9 @@ class SSHTempFileContent(object):
     :param ssh_hook: A SSHHook that indicates a remote host
                      where you want to create tempfile
     :param content: Initial content of creating temporary file
-    :type content: string
+    :type content: str
     :param prefix: The prefix string you want to use for the temporary file
-    :type prefix: string
+    :type prefix: str
     """
 
     def __init__(self, ssh_hook, content, prefix="tmp"):
@@ -91,10 +91,10 @@ class SSHExecuteOperator(BaseOperator):
 
     :param ssh_hook: A SSHHook that indicates the remote host
                      you want to run the script
-    :type ssh_hook: string
+    :type ssh_hook: str
     :param bash_command: The command, set of commands or reference to a
         bash script (must be '.sh') to be executed.
-    :type bash_command: string
+    :type bash_command: str
     :param env: If env is not None, it must be a mapping that defines the
         environment variables for the new process; these are used instead
         of inheriting the current process environment, which is the default
diff --git a/airflow/contrib/operators/vertica_operator.py b/airflow/contrib/operators/vertica_operator.py
index 9266563973..05f545d71d 100644
--- a/airflow/contrib/operators/vertica_operator.py
+++ b/airflow/contrib/operators/vertica_operator.py
@@ -24,7 +24,7 @@ class VerticaOperator(BaseOperator):
     Executes sql code in a specific Vertica database
 
     :param vertica_conn_id: reference to a specific Vertica database
-    :type vertica_conn_id: string
+    :type vertica_conn_id: str
     :param sql: the sql code to be executed
     :type sql: Can receive a str representing a sql statement,
         a list of str (sql statements), or reference to a template file.
diff --git a/airflow/contrib/sensors/bigquery_sensor.py b/airflow/contrib/sensors/bigquery_sensor.py
index 8a8ca62a11..43495ea02d 100644
--- a/airflow/contrib/sensors/bigquery_sensor.py
+++ b/airflow/contrib/sensors/bigquery_sensor.py
@@ -41,17 +41,17 @@ def __init__(
 
         :param project_id: The Google cloud project in which to look for the 
table. The connection supplied to the hook
         must provide access to the specified project.
-        :type project_id: string
+        :type project_id: str
         :param dataset_id: The name of the dataset in which to look for the 
table.
             storage bucket.
-        :type dataset_id: string
+        :type dataset_id: str
         :param table_id: The name of the table to check the existence of.
-        :type table_id: string
+        :type table_id: str
         :param bigquery_conn_id: The connection ID to use when connecting to 
Google BigQuery.
-        :type bigquery_conn_id: string
+        :type bigquery_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have 
domain-wide delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(BigQueryTableSensor, self).__init__(*args, **kwargs)
         self.project_id = project_id
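
As a quick illustration of the BigQueryTableSensor arguments documented above, a minimal usage sketch (project, dataset and table names are invented, and a `dag` object is assumed to be in scope):

    # Hypothetical usage sketch -- not part of the patch above.
    from airflow.contrib.sensors.bigquery_sensor import BigQueryTableSensor

    wait_for_table = BigQueryTableSensor(
        task_id='wait_for_table',
        project_id='my-gcp-project',   # invented project id
        dataset_id='my_dataset',       # invented dataset
        table_id='my_table',           # invented table
        dag=dag,                       # assumes a DAG object named `dag` exists
    )
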
diff --git a/airflow/contrib/sensors/datadog_sensor.py 
b/airflow/contrib/sensors/datadog_sensor.py
index d8660f7045..34c7a70e0b 100644
--- a/airflow/contrib/sensors/datadog_sensor.py
+++ b/airflow/contrib/sensors/datadog_sensor.py
@@ -30,7 +30,7 @@ class DatadogSensor(BaseSensorOperator):
     Airflow runs.
 
     :param datadog_conn_id: The connection to datadog, containing metadata for 
api keys.
-    :param datadog_conn_id: string
+    :type datadog_conn_id: str
     """
     ui_color = '#66c3dd'
 
diff --git a/airflow/contrib/sensors/emr_job_flow_sensor.py 
b/airflow/contrib/sensors/emr_job_flow_sensor.py
index 662b3b873d..c3c37ceb21 100644
--- a/airflow/contrib/sensors/emr_job_flow_sensor.py
+++ b/airflow/contrib/sensors/emr_job_flow_sensor.py
@@ -26,7 +26,7 @@ class EmrJobFlowSensor(EmrBaseSensor):
     If it fails the sensor errors, failing the task.
 
     :param job_flow_id: job_flow_id to check the state of
-    :type job_flow_id: string
+    :type job_flow_id: str
     """
 
     NON_TERMINAL_STATES = ['STARTING', 'BOOTSTRAPPING', 'RUNNING', 'WAITING', 
'TERMINATING']
diff --git a/airflow/contrib/sensors/emr_step_sensor.py 
b/airflow/contrib/sensors/emr_step_sensor.py
index 4cc6bc4508..cc154b94d2 100644
--- a/airflow/contrib/sensors/emr_step_sensor.py
+++ b/airflow/contrib/sensors/emr_step_sensor.py
@@ -25,9 +25,9 @@ class EmrStepSensor(EmrBaseSensor):
     If it fails the sensor errors, failing the task.
 
     :param job_flow_id: job_flow_id which contains the step to check the state of
-    :type job_flow_id: string
+    :type job_flow_id: str
     :param step_id: step to check the state of
-    :type step_id: string
+    :type step_id: str
     """
 
     NON_TERMINAL_STATES = ['PENDING', 'RUNNING', 'CONTINUE']
diff --git a/airflow/contrib/sensors/gcs_sensor.py 
b/airflow/contrib/sensors/gcs_sensor.py
index c9d741ba3f..c1df68b4ea 100644
--- a/airflow/contrib/sensors/gcs_sensor.py
+++ b/airflow/contrib/sensors/gcs_sensor.py
@@ -39,16 +39,16 @@ def __init__(
         Create a new GoogleCloudStorageObjectSensor.
 
         :param bucket: The Google cloud storage bucket where the object is.
-        :type bucket: string
+        :type bucket: str
         :param object: The name of the object to check in the Google cloud
             storage bucket.
-        :type object: string
+        :type object: str
         :param google_cloud_storage_conn_id: The connection ID to use when
             connecting to Google cloud storage.
-        :type google_cloud_storage_conn_id: string
+        :type google_cloud_storage_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have 
domain-wide delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(GoogleCloudStorageObjectSensor, self).__init__(*args, **kwargs)
         self.bucket = bucket
@@ -95,21 +95,21 @@ def __init__(
         Create a new GoogleCloudStorageObjectUpdatedSensor.
 
         :param bucket: The Google cloud storage bucket where the object is.
-        :type bucket: string
+        :type bucket: str
         :param object: The name of the object to download in the Google cloud
             storage bucket.
-        :type object: string
+        :type object: str
         :param ts_func: Callback for defining the update condition. The 
default callback
             returns execution_date + schedule_interval. The callback takes the 
context
             as parameter.
         :type ts_func: function
         :param google_cloud_storage_conn_id: The connection ID to use when
             connecting to Google cloud storage.
-        :type google_cloud_storage_conn_id: string
+        :type google_cloud_storage_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have 
domain-wide
             delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(GoogleCloudStorageObjectUpdatedSensor, self).__init__(*args, 
**kwargs)
         self.bucket = bucket
diff --git a/airflow/contrib/sensors/redis_key_sensor.py 
b/airflow/contrib/sensors/redis_key_sensor.py
index 4cab4072b7..7e7396e489 100644
--- a/airflow/contrib/sensors/redis_key_sensor.py
+++ b/airflow/contrib/sensors/redis_key_sensor.py
@@ -32,9 +32,9 @@ def __init__(self, key, redis_conn_id, *args, **kwargs):
         Create a new RedisKeySensor
 
         :param key: The key to be monitored
-        :type key: string
+        :type key: str
         :param redis_conn_id: The connection ID to use when connecting to 
Redis DB.
-        :type redis_conn_id: string
+        :type redis_conn_id: str
         """
         super(RedisKeySensor, self).__init__(*args, **kwargs)
         self.logger = logging.getLogger(__name__)
diff --git a/airflow/hooks/S3_hook.py b/airflow/hooks/S3_hook.py
index caaa575cc4..1b8431760a 100644
--- a/airflow/hooks/S3_hook.py
+++ b/airflow/hooks/S3_hook.py
@@ -403,7 +403,7 @@ def load_string(self, string_data,
         boto infrastructure to ship a file to s3. It is currently using only
         a single part download, and should not be used to move large files.
 
-        :param string_data: string to set as content for the key.
+        :param string_data: str to set as content for the key.
         :type string_data: str
         :param key: S3 key that will point to the file
         :type key: str
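
For context, a rough usage sketch of load_string as documented above; the connection id, bucket and key are invented, and the exact 1.x-era signature is assumed here rather than confirmed by the patch:

    # Hypothetical usage sketch -- not part of the patch above.
    from airflow.hooks.S3_hook import S3Hook

    hook = S3Hook(s3_conn_id='my_s3')      # invented connection id
    hook.load_string(
        string_data='hello world',         # content written to the key
        key='path/to/file.txt',            # invented S3 key
        bucket_name='my-bucket',           # invented bucket
        replace=True,                      # overwrite an existing key
    )
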
diff --git a/airflow/hooks/hdfs_hook.py b/airflow/hooks/hdfs_hook.py
index 549b60973b..7414289392 100644
--- a/airflow/hooks/hdfs_hook.py
+++ b/airflow/hooks/hdfs_hook.py
@@ -33,9 +33,9 @@ class HDFSHook(BaseHook):
     Interact with HDFS. This class is a wrapper around the snakebite library.
 
     :param hdfs_conn_id: Connection id to fetch connection info
-    :type conn_id: string
+    :type conn_id: str
     :param proxy_user: effective user for HDFS operations
-    :type proxy_user: string
+    :type proxy_user: str
     :param autoconfig: use snakebite's automatically configured client
     :type autoconfig: bool
     """
diff --git a/airflow/hooks/hive_hooks.py b/airflow/hooks/hive_hooks.py
index 3e7d2db6f3..d6b4ea49c5 100644
--- a/airflow/hooks/hive_hooks.py
+++ b/airflow/hooks/hive_hooks.py
@@ -55,13 +55,13 @@ class HiveCliHook(BaseHook):
     connection string as is.
 
     :param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
-    :type  mapred_queue: string
+    :type  mapred_queue: str
     :param mapred_queue_priority: priority within the job queue.
         Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
-    :type  mapred_queue_priority: string
+    :type  mapred_queue_priority: str
     :param mapred_job_name: This name will appear in the jobtracker.
         This can make monitoring easier.
-    :type  mapred_job_name: string
+    :type  mapred_job_name: str
     """
 
     def __init__(
@@ -295,7 +295,7 @@ def load_df(
         :type recreate: bool
         :param field_dict: mapping from column name to hive data type
         :type field_dict: dict
-        :param encoding: string encoding to use when writing DataFrame to file
+        :param encoding: str encoding to use when writing DataFrame to file
         :type encoding: str
         :param pandas_kwargs: passed to DataFrame.to_csv
         :type pandas_kwargs: dict
@@ -471,12 +471,12 @@ def check_for_partition(self, schema, table, partition):
         Checks whether a partition exists
 
         :param schema: Name of hive schema (database) @table belongs to
-        :type schema: string
+        :type schema: str
         :param table: Name of hive table @partition belongs to
-        :type schema: string
+        :type table: str
         :partition: Expression that matches the partitions to check for
             (eg `a = 'b' AND c = 'd'`)
-        :type schema: string
+        :type partition: str
         :rtype: boolean
 
         >>> hh = HiveMetastoreHook()
@@ -498,11 +498,11 @@ def check_for_named_partition(self, schema, table, 
partition_name):
         Checks whether a partition with a given name exists
 
         :param schema: Name of hive schema (database) @table belongs to
-        :type schema: string
+        :type schema: str
         :param table: Name of hive table @partition belongs to
+        :type table: str
+        :type schema: str
         :partition: Name of the partitions to check for (eg `a=b/c=d`)
-        :type schema: string
+        :type partition: str
         :rtype: boolean
 
         >>> hh = HiveMetastoreHook()
diff --git a/airflow/hooks/jdbc_hook.py b/airflow/hooks/jdbc_hook.py
index bc1f352ecc..3a3cd31c80 100644
--- a/airflow/hooks/jdbc_hook.py
+++ b/airflow/hooks/jdbc_hook.py
@@ -28,15 +28,15 @@ class JdbcHook(DbApiHook):
     Otherwise host, port, schema, username and password can be specified on 
the fly.
 
     :param jdbc_url: jdbc connection url
-    :type jdbc_url: string
+    :type jdbc_url: str
     :param jdbc_driver_name: jdbc driver name
-    :type jdbc_driver_name: string
+    :type jdbc_driver_name: str
     :param jdbc_driver_loc: path to jdbc driver
-    :type jdbc_driver_loc: string
+    :type jdbc_driver_loc: str
     :param conn_id: reference to a predefined database
-    :type conn_id: string
+    :type conn_id: str
     :param sql: the sql code to be executed
-    :type sql: string or string pointing to a template file. File must have
+    :type sql: str or str pointing to a template file. File must have
         a '.sql' extensions.
     """
 
diff --git a/airflow/macros/hive.py b/airflow/macros/hive.py
index c68c2939c7..969ef23a94 100644
--- a/airflow/macros/hive.py
+++ b/airflow/macros/hive.py
@@ -22,17 +22,17 @@ def max_partition(
     Gets the max partition for a table.
 
     :param schema: The hive schema the table lives in
-    :type schema: string
+    :type schema: str
     :param table: The hive table you are interested in, supports the dot
         notation as in "my_database.my_table", if a dot is found,
         the schema param is disregarded
-    :type table: string
+    :type table: str
     :param hive_conn_id: The hive connection you are interested in.
         If your default is set you don't need to use this parameter.
-    :type hive_conn_id: string
+    :type hive_conn_id: str
     :param filter: filter on a subset of partition as in
         `sub_part='specific_value'`
-    :type filter: string
+    :type filter: str
     :param field: the field to get the max value from. If there's only
         one partition field, this will be inferred
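
The max_partition macro documented above is typically called from templated fields; a minimal, hypothetical sketch (the table name is invented and the default metastore connection is assumed):

    # Hypothetical usage sketch -- not part of the patch above.
    from airflow.macros.hive import max_partition

    # Latest partition value of the (invented) table; with dot notation the
    # schema argument is disregarded, per the docstring above.
    latest = max_partition('my_db.my_table')
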
 
diff --git a/airflow/models.py b/airflow/models.py
index 8566b7f108..10f2be82c3 100755
--- a/airflow/models.py
+++ b/airflow/models.py
@@ -1414,7 +1414,7 @@ def signal_handler(signum, frame):
                     result = task_copy.execute(context=context)
 
                 # If the task returns a result, push an XCom containing it
-                if result is not None:
+                if result:
                     self.xcom_push(key=XCOM_RETURN_KEY, value=result)
 
                 # TODO remove deprecated behavior in Airflow 2.0
@@ -1664,7 +1664,7 @@ def xcom_push(
         Make an XCom available for tasks to pull.
 
         :param key: A key for the XCom
-        :type key: string
+        :type key: str
         :param value: A value for the XCom. The value is pickled and stored
             in the database.
         :type value: any pickleable object
@@ -1710,13 +1710,13 @@ def xcom_pull(
             available as a constant XCOM_RETURN_KEY. This key is automatically
             given to XComs returned by tasks (as opposed to being pushed
             manually). To remove the filter, pass key=None.
-        :type key: string
+        :type key: str
         :param task_ids: Only XComs from tasks with matching ids will be
             pulled. Can pass None to remove the filter.
-        :type task_ids: string or iterable of strings (representing task_ids)
+        :type task_ids: str or iterable of strings (representing task_ids)
         :param dag_id: If provided, only pulls XComs from this DAG.
             If None (default), the DAG of the calling task is used.
-        :type dag_id: string
+        :type dag_id: str
         :param include_prior_dates: If False, only XComs from the current
             execution_date are returned. If True, XComs from previous dates
             are returned as well.
@@ -1865,9 +1865,9 @@ class derived from this one results in the creation of a 
task object,
     be set by using the set_upstream and/or set_downstream methods.
 
     :param task_id: a unique, meaningful id for the task
-    :type task_id: string
+    :type task_id: str
     :param owner: the owner of the task, using the unix username is recommended
-    :type owner: string
+    :type owner: str
     :param retries: the number of retries that should be performed before
         failing the task
     :type retries: int
@@ -1945,6 +1945,7 @@ class derived from this one results in the creation of a 
task object,
     :type on_failure_callback: callable
     :param on_retry_callback: much like the ``on_failure_callback`` except
         that it is executed when retries occur.
+    :type on_retry_callback: callable
     :param on_success_callback: much like the ``on_failure_callback`` except
         that it is executed when the task succeeds.
     :type on_success_callback: callable
@@ -2659,9 +2660,9 @@ class DAG(BaseDag, LoggingMixin):
     added once to a DAG.
 
     :param dag_id: The id of the DAG
-    :type dag_id: string
+    :type dag_id: str
     :param description: The description for the DAG to e.g. be shown on the 
webserver
-    :type description: string
+    :type description: str
     :param schedule_interval: Defines how often that DAG runs, this
         timedelta object gets added to your latest task instance's
         execution_date to figure out the next schedule
@@ -2678,7 +2679,7 @@ class DAG(BaseDag, LoggingMixin):
         defines where jinja will look for your templates. Order matters.
         Note that jinja/airflow includes the path of your DAG file by
         default
-    :type template_searchpath: string or list of stings
+    :type template_searchpath: str or list of strings
     :param user_defined_macros: a dictionary of macros that will be exposed
         in your jinja templates. For example, passing ``dict(foo='bar')``
         to this argument allows you to ``{{ foo }}`` in all jinja
@@ -2716,9 +2717,9 @@ class DAG(BaseDag, LoggingMixin):
         timeouts.
     :type sla_miss_callback: types.FunctionType
     :param default_view: Specify DAG default view (tree, graph, duration, 
gantt, landing_times)
-    :type default_view: string
+    :type default_view: str
     :param orientation: Specify DAG orientation in graph view (LR, TB, RL, BT)
-    :type orientation: string
+    :type orientation: str
     :param catchup: Perform scheduler catchup (or only run latest)? Defaults 
to True
     :type catchup: bool
     """
@@ -3398,7 +3399,7 @@ def add_tasks(self, tasks):
         """
         Add a list of tasks to the DAG
 
-        :param tasks: a lit of tasks you want to add
+        :param tasks: a list of tasks you want to add
         :type tasks: list of tasks
         """
         for task in tasks:
@@ -3470,7 +3471,7 @@ def create_dagrun(self,
         Returns the dag run.
 
         :param run_id: defines the run id for this dag run
-        :type run_id: string
+        :type run_id: str
         :param execution_date: the execution date of this dag run
         :type execution_date: datetime
         :param state: the state of the dag run
@@ -3599,7 +3600,7 @@ def get_num_task_instances(dag_id, task_ids, states=None, 
session=None):
         qry = session.query(func.count(TaskInstance.task_id)).filter(
             TaskInstance.dag_id == dag_id,
             TaskInstance.task_id.in_(task_ids))
-        if states is not None:
+        if states:
             if None in states:
                 qry = qry.filter(or_(
                     TaskInstance.state.in_(states),
@@ -3711,12 +3712,13 @@ def setdefault(cls, key, default, 
deserialize_json=False):
         for a key, and if it isn't there, stores the default value and returns 
it.
 
         :param key: Dict key for this Variable
-        :type key: String
+        :type key: str
         :param default: Default value to set and return if the variable
         isn't already in the DB
         :type default: Mixed
         :param deserialize_json: Store this as a JSON encoded value in the DB
          and un-encode it when retrieving a value
+        :type deserialize_json: bool
         :return: Mixed
         """
         default_sentinel = object()
@@ -4106,7 +4108,7 @@ def find(dag_id=None, run_id=None, execution_date=None,
         :param dag_id: the dag_id to find dag runs for
         :type dag_id: integer, list
         :param run_id: defines the run id for this dag run
-        :type run_id: string
+        :type run_id: str
         :param execution_date: the execution date
         :type execution_date: datetime
         :param state: the state of the dag run
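
Two hunks above replace an explicit `is not None` test with a bare truthiness check (`if result:` before the XCom push and `if states:` in get_num_task_instances). The sketch below is illustrative only and shows the behavioural difference for falsy-but-not-None values:

    # Illustration only -- not part of the patch above.
    def pushed_before(result):
        return result is not None    # old check: 0, [], '' were still pushed

    def pushed_after(result):
        return bool(result)          # new check: any falsy result is skipped

    assert pushed_before(0) and not pushed_after(0)
    assert pushed_before([]) and not pushed_after([])
    assert pushed_before('x') and pushed_after('x')
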
diff --git a/airflow/operators/bash_operator.py 
b/airflow/operators/bash_operator.py
index 3146cd6cfb..3d95099f59 100644
--- a/airflow/operators/bash_operator.py
+++ b/airflow/operators/bash_operator.py
@@ -32,7 +32,7 @@ class BashOperator(BaseOperator):
 
     :param bash_command: The command, set of commands or reference to a
         bash script (must be '.sh') to be executed.
-    :type bash_command: string
+    :type bash_command: str
     :param xcom_push: If xcom_push is True, the last line written to stdout
         will also be pushed to an XCom when the bash command completes.
     :type xcom_push: bool
@@ -41,7 +41,8 @@ class BashOperator(BaseOperator):
         of inheriting the current process environment, which is the default
         behavior. (templated)
     :type env: dict
-    :type output_encoding: output encoding of bash command
+    :param output_encoding: Output encoding of bash command
+    :type output_encoding: str
     """
     template_fields = ('bash_command', 'env')
     template_ext = ('.sh', '.bash',)
@@ -105,4 +106,3 @@ def execute(self, context):
     def on_kill(self):
         logging.info('Sending SIGTERM signal to bash process group')
         os.killpg(os.getpgid(self.sp.pid), signal.SIGTERM)
-
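
A minimal, hypothetical use of the BashOperator arguments documented above (the task id and command are invented; a `dag` object is assumed to be in scope):

    # Hypothetical usage sketch -- not part of the patch above.
    from airflow.operators.bash_operator import BashOperator

    say_hello = BashOperator(
        task_id='say_hello',
        bash_command='echo "hello"',
        xcom_push=True,            # last line of stdout becomes an XCom
        output_encoding='utf-8',   # encoding used to decode the output
        dag=dag,                   # assumes a DAG object named `dag` exists
    )
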
diff --git a/airflow/operators/check_operator.py 
b/airflow/operators/check_operator.py
index 139e451c14..17a6ecec5f 100644
--- a/airflow/operators/check_operator.py
+++ b/airflow/operators/check_operator.py
@@ -55,7 +55,7 @@ class CheckOperator(BaseOperator):
     single record from an external source.
 
     :param sql: the sql to be executed
-    :type sql: string
+    :type sql: str
     """
 
     template_fields = ('sql',)
@@ -110,7 +110,7 @@ class ValueCheckOperator(BaseOperator):
     single record from an external source.
 
     :param sql: the sql to be executed
-    :type sql: string
+    :type sql: str
     """
 
     __mapper_args__ = {
diff --git a/airflow/operators/email_operator.py 
b/airflow/operators/email_operator.py
index 5167a7a7b1..5bf56a997a 100644
--- a/airflow/operators/email_operator.py
+++ b/airflow/operators/email_operator.py
@@ -24,10 +24,10 @@ class EmailOperator(BaseOperator):
     :param to: list of emails to send the email to
     :type to: list or string (comma or semicolon delimited)
     :param subject: subject line for the email (templated)
-    :type subject: string
+    :type subject: str
     :param html_content: content of the email (templated), html markup
         is allowed
-    :type html_content: string
+    :type html_content: str
     :param files: file names to attach in email
     :type files: list
     :param cc: list of recipients to be added in CC field
diff --git a/airflow/operators/hive_operator.py 
b/airflow/operators/hive_operator.py
index 06a83e3768..b29bdcfaf0 100644
--- a/airflow/operators/hive_operator.py
+++ b/airflow/operators/hive_operator.py
@@ -26,9 +26,9 @@ class HiveOperator(BaseOperator):
     Executes hql code in a specific Hive database.
 
     :param hql: the hql to be executed
-    :type hql: string
+    :type hql: str
     :param hive_cli_conn_id: reference to the Hive database
-    :type hive_cli_conn_id: string
+    :type hive_cli_conn_id: str
     :param hiveconf_jinja_translate: when True, hiveconf-type templating
         ${var} gets translated into jinja-type templating {{ var }}. Note that
         you may want to use this along with the
@@ -39,13 +39,13 @@ class HiveOperator(BaseOperator):
         part of the script before the first occurrence of `script_begin_tag`
     :type script_begin_tag: str
     :param mapred_queue: queue used by the Hadoop CapacityScheduler
-    :type  mapred_queue: string
+    :type  mapred_queue: str
     :param mapred_queue_priority: priority within CapacityScheduler queue.
         Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
-    :type  mapred_queue_priority: string
+    :type  mapred_queue_priority: str
     :param mapred_job_name: This name will appear in the jobtracker.
         This can make monitoring easier.
-    :type  mapred_job_name: string
+    :type  mapred_job_name: str
     """
 
     template_fields = ('hql', 'schema')
diff --git a/airflow/operators/hive_to_samba_operator.py 
b/airflow/operators/hive_to_samba_operator.py
index 8f18dd9434..c27dd65c68 100644
--- a/airflow/operators/hive_to_samba_operator.py
+++ b/airflow/operators/hive_to_samba_operator.py
@@ -27,11 +27,11 @@ class Hive2SambaOperator(BaseOperator):
     results of the query as a csv to a Samba location.
 
     :param hql: the hql to be exported
-    :type hql: string
+    :type hql: str
     :param hiveserver2_conn_id: reference to the hiveserver2 service
-    :type hiveserver2_conn_id: string
+    :type hiveserver2_conn_id: str
     :param samba_conn_id: reference to the samba destination
-    :type samba_conn_id: string
+    :type samba_conn_id: str
     """
 
     template_fields = ('hql', 'destination_filepath')
diff --git a/airflow/operators/http_operator.py 
b/airflow/operators/http_operator.py
index 9884566a94..a36c1158b6 100644
--- a/airflow/operators/http_operator.py
+++ b/airflow/operators/http_operator.py
@@ -25,11 +25,11 @@ class SimpleHttpOperator(BaseOperator):
     Calls an endpoint on an HTTP system to execute an action
 
     :param http_conn_id: The connection to run the sensor against
-    :type http_conn_id: string
+    :type http_conn_id: str
     :param endpoint: The relative part of the full url
-    :type endpoint: string
+    :type endpoint: str
     :param method: The HTTP method to use, default = "POST"
-    :type method: string
+    :type method: str
     :param data: The data to pass. POST-data in POST/PUT and params
         in the URL for a GET request.
     :type data: For POST/PUT, depends on the content-type parameter,
diff --git a/airflow/operators/jdbc_operator.py 
b/airflow/operators/jdbc_operator.py
index 28977db0d0..efdeab7408 100644
--- a/airflow/operators/jdbc_operator.py
+++ b/airflow/operators/jdbc_operator.py
@@ -29,14 +29,14 @@ class JdbcOperator(BaseOperator):
 
     :param jdbc_url: driver specific connection url with string variables, 
e.g. for exasol jdbc:exa:{0}:{1};schema={2}
     Template vars are defined like this: {0} = hostname, {1} = port, {2} = 
dbschema, {3} = extra
-    :type jdbc_url: string
+    :type jdbc_url: str
     :param jdbc_driver_name: classname of the specific jdbc driver, for exasol 
com.exasol.jdbc.EXADriver
-    :type jdbc_driver_name: string
+    :type jdbc_driver_name: str
     :param jdbc_driver_loc: absolute path to jdbc driver location, for example 
/var/exasol/exajdbc.jar
-    :type jdbc_driver_loc: string
+    :type jdbc_driver_loc: str
 
     :param conn_id: reference to a predefined database
-    :type conn_id: string
+    :type conn_id: str
     :param sql: the sql code to be executed
     :type sql: Can receive a str representing a sql statement,
         a list of str (sql statements), or reference to a template file.
diff --git a/airflow/operators/mssql_operator.py 
b/airflow/operators/mssql_operator.py
index 9ae2fffe45..7362948e2e 100644
--- a/airflow/operators/mssql_operator.py
+++ b/airflow/operators/mssql_operator.py
@@ -24,11 +24,11 @@ class MsSqlOperator(BaseOperator):
     Executes sql code in a specific Microsoft SQL database
 
     :param mssql_conn_id: reference to a specific mssql database
-    :type mssql_conn_id: string
+    :type mssql_conn_id: str
     :param sql: the sql code to be executed
-    :type sql: string or string pointing to a template file with .sql extension
+    :type sql: str or str pointing to a template file with .sql extension
     :param database: name of database which overwrite defined one in connection
-    :type database: string
+    :type database: str
     """
 
     template_fields = ('sql',)
diff --git a/airflow/operators/mysql_operator.py 
b/airflow/operators/mysql_operator.py
index 156ada8e90..1567e05590 100644
--- a/airflow/operators/mysql_operator.py
+++ b/airflow/operators/mysql_operator.py
@@ -24,13 +24,13 @@ class MySqlOperator(BaseOperator):
     Executes sql code in a specific MySQL database
 
     :param mysql_conn_id: reference to a specific mysql database
-    :type mysql_conn_id: string
+    :type mysql_conn_id: str
     :param sql: the sql code to be executed
     :type sql: Can receive a str representing a sql statement,
         a list of str (sql statements), or reference to a template file.
         Template reference are recognized by str ending in '.sql'
     :param database: name of database which overwrite defined one in connection
-    :type database: string
+    :type database: str
     """
 
     template_fields = ('sql',)
diff --git a/airflow/operators/oracle_operator.py 
b/airflow/operators/oracle_operator.py
index ab7bdb2f34..80a574db4a 100644
--- a/airflow/operators/oracle_operator.py
+++ b/airflow/operators/oracle_operator.py
@@ -23,7 +23,7 @@ class OracleOperator(BaseOperator):
     """
     Executes sql code in a specific Oracle database
     :param oracle_conn_id: reference to a specific Oracle database
-    :type oracle_conn_id: string
+    :type oracle_conn_id: str
     :param sql: the sql code to be executed
     :type sql: Can receive a str representing a sql statement,
         a list of str (sql statements), or reference to a template file.
diff --git a/airflow/operators/pig_operator.py 
b/airflow/operators/pig_operator.py
index 4a21eccbba..b605abd887 100644
--- a/airflow/operators/pig_operator.py
+++ b/airflow/operators/pig_operator.py
@@ -25,9 +25,9 @@ class PigOperator(BaseOperator):
     Executes pig script.
 
     :param pig: the pig latin script to be executed
-    :type pig: string
+    :type pig: str
     :param pig_cli_conn_id: reference to the Hive database
-    :type pig_cli_conn_id: string
+    :type pig_cli_conn_id: str
     :param pigparams_jinja_translate: when True, pig params-type templating
         ${var} gets translated into jinja-type templating {{ var }}. Note that
         you may want to use this along with the
diff --git a/airflow/operators/postgres_operator.py 
b/airflow/operators/postgres_operator.py
index 0de5aa53cd..369a29b3dc 100644
--- a/airflow/operators/postgres_operator.py
+++ b/airflow/operators/postgres_operator.py
@@ -24,13 +24,13 @@ class PostgresOperator(BaseOperator):
     Executes sql code in a specific Postgres database
 
     :param postgres_conn_id: reference to a specific postgres database
-    :type postgres_conn_id: string
+    :type postgres_conn_id: str
     :param sql: the sql code to be executed
     :type sql: Can receive a str representing a sql statement,
         a list of str (sql statements), or reference to a template file.
         Template reference are recognized by str ending in '.sql'
     :param database: name of database which overwrite defined one in connection
-    :type database: string
+    :type database: str
     """
 
     template_fields = ('sql',)
diff --git a/airflow/operators/presto_check_operator.py 
b/airflow/operators/presto_check_operator.py
index e6e1fd81c3..12bdc7bee3 100644
--- a/airflow/operators/presto_check_operator.py
+++ b/airflow/operators/presto_check_operator.py
@@ -46,9 +46,9 @@ class PrestoCheckOperator(CheckOperator):
     without stopping the progress of the DAG.
 
     :param sql: the sql to be executed
-    :type sql: string
+    :type sql: str
     :param presto_conn_id: reference to the Presto database
-    :type presto_conn_id: string
+    :type presto_conn_id: str
     """
 
     @apply_defaults
@@ -70,9 +70,9 @@ class PrestoValueCheckOperator(ValueCheckOperator):
     Performs a simple value check using sql code.
 
     :param sql: the sql to be executed
-    :type sql: string
+    :type sql: str
     :param presto_conn_id: reference to the Presto database
-    :type presto_conn_id: string
+    :type presto_conn_id: str
     """
 
     @apply_defaults
@@ -102,7 +102,7 @@ class PrestoIntervalCheckOperator(IntervalCheckOperator):
     :param metrics_threshold: a dictionary of ratios indexed by metrics
     :type metrics_threshold: dict
     :param presto_conn_id: reference to the Presto database
-    :type presto_conn_id: string
+    :type presto_conn_id: str
     """
 
     @apply_defaults
diff --git a/airflow/operators/python_operator.py 
b/airflow/operators/python_operator.py
index bef9bb02d5..9abda0ed4e 100644
--- a/airflow/operators/python_operator.py
+++ b/airflow/operators/python_operator.py
@@ -45,6 +45,7 @@ class PythonOperator(BaseOperator):
     :type templates_dict: dict of str
     :param templates_exts: a list of file extensions to resolve while
         processing templated fields, for examples ``['.sql', '.hql']``
+    :type templates_exts: list of str
     """
     template_fields = ('templates_dict',)
     template_ext = tuple()
diff --git a/airflow/operators/redshift_to_s3_operator.py 
b/airflow/operators/redshift_to_s3_operator.py
index fda88d97dd..0c3c8aa6a2 100644
--- a/airflow/operators/redshift_to_s3_operator.py
+++ b/airflow/operators/redshift_to_s3_operator.py
@@ -24,17 +24,17 @@ class RedshiftToS3Transfer(BaseOperator):
     """
     Executes an UNLOAD command to s3 as a CSV with headers
     :param schema: reference to a specific schema in redshift database
-    :type schema: string
+    :type schema: str
     :param table: reference to a specific table in redshift database
-    :type table: string
+    :type table: str
     :param s3_bucket: reference to a specific S3 bucket
-    :type s3_bucket: string
+    :type s3_bucket: str
     :param s3_key: reference to a specific S3 key
-    :type s3_key: string
+    :type s3_key: str
     :param redshift_conn_id: reference to a specific redshift database
-    :type redshift_conn_id: string
+    :type redshift_conn_id: str
     :param s3_conn_id: reference to a specific S3 connection
-    :type s3_conn_id: string
+    :type s3_conn_id: str
     :param options: reference to a list of UNLOAD options
     :type options: list
     """
diff --git a/airflow/operators/sensors.py b/airflow/operators/sensors.py
index 4f276adb75..f59d083e04 100644
--- a/airflow/operators/sensors.py
+++ b/airflow/operators/sensors.py
@@ -89,7 +89,7 @@ class SqlSensor(BaseSensorOperator):
     sql returns no row, or if the first cell is in (0, '0', '').
 
     :param conn_id: The connection to run the sensor against
-    :type conn_id: string
+    :type conn_id: str
     :param sql: The sql to run. To pass, it needs to return at least one cell
         that contains a non-zero / empty string value.
     """
@@ -183,10 +183,10 @@ class ExternalTaskSensor(BaseSensorOperator):
 
     :param external_dag_id: The dag_id that contains the task you want to
         wait for
-    :type external_dag_id: string
+    :type external_dag_id: str
     :param external_task_id: The task_id that contains the task you want to
         wait for
-    :type external_task_id: string
+    :type external_task_id: str
     :param allowed_states: list of allowed states, default is ``['success']``
     :type allowed_states: list
     :param execution_delta: time difference with the previous execution to
@@ -332,12 +332,12 @@ class HivePartitionSensor(BaseSensorOperator):
 
     :param table: The name of the table to wait for, supports the dot
         notation (my_database.my_table)
-    :type table: string
+    :type table: str
     :param partition: The partition clause to wait for. This is passed as
         is to the metastore Thrift client ``get_partitions_by_filter`` method,
         and apparently supports SQL like notation as in ``ds='2015-01-01'
         AND type='value'`` and comparison operators as in ``"ds>=2015-01-01"``
-    :type partition: string
+    :type partition: str
     :param metastore_conn_id: reference to the metastore thrift service
         connection id
     :type metastore_conn_id: str
@@ -630,11 +630,11 @@ class HttpSensor(BaseSensorOperator):
         404 not found or response_check function returned False
 
     :param http_conn_id: The connection to run the sensor against
-    :type http_conn_id: string
+    :type http_conn_id: str
     :param method: The HTTP request method to use
-    :type method: string
+    :type method: str
     :param endpoint: The relative part of the full url
-    :type endpoint: string
+    :type endpoint: str
     :param request_params: The parameters to be added to the GET url
     :type request_params: a dictionary of string key/value pairs
     :param headers: The HTTP headers to be added to the GET request
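
To illustrate the SqlSensor pass criterion described at the top of this file's hunks, a hypothetical sketch (connection id and query are invented; a `dag` object is assumed to be in scope):

    # Hypothetical usage sketch -- not part of the patch above.
    from airflow.operators.sensors import SqlSensor

    wait_for_rows = SqlSensor(
        task_id='wait_for_rows',
        conn_id='my_db',                                           # invented connection id
        sql="SELECT COUNT(*) FROM my_table WHERE ds='{{ ds }}'",   # passes once non-zero
        dag=dag,                                                   # assumes `dag` exists
    )
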
diff --git a/airflow/operators/slack_operator.py 
b/airflow/operators/slack_operator.py
index 2e6d4269fa..d27354c080 100644
--- a/airflow/operators/slack_operator.py
+++ b/airflow/operators/slack_operator.py
@@ -27,9 +27,9 @@ class SlackAPIOperator(BaseOperator):
     In the future additional Slack API Operators will be derived from this 
class as well
 
     :param token: Slack API token (https://api.slack.com/web)
-    :type token: string
+    :type token: str
     :param method: The Slack API Method to Call (https://api.slack.com/methods)
-    :type method: string
+    :type method: str
     :param api_params: API Method call parameters 
(https://api.slack.com/methods)
     :type api_params: dict
     """
@@ -75,13 +75,13 @@ class SlackAPIPostOperator(SlackAPIOperator):
     Posts messages to a slack channel
 
     :param channel: channel in which to post message on slack name (#general) 
or ID (C12318391)
-    :type channel: string
+    :type channel: str
     :param username: Username that airflow will be posting to Slack as
-    :type username: string
+    :type username: str
     :param text: message to send to slack
-    :type text: string
+    :type text: str
     :param icon_url: url to icon used for this message
-    :type icon_url: string
+    :type icon_url: str
     :param attachments: extra formatting details - see 
https://api.slack.com/docs/attachments
     :type attachments: array of hashes
     """
diff --git a/airflow/operators/sqlite_operator.py 
b/airflow/operators/sqlite_operator.py
index 0ff4d05814..fe02b6cfcd 100644
--- a/airflow/operators/sqlite_operator.py
+++ b/airflow/operators/sqlite_operator.py
@@ -24,9 +24,9 @@ class SqliteOperator(BaseOperator):
     Executes sql code in a specific Sqlite database
 
     :param sqlite_conn_id: reference to a specific sqlite database
-    :type sqlite_conn_id: string
+    :type sqlite_conn_id: str
     :param sql: the sql code to be executed
-    :type sql: string or string pointing to a template file. File must have
+    :type sql: str or str pointing to a template file. File must have
         a '.sql' extensions.
     """
 
diff --git a/airflow/utils/helpers.py b/airflow/utils/helpers.py
index 9a9412513d..afd111046b 100644
--- a/airflow/utils/helpers.py
+++ b/airflow/utils/helpers.py
@@ -285,10 +285,10 @@ def __init__(self, parent_module, module_attributes):
         """
         :param parent_module: The string package name of the parent module. For
             example, 'airflow.operators'
-        :type parent_module: string
+        :type parent_module: str
         :param module_attributes: The file to class mappings for all importable
             classes.
-        :type module_attributes: string
+        :type module_attributes: str
         """
         self._parent_module = parent_module
         self._attribute_modules = 
self._build_attribute_modules(module_attributes)
diff --git a/airflow/utils/logging.py b/airflow/utils/logging.py
index 96767cb6ea..fcd4fafa2d 100644
--- a/airflow/utils/logging.py
+++ b/airflow/utils/logging.py
@@ -63,7 +63,7 @@ def read(self, remote_log_location, return_error=False):
         logs are found or there is an error.
 
         :param remote_log_location: the log's location in remote storage
-        :type remote_log_location: string (path)
+        :type remote_log_location: str (path)
         :param return_error: if True, returns a string error message if an
             error occurs. Otherwise returns '' when an error occurs.
         :type return_error: bool
@@ -87,9 +87,9 @@ def write(self, log, remote_log_location, append=True):
         was created.
 
         :param log: the log to write to the remote_log_location
-        :type log: string
+        :type log: str
         :param remote_log_location: the log's location in remote storage
-        :type remote_log_location: string (path)
+        :type remote_log_location: str (path)
         :param append: if False, any existing log file is overwritten. If True,
             the new log is appended to any existing logs.
         :type append: bool
@@ -142,7 +142,7 @@ def read(self, remote_log_location, return_error=False):
         Returns the log found at the remote_log_location.
 
         :param remote_log_location: the log's location in remote storage
-        :type remote_log_location: string (path)
+        :type remote_log_location: str (path)
         :param return_error: if True, returns a string error message if an
             error occurs. Otherwise returns '' when an error occurs.
         :type return_error: bool
@@ -165,9 +165,9 @@ def write(self, log, remote_log_location, append=True):
         was created.
 
         :param log: the log to write to the remote_log_location
-        :type log: string
+        :type log: str
         :param remote_log_location: the log's location in remote storage
-        :type remote_log_location: string (path)
+        :type remote_log_location: str (path)
         :param append: if False, any existing log file is overwritten. If True,
             the new log is appended to any existing logs.
         :type append: bool
diff --git a/airflow/utils/operator_resources.py 
b/airflow/utils/operator_resources.py
index d304170fda..879be45046 100644
--- a/airflow/utils/operator_resources.py
+++ b/airflow/utils/operator_resources.py
@@ -28,10 +28,10 @@ class Resource(object):
     Represents a resource requirement in an execution environment for an 
operator.
 
     :param name: Name of the resource
-    :type name: string
+    :type name: str
     :param units_str: The string representing the units of a resource (e.g. MB 
for a CPU
         resource) to be used for display purposes
-    :type units_str: string
+    :type units_str: str
     :param qty: The number of units of the specified resource that are 
required for
         execution of the operator.
     :type qty: long
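
For reference, the Sphinx field-list style this patch standardizes on looks roughly like the following in a docstring (illustrative function only):

    # Illustrative example only -- shows the `:param:` / `:type:` convention
    # normalized to `str` throughout the patch above.
    def run_sql(conn_id, sql):
        """
        Execute sql code against a predefined connection.

        :param conn_id: reference to a predefined connection
        :type conn_id: str
        :param sql: the sql code to be executed
        :type sql: str
        """
        return conn_id, sql
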


 

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


> Standardizing string type in comments
> -------------------------------------
>
>                 Key: AIRFLOW-1353
>                 URL: https://issues.apache.org/jira/browse/AIRFLOW-1353
>             Project: Apache Airflow
>          Issue Type: Improvement
>            Reporter: Derek S
>            Assignee: Derek S
>            Priority: Trivial
>             Fix For: 2.0.0
>
>




--
This message was sent by Atlassian JIRA
(v7.6.3#76005)
