Posted to commits@airflow.apache.org by GitBox <gi...@apache.org> on 2018/09/06 22:07:45 UTC

[GitHub] r39132 closed pull request #3857: [AIRFLOW-3023] Fix docstring datatypes

r39132 closed pull request #3857: [AIRFLOW-3023] Fix docstring datatypes
URL: https://github.com/apache/incubator-airflow/pull/3857
 
 
   

This is a pull request merged from a forked repository. Because GitHub hides
the original diff of a foreign (forked) pull request once it has been merged,
the diff is reproduced below for the sake of provenance.
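
As context for the diff, here is a minimal, self-contained sketch of the
docstring convention the patch standardizes on: Sphinx ``:type``/``:rtype``
fields use the Python builtin names (str, bool, int) rather than the informal
spellings "string", "boolean", "integer". The function below is illustrative
only (its body is a stub, not code from the patch), and its signature loosely
mirrors BigQueryHook.table_exists from the diff that follows.

    def table_exists(project_id, dataset_id, table_id):
        """
        Check whether the given BigQuery table exists.

        :param project_id: The Google Cloud project to look in.
        :type project_id: str
        :param dataset_id: The dataset expected to contain the table.
        :type dataset_id: str
        :param table_id: The name of the table to check.
        :type table_id: str
        :return: True if the table exists, False otherwise.
        :rtype: bool
        """
        # Stub body for illustration; the real implementation lives in
        # airflow/contrib/hooks/bigquery_hook.py (see the diff below).
        raise NotImplementedError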

diff --git a/airflow/contrib/executors/kubernetes_executor.py b/airflow/contrib/executors/kubernetes_executor.py
index 66c600ba65..de1f9f4235 100644
--- a/airflow/contrib/executors/kubernetes_executor.py
+++ b/airflow/contrib/executors/kubernetes_executor.py
@@ -433,7 +433,7 @@ def _label_safe_datestring_to_datetime(string):
         "_", let's
         replace ":" with "_"
 
-        :param string: string
+        :param string: str
         :return: datetime.datetime object
         """
         return parser.parse(string.replace('_plus_', '+').replace("_", ":"))
diff --git a/airflow/contrib/hooks/bigquery_hook.py b/airflow/contrib/hooks/bigquery_hook.py
index 44ecd49e9e..250f83c36a 100644
--- a/airflow/contrib/hooks/bigquery_hook.py
+++ b/airflow/contrib/hooks/bigquery_hook.py
@@ -94,13 +94,13 @@ def get_pandas_df(self, sql, parameters=None, dialect=None):
         https://github.com/pydata/pandas/issues/6900
 
         :param sql: The BigQuery SQL to execute.
-        :type sql: string
+        :type sql: str
         :param parameters: The parameters to render the SQL query with (not
             used, leave to override superclass method)
         :type parameters: mapping or iterable
         :param dialect: Dialect of BigQuery SQL – legacy SQL or standard SQL
             defaults to use `self.use_legacy_sql` if not specified
-        :type dialect: string in {'legacy', 'standard'}
+        :type dialect: str in {'legacy', 'standard'}
         """
         if dialect is None:
             dialect = 'legacy' if self.use_legacy_sql else 'standard'
@@ -117,12 +117,12 @@ def table_exists(self, project_id, dataset_id, table_id):
         :param project_id: The Google cloud project in which to look for the
             table. The connection supplied to the hook must provide access to
             the specified project.
-        :type project_id: string
+        :type project_id: str
         :param dataset_id: The name of the dataset in which to look for the
             table.
-        :type dataset_id: string
+        :type dataset_id: str
         :param table_id: The name of the table to check the existence of.
-        :type table_id: string
+        :type table_id: str
         """
         service = self.get_service()
         try:
@@ -313,7 +313,7 @@ def create_external_table(self,
             table name to create external table.
             If <project> is not included, project will be the
             project defined in the connection json.
-        :type external_project_dataset_table: string
+        :type external_project_dataset_table: str
         :param schema_fields: The schema field list as defined here:
             https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
         :type schema_fields: list
@@ -322,7 +322,7 @@ def create_external_table(self,
             per-object name can be used.
         :type source_uris: list
         :param source_format: File format to export.
-        :type source_format: string
+        :type source_format: str
         :param autodetect: Try to detect schema and format options automatically.
             Any option specified explicitly will be honored.
         :type autodetect: bool
@@ -331,7 +331,7 @@ def create_external_table(self,
             The default value is NONE.
             This setting is ignored for Google Cloud Bigtable,
                 Google Cloud Datastore backups and Avro formats.
-        :type compression: string
+        :type compression: str
         :param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
             extra values that are not represented in the table schema.
             If true, the extra values are ignored. If false, records with extra columns
@@ -344,13 +344,13 @@ def create_external_table(self,
         :param skip_leading_rows: Number of rows to skip when loading from a CSV.
         :type skip_leading_rows: int
         :param field_delimiter: The delimiter to use when loading from a CSV.
-        :type field_delimiter: string
+        :type field_delimiter: str
         :param quote_character: The value that is used to quote data sections in a CSV
             file.
-        :type quote_character: string
+        :type quote_character: str
         :param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
             (false).
-        :type allow_quoted_newlines: boolean
+        :type allow_quoted_newlines: bool
         :param allow_jagged_rows: Accept rows that are missing trailing optional columns.
             The missing values are treated as nulls. If false, records with missing
             trailing columns are treated as bad records, and if there are too many bad
@@ -507,27 +507,27 @@ def run_query(self,
 
         :param bql: (Deprecated. Use `sql` parameter instead) The BigQuery SQL
             to execute.
-        :type bql: string
+        :type bql: str
         :param sql: The BigQuery SQL to execute.
-        :type sql: string
+        :type sql: str
         :param destination_dataset_table: The dotted <dataset>.<table>
             BigQuery table to save the query results.
-        :type destination_dataset_table: string
+        :type destination_dataset_table: str
         :param write_disposition: What to do if the table already exists in
             BigQuery.
-        :type write_disposition: string
+        :type write_disposition: str
         :param allow_large_results: Whether to allow large results.
-        :type allow_large_results: boolean
+        :type allow_large_results: bool
         :param flatten_results: If true and query uses legacy SQL dialect, flattens
             all nested and repeated fields in the query results. ``allowLargeResults``
             must be true if this is set to false. For standard SQL queries, this
             flag is ignored and results are never flattened.
-        :type flatten_results: boolean
+        :type flatten_results: bool
         :param udf_config: The User Defined Function configuration for the query.
             See https://cloud.google.com/bigquery/user-defined-functions for details.
         :param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
             If `None`, defaults to `self.use_legacy_sql`.
-        :type use_legacy_sql: boolean
+        :type use_legacy_sql: bool
         :param api_resource_configs: a dictionary that contain params
             'configuration' applied for Google BigQuery Jobs API:
             https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs
@@ -538,7 +538,7 @@ def run_query(self,
         :type udf_config: list
         :param maximum_billing_tier: Positive integer that serves as a
             multiplier of the basic price.
-        :type maximum_billing_tier: integer
+        :type maximum_billing_tier: int
         :param maximum_bytes_billed: Limits the bytes billed for this job.
             Queries that will have bytes billed beyond this limit will fail
             (without incurring a charge). If unspecified, this will be
@@ -546,7 +546,7 @@ def run_query(self,
         :type maximum_bytes_billed: float
         :param create_disposition: Specifies whether the job is allowed to
             create new tables.
-        :type create_disposition: string
+        :type create_disposition: str
         :param query_params a dictionary containing query parameter types and
             values, passed to BigQuery
         :type query_params: dict
@@ -559,7 +559,7 @@ def run_query(self,
         :param priority: Specifies a priority for the query.
             Possible values include INTERACTIVE and BATCH.
             The default value is INTERACTIVE.
-        :type priority: string
+        :type priority: str
         :param time_partitioning: configure optional time partitioning fields i.e.
             partition by field, type and
             expiration as per API specifications. Note that 'field' is not available in
@@ -718,20 +718,20 @@ def run_extract(  # noqa
 
         :param source_project_dataset_table: The dotted <dataset>.<table>
             BigQuery table to use as the source data.
-        :type source_project_dataset_table: string
+        :type source_project_dataset_table: str
         :param destination_cloud_storage_uris: The destination Google Cloud
             Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
             convention defined here:
             https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
         :type destination_cloud_storage_uris: list
         :param compression: Type of compression to use.
-        :type compression: string
+        :type compression: str
         :param export_format: File format to export.
-        :type export_format: string
+        :type export_format: str
         :param field_delimiter: The delimiter to use when extracting to a CSV.
-        :type field_delimiter: string
+        :type field_delimiter: str
         :param print_header: Whether to print a header for a CSV file extract.
-        :type print_header: boolean
+        :type print_header: bool
         :param labels: a dictionary containing labels for the job/query,
             passed to BigQuery
         :type labels: dict
@@ -790,11 +790,11 @@ def run_copy(self,
         :type source_project_dataset_tables: list|string
         :param destination_project_dataset_table: The destination BigQuery
             table. Format is: (project:|project.)<dataset>.<table>
-        :type destination_project_dataset_table: string
+        :type destination_project_dataset_table: str
         :param write_disposition: The write disposition if the table already exists.
-        :type write_disposition: string
+        :type write_disposition: str
         :param create_disposition: The create disposition if the table doesn't exist.
-        :type create_disposition: string
+        :type create_disposition: str
         :param labels a dictionary containing labels for the job/query,
             passed to BigQuery
         :type labels: dict
@@ -871,7 +871,7 @@ def run_load(self,
             project defined in the connection json. If a partition is specified the
             operator will automatically append the data, create a new partition or create
             a new DAY partitioned table.
-        :type destination_project_dataset_table: string
+        :type destination_project_dataset_table: str
         :param schema_fields: The schema field list as defined here:
             https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
         :type schema_fields: list
@@ -880,21 +880,21 @@ def run_load(self,
             per-object name can be used.
         :type source_uris: list
         :param source_format: File format to export.
-        :type source_format: string
+        :type source_format: str
         :param create_disposition: The create disposition if the table doesn't exist.
-        :type create_disposition: string
+        :type create_disposition: str
         :param skip_leading_rows: Number of rows to skip when loading from a CSV.
         :type skip_leading_rows: int
         :param write_disposition: The write disposition if the table already exists.
-        :type write_disposition: string
+        :type write_disposition: str
         :param field_delimiter: The delimiter to use when loading from a CSV.
-        :type field_delimiter: string
+        :type field_delimiter: str
         :param max_bad_records: The maximum number of bad records that BigQuery can
             ignore when running the job.
         :type max_bad_records: int
         :param quote_character: The value that is used to quote data sections in a CSV
             file.
-        :type quote_character: string
+        :type quote_character: str
         :param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
             extra values that are not represented in the table schema.
             If true, the extra values are ignored. If false, records with extra columns
@@ -903,7 +903,7 @@ def run_load(self,
         :type ignore_unknown_values: bool
         :param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
             (false).
-        :type allow_quoted_newlines: boolean
+        :type allow_quoted_newlines: bool
         :param allow_jagged_rows: Accept rows that are missing trailing optional columns.
             The missing values are treated as nulls. If false, records with missing
             trailing columns are treated as bad records, and if there are too many bad
@@ -1204,7 +1204,7 @@ def run_table_delete(self, deletion_dataset_table,
         :type deletion_dataset_table: str
         :param ignore_if_missing: if True, then return success even if the
         requested table does not exist.
-        :type ignore_if_missing: boolean
+        :type ignore_if_missing: bool
         :return:
         """
         deletion_project, deletion_dataset, deletion_table = \
@@ -1402,7 +1402,7 @@ def execute(self, operation, parameters=None):
         Executes a BigQuery query, and returns the job ID.
 
         :param operation: The query to execute.
-        :type operation: string
+        :type operation: str
         :param parameters: Parameters to substitute into the query.
         :type parameters: dict
         """
@@ -1415,7 +1415,7 @@ def executemany(self, operation, seq_of_parameters):
         Execute a BigQuery query multiple times with different parameters.
 
         :param operation: The query to execute.
-        :type operation: string
+        :type operation: str
         :param seq_of_parameters: List of dictionary parameters to substitute into the
             query.
         :type seq_of_parameters: list
diff --git a/airflow/contrib/hooks/cassandra_hook.py b/airflow/contrib/hooks/cassandra_hook.py
index 0e0b47708d..2c744fe9b1 100644
--- a/airflow/contrib/hooks/cassandra_hook.py
+++ b/airflow/contrib/hooks/cassandra_hook.py
@@ -164,7 +164,7 @@ def table_exists(self, table):
 
         :param table: Target Cassandra table.
                       Use dot notation to target a specific keyspace.
-        :type table: string
+        :type table: str
         """
         keyspace = self.keyspace
         if '.' in table:
@@ -179,7 +179,7 @@ def record_exists(self, table, keys):
 
         :param table: Target Cassandra table.
                       Use dot notation to target a specific keyspace.
-        :type table: string
+        :type table: str
         :param keys: The keys and their values to check the existence.
         :type keys: dict
         """
diff --git a/airflow/contrib/hooks/databricks_hook.py b/airflow/contrib/hooks/databricks_hook.py
index 284db98d91..f4a890ac7b 100644
--- a/airflow/contrib/hooks/databricks_hook.py
+++ b/airflow/contrib/hooks/databricks_hook.py
@@ -56,7 +56,7 @@ def __init__(
             retry_delay=1.0):
         """
         :param databricks_conn_id: The name of the databricks connection to use.
-        :type databricks_conn_id: string
+        :type databricks_conn_id: str
         :param timeout_seconds: The amount of time in seconds the requests library
             will wait before timing-out.
         :type timeout_seconds: int
@@ -169,7 +169,7 @@ def run_now(self, json):
         :param json: The data used in the body of the request to the ``run-now`` endpoint.
         :type json: dict
         :return: the run_id as a string
-        :rtype: string
+        :rtype: str
         """
         response = self._do_api_call(RUN_NOW_ENDPOINT, json)
         return response['run_id']
@@ -181,7 +181,7 @@ def submit_run(self, json):
         :param json: The data used in the body of the request to the ``submit`` endpoint.
         :type json: dict
         :return: the run_id as a string
-        :rtype: string
+        :rtype: str
         """
         response = self._do_api_call(SUBMIT_RUN_ENDPOINT, json)
         return response['run_id']
diff --git a/airflow/contrib/hooks/datadog_hook.py b/airflow/contrib/hooks/datadog_hook.py
index 3dfeb781ae..f00e49b097 100644
--- a/airflow/contrib/hooks/datadog_hook.py
+++ b/airflow/contrib/hooks/datadog_hook.py
@@ -35,7 +35,7 @@ class DatadogHook(BaseHook, LoggingMixin):
     Airflow runs.
 
     :param datadog_conn_id: The connection to datadog, containing metadata for api keys.
-    :param datadog_conn_id: string
+    :param datadog_conn_id: str
     """
     def __init__(self, datadog_conn_id='datadog_default'):
         conn = self.get_connection(datadog_conn_id)
@@ -71,9 +71,9 @@ def send_metric(self, metric_name, datapoint, tags=None):
         Sends a single datapoint metric to DataDog
 
         :param metric_name: The name of the metric
-        :type metric_name: string
+        :type metric_name: str
         :param datapoint: A single integer or float related to the metric
-        :type datapoint: integer or float
+        :type datapoint: int or float
         :param tags: A list of tags associated with the metric
         :type tags: list
         """
@@ -95,7 +95,7 @@ def query_metric(self,
         function applied to it and returns the results.
 
         :param query: The datadog query to execute (see datadog docs)
-        :type query: string
+        :type query: str
         :param from_seconds_ago: How many seconds ago to start querying for.
         :type from_seconds_ago: int
         :param to_seconds_ago: Up to how many seconds ago to query for.
@@ -118,16 +118,16 @@ def post_event(self, title, text, tags=None, alert_type=None, aggregation_key=No
         alerting itself.
 
         :param title: The title of the event
-        :type title: string
+        :type title: str
         :param text: The body of the event (more information)
-        :type text: string
+        :type text: str
         :param tags: List of string tags to apply to the event
         :type tags: list
         :param alert_type: The alert type for the event, one of
             ["error", "warning", "info", "success"]
-        :type alert_type: string
+        :type alert_type: str
         :param aggregation_key: Key that can be used to aggregate this event in a stream
-        :type aggregation_key: string
+        :type aggregation_key: str
         """
         response = api.Event.create(
             title=title,
diff --git a/airflow/contrib/hooks/gcp_api_base_hook.py b/airflow/contrib/hooks/gcp_api_base_hook.py
index 053494743f..aa1cf3f713 100644
--- a/airflow/contrib/hooks/gcp_api_base_hook.py
+++ b/airflow/contrib/hooks/gcp_api_base_hook.py
@@ -58,11 +58,11 @@ class GoogleCloudBaseHook(BaseHook, LoggingMixin):
     def __init__(self, gcp_conn_id='google_cloud_default', delegate_to=None):
         """
         :param gcp_conn_id: The connection ID to use when fetching connection info.
-        :type gcp_conn_id: string
+        :type gcp_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have
             domain-wide delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         self.gcp_conn_id = gcp_conn_id
         self.delegate_to = delegate_to
diff --git a/airflow/contrib/hooks/gcp_mlengine_hook.py b/airflow/contrib/hooks/gcp_mlengine_hook.py
index b9f1008fa7..9c255dbe80 100644
--- a/airflow/contrib/hooks/gcp_mlengine_hook.py
+++ b/airflow/contrib/hooks/gcp_mlengine_hook.py
@@ -63,7 +63,7 @@ def create_job(self, project_id, job, use_existing_job_fn=None):
 
         :param project_id: The Google Cloud project id within which MLEngine
             job will be launched.
-        :type project_id: string
+        :type project_id: str
 
         :param job: MLEngine Job object that should be provided to the MLEngine
             API, such as: ::
diff --git a/airflow/contrib/hooks/gcp_pubsub_hook.py b/airflow/contrib/hooks/gcp_pubsub_hook.py
index 1d55d5d487..bf33a9b6bc 100644
--- a/airflow/contrib/hooks/gcp_pubsub_hook.py
+++ b/airflow/contrib/hooks/gcp_pubsub_hook.py
@@ -60,10 +60,10 @@ def publish(self, project, topic, messages):
         """Publishes messages to a Pub/Sub topic.
 
         :param project: the GCP project ID in which to publish
-        :type project: string
+        :type project: str
         :param topic: the Pub/Sub topic to which to publish; do not
             include the ``projects/{project}/topics/`` prefix.
-        :type topic: string
+        :type topic: str
         :param messages: messages to publish; if the data field in a
             message is set, it should already be base64 encoded.
         :type messages: list of PubSub messages; see
@@ -84,10 +84,10 @@ def create_topic(self, project, topic, fail_if_exists=False):
 
         :param project: the GCP project ID in which to create
             the topic
-        :type project: string
+        :type project: str
         :param topic: the Pub/Sub topic name to create; do not
             include the ``projects/{project}/topics/`` prefix.
-        :type topic: string
+        :type topic: str
         :param fail_if_exists: if set, raise an exception if the topic
             already exists
         :type fail_if_exists: bool
@@ -112,10 +112,10 @@ def delete_topic(self, project, topic, fail_if_not_exists=False):
         """Deletes a Pub/Sub topic if it exists.
 
         :param project: the GCP project ID in which to delete the topic
-        :type project: string
+        :type project: str
         :param topic: the Pub/Sub topic name to delete; do not
             include the ``projects/{project}/topics/`` prefix.
-        :type topic: string
+        :type topic: str
         :param fail_if_not_exists: if set, raise an exception if the topic
             does not exist
         :type fail_if_not_exists: bool
@@ -142,17 +142,17 @@ def create_subscription(self, topic_project, topic, subscription=None,
 
         :param topic_project: the GCP project ID of the topic that the
             subscription will be bound to.
-        :type topic_project: string
+        :type topic_project: str
         :param topic: the Pub/Sub topic name that the subscription will be bound
             to create; do not include the ``projects/{project}/subscriptions/``
             prefix.
-        :type topic: string
+        :type topic: str
         :param subscription: the Pub/Sub subscription name. If empty, a random
             name will be generated using the uuid module
-        :type subscription: string
+        :type subscription: str
         :param subscription_project: the GCP project ID where the subscription
             will be created. If unspecified, ``topic_project`` will be used.
-        :type subscription_project: string
+        :type subscription_project: str
         :param ack_deadline_secs: Number of seconds that a subscriber has to
             acknowledge each message pulled from the subscription
         :type ack_deadline_secs: int
@@ -161,7 +161,7 @@ def create_subscription(self, topic_project, topic, subscription=None,
         :type fail_if_exists: bool
         :return: subscription name which will be the system-generated value if
             the ``subscription`` parameter is not supplied
-        :rtype: string
+        :rtype: str
         """
         service = self.get_conn()
         full_topic = _format_topic(topic_project, topic)
@@ -197,10 +197,10 @@ def delete_subscription(self, project, subscription,
         """Deletes a Pub/Sub subscription, if it exists.
 
         :param project: the GCP project ID where the subscription exists
-        :type project: string
+        :type project: str
         :param subscription: the Pub/Sub subscription name to delete; do not
             include the ``projects/{project}/subscriptions/`` prefix.
-        :type subscription: string
+        :type subscription: str
         :param fail_if_not_exists: if set, raise an exception if the topic
             does not exist
         :type fail_if_not_exists: bool
@@ -228,10 +228,10 @@ def pull(self, project, subscription, max_messages,
         """Pulls up to ``max_messages`` messages from Pub/Sub subscription.
 
         :param project: the GCP project ID where the subscription exists
-        :type project: string
+        :type project: str
         :param subscription: the Pub/Sub subscription name to pull from; do not
             include the 'projects/{project}/topics/' prefix.
-        :type subscription: string
+        :type subscription: str
         :param max_messages: The maximum number of messages to return from
             the Pub/Sub API.
         :type max_messages: int
@@ -265,10 +265,10 @@ def acknowledge(self, project, subscription, ack_ids):
 
         :param project: the GCP project name or ID in which to create
             the topic
-        :type project: string
+        :type project: str
         :param subscription: the Pub/Sub subscription name to delete; do not
             include the 'projects/{project}/topics/' prefix.
-        :type subscription: string
+        :type subscription: str
         :param ack_ids: List of ReceivedMessage ackIds from a previous pull
             response
         :type ack_ids: list
diff --git a/airflow/contrib/hooks/gcs_hook.py b/airflow/contrib/hooks/gcs_hook.py
index e3c3747e0b..6cfa1cf565 100644
--- a/airflow/contrib/hooks/gcs_hook.py
+++ b/airflow/contrib/hooks/gcs_hook.py
@@ -57,15 +57,15 @@ def copy(self, source_bucket, source_object, destination_bucket=None,
         source bucket/object is used, but not both.
 
         :param source_bucket: The bucket of the object to copy from.
-        :type source_bucket: string
+        :type source_bucket: str
         :param source_object: The object to copy.
-        :type source_object: string
+        :type source_object: str
         :param destination_bucket: The destination of the object to copied to.
             Can be omitted; then the same bucket is used.
-        :type destination_bucket: string
+        :type destination_bucket: str
         :param destination_object: The (renamed) path of the object if given.
             Can be omitted; then the same name is used.
-        :type destination_object: string
+        :type destination_object: str
         """
         destination_bucket = destination_bucket or source_bucket
         destination_object = destination_object or source_object
@@ -103,11 +103,11 @@ def rewrite(self, source_bucket, source_object, destination_bucket,
         destination_object can be omitted, in which case source_object is used.
 
         :param source_bucket: The bucket of the object to copy from.
-        :type source_bucket: string
+        :type source_bucket: str
         :param source_object: The object to copy.
-        :type source_object: string
+        :type source_object: str
         :param destination_bucket: The destination of the object to copied to.
-        :type destination_bucket: string
+        :type destination_bucket: str
         :param destination_object: The (renamed) path of the object if given.
             Can be omitted; then the same name is used.
         """
@@ -151,11 +151,11 @@ def download(self, bucket, object, filename=None):
         Get a file from Google Cloud Storage.
 
         :param bucket: The bucket to fetch from.
-        :type bucket: string
+        :type bucket: str
         :param object: The object to fetch.
-        :type object: string
+        :type object: str
         :param filename: If set, a local file path where the file should be written to.
-        :type filename: string
+        :type filename: str
         """
         service = self.get_conn()
         downloaded_file_bytes = service \
@@ -177,13 +177,13 @@ def upload(self, bucket, object, filename, mime_type='application/octet-stream')
         Uploads a local file to Google Cloud Storage.
 
         :param bucket: The bucket to upload to.
-        :type bucket: string
+        :type bucket: str
         :param object: The object name to set when uploading the local file.
-        :type object: string
+        :type object: str
         :param filename: The local file path to the file to be uploaded.
-        :type filename: string
+        :type filename: str
         :param mime_type: The MIME type to set when uploading the file.
-        :type mime_type: string
+        :type mime_type: str
         """
         service = self.get_conn()
         media = MediaFileUpload(filename, mime_type)
@@ -204,10 +204,10 @@ def exists(self, bucket, object):
         Checks for the existence of a file in Google Cloud Storage.
 
         :param bucket: The Google cloud storage bucket where the object is.
-        :type bucket: string
+        :type bucket: str
         :param object: The name of the object to check in the Google cloud
             storage bucket.
-        :type object: string
+        :type object: str
         """
         service = self.get_conn()
         try:
@@ -227,10 +227,10 @@ def is_updated_after(self, bucket, object, ts):
         Checks if an object is updated in Google Cloud Storage.
 
         :param bucket: The Google cloud storage bucket where the object is.
-        :type bucket: string
+        :type bucket: str
         :param object: The name of the object to check in the Google cloud
             storage bucket.
-        :type object: string
+        :type object: str
         :param ts: The timestamp to check against.
         :type ts: datetime
         """
@@ -266,11 +266,11 @@ def delete(self, bucket, object, generation=None):
         parameter is used.
 
         :param bucket: name of the bucket, where the object resides
-        :type bucket: string
+        :type bucket: str
         :param object: name of the object to delete
-        :type object: string
+        :type object: str
         :param generation: if present, permanently delete the object of this generation
-        :type generation: string
+        :type generation: str
         :return: True if succeeded
         """
         service = self.get_conn()
@@ -291,16 +291,16 @@ def list(self, bucket, versions=None, maxResults=None, prefix=None, delimiter=No
         List all objects from the bucket with the give string prefix in name
 
         :param bucket: bucket name
-        :type bucket: string
+        :type bucket: str
         :param versions: if true, list all versions of the objects
-        :type versions: boolean
+        :type versions: bool
         :param maxResults: max count of items to return in a single page of responses
-        :type maxResults: integer
+        :type maxResults: int
         :param prefix: prefix string which filters objects whose name begin with
             this prefix
-        :type prefix: string
+        :type prefix: str
         :param delimiter: filters objects based on the delimiter (for e.g '.csv')
-        :type delimiter: string
+        :type delimiter: str
         :return: a stream of object names matching the filtering criteria
         """
         service = self.get_conn()
@@ -344,9 +344,9 @@ def get_size(self, bucket, object):
         Gets the size of a file in Google Cloud Storage.
 
         :param bucket: The Google cloud storage bucket where the object is.
-        :type bucket: string
+        :type bucket: str
         :param object: The name of the object to check in the Google cloud storage bucket.
-        :type object: string
+        :type object: str
 
         """
         self.log.info('Checking the file size of object: %s in bucket: %s',
@@ -375,10 +375,10 @@ def get_crc32c(self, bucket, object):
         Gets the CRC32c checksum of an object in Google Cloud Storage.
 
         :param bucket: The Google cloud storage bucket where the object is.
-        :type bucket: string
+        :type bucket: str
         :param object: The name of the object to check in the Google cloud
             storage bucket.
-        :type object: string
+        :type object: str
         """
         self.log.info('Retrieving the crc32c checksum of '
                       'object: %s in bucket: %s', object, bucket)
@@ -402,10 +402,10 @@ def get_md5hash(self, bucket, object):
         Gets the MD5 hash of an object in Google Cloud Storage.
 
         :param bucket: The Google cloud storage bucket where the object is.
-        :type bucket: string
+        :type bucket: str
         :param object: The name of the object to check in the Google cloud
             storage bucket.
-        :type object: string
+        :type object: str
         """
         self.log.info('Retrieving the MD5 hash of '
                       'object: %s in bucket: %s', object, bucket)
@@ -440,7 +440,7 @@ def create_bucket(self,
             https://cloud.google.com/storage/docs/bucketnaming.html#requirements
 
         :param bucket_name: The name of the bucket.
-        :type bucket_name: string
+        :type bucket_name: str
         :param storage_class: This defines how objects in the bucket are stored
             and determines the SLA and the cost of storage. Values include
 
@@ -451,7 +451,7 @@ def create_bucket(self,
             - ``COLDLINE``.
             If this value is not specified when the bucket is
             created, it will default to STANDARD.
-        :type storage_class: string
+        :type storage_class: str
         :param location: The location of the bucket.
             Object data for objects in the bucket resides in physical storage
             within this region. Defaults to US.
@@ -459,9 +459,9 @@ def create_bucket(self,
             .. seealso::
                 https://developers.google.com/storage/docs/bucket-locations
 
-        :type location: string
+        :type location: str
         :param project_id: The ID of the GCP Project.
-        :type project_id: string
+        :type project_id: str
         :param labels: User-provided labels, in key/value pairs.
         :type labels: dict
         :return: If successful, it returns the ``id`` of the bucket.
diff --git a/airflow/contrib/hooks/jira_hook.py b/airflow/contrib/hooks/jira_hook.py
index c6806d935a..a56c414dce 100644
--- a/airflow/contrib/hooks/jira_hook.py
+++ b/airflow/contrib/hooks/jira_hook.py
@@ -29,7 +29,7 @@ class JiraHook(BaseHook, LoggingMixin):
     Jira interaction hook, a Wrapper around JIRA Python SDK.
 
     :param jira_conn_id: reference to a pre-defined Jira Connection
-    :type jira_conn_id: string
+    :type jira_conn_id: str
     """
     def __init__(self,
                  jira_conn_id='jira_default',
diff --git a/airflow/contrib/hooks/redis_hook.py b/airflow/contrib/hooks/redis_hook.py
index 1de75dbca9..650cc9308b 100644
--- a/airflow/contrib/hooks/redis_hook.py
+++ b/airflow/contrib/hooks/redis_hook.py
@@ -88,6 +88,6 @@ def key_exists(self, key):
         Checks if a key exists in Redis database
 
         :param key: The key to check the existence.
-        :type key: string
+        :type key: str
         """
         return self.get_conn().exists(key)
diff --git a/airflow/contrib/hooks/sagemaker_hook.py b/airflow/contrib/hooks/sagemaker_hook.py
index 09993f96d8..40bebf8979 100644
--- a/airflow/contrib/hooks/sagemaker_hook.py
+++ b/airflow/contrib/hooks/sagemaker_hook.py
@@ -101,7 +101,7 @@ def check_status(self, non_terminal_states,
         :type failed_state: dict
         :param key: the key of the response dict
         that points to the state
-        :type key: string
+        :type key: str
         :param describe_function: the function used to retrieve the status
         :type describe_function: python callable
         :param args: the arguments for the function
@@ -236,7 +236,7 @@ def create_tuning_job(self, tuning_job_config, wait_for_completion=True):
     def describe_training_job(self, training_job_name):
         """
         :param training_job_name: the name of the training job
-        :type training_job_name: string
+        :type training_job_name: str
         Return the training job info associated with the current job_name
         :return: A dict contains all the training job info
         """
@@ -246,7 +246,7 @@ def describe_training_job(self, training_job_name):
     def describe_tuning_job(self, tuning_job_name):
         """
         :param tuning_job_name: the name of the training job
-        :type tuning_job_name: string
+        :type tuning_job_name: str
         Return the tuning job info associated with the current job_name
         :return: A dict contains all the tuning job info
         """
diff --git a/airflow/contrib/hooks/segment_hook.py b/airflow/contrib/hooks/segment_hook.py
index 874d35d074..01613f2ade 100644
--- a/airflow/contrib/hooks/segment_hook.py
+++ b/airflow/contrib/hooks/segment_hook.py
@@ -54,7 +54,7 @@ def __init__(
         :type segment_conn_id: str
         :param segment_debug_mode: Determines whether Segment should run in debug mode.
         Defaults to False
-        :type segment_debug_mode: boolean
+        :type segment_debug_mode: bool
         .. note::
             You must include a JSON structure in the `Extras` field.
             We need a user's security token to connect to Segment.
diff --git a/airflow/contrib/hooks/winrm_hook.py b/airflow/contrib/hooks/winrm_hook.py
index 1dd02f9951..5cc24cb7ad 100644
--- a/airflow/contrib/hooks/winrm_hook.py
+++ b/airflow/contrib/hooks/winrm_hook.py
@@ -36,36 +36,36 @@ class WinRMHook(BaseHook, LoggingMixin):
     :param ssh_conn_id: connection id from airflow Connections from where all
         the required parameters can be fetched like username and password.
         Thought the priority is given to the param passed during init
-    :type ssh_conn_id: string
+    :type ssh_conn_id: str
     :param endpoint: When set to `None`, endpoint will be constructed like this:
         'http://{remote_host}:{remote_port}/wsman'
-    :type endpoint: string
+    :type endpoint: str
     :param remote_host: Remote host to connect to.
         Ignored if `endpoint` is not `None`.
-    :type remote_host: string
+    :type remote_host: str
     :param remote_port: Remote port to connect to.
         Ignored if `endpoint` is not `None`.
     :type remote_port: int
     :param transport: transport type, one of 'plaintext' (default), 'kerberos', 'ssl',
         'ntlm', 'credssp'
-    :type transport: string
+    :type transport: str
     :param username: username to connect to the remote_host
-    :type username: string
+    :type username: str
     :param password: password of the username to connect to the remote_host
-    :type password: string
+    :type password: str
     :param service: the service name, default is HTTP
-    :type service: string
+    :type service: str
     :param keytab: the path to a keytab file if you are using one
-    :type keytab: string
+    :type keytab: str
     :param ca_trust_path: Certification Authority trust path
-    :type ca_trust_path: string
+    :type ca_trust_path: str
     :param cert_pem: client authentication certificate file path in PEM format
-    :type cert_pem: string
+    :type cert_pem: str
     :param cert_key_pem: client authentication certificate key file path in PEM format
-    :type cert_key_pem: string
+    :type cert_key_pem: str
     :param server_cert_validation: whether server certificate should be validated on
         Python versions that suppport it; one of 'validate' (default), 'ignore'
-    :type server_cert_validation: string
+    :type server_cert_validation: str
     :param kerberos_delegation: if True, TGT is sent to target server to
         allow multiple hops
     :type kerberos_delegation: bool
@@ -80,7 +80,7 @@ class WinRMHook(BaseHook, LoggingMixin):
     :type operation_timeout_sec: int
     :param kerberos_hostname_override: the hostname to use for the kerberos exchange
         (defaults to the hostname in the endpoint URL)
-    :type kerberos_hostname_override: string
+    :type kerberos_hostname_override: str
     :param message_encryption_enabled: Will encrypt the WinRM messages if set to True and
         the transport auth supports message encryption (Default True).
     :type message_encryption_enabled: bool
diff --git a/airflow/contrib/operators/bigquery_check_operator.py b/airflow/contrib/operators/bigquery_check_operator.py
index ff7b97eea8..247a1ae7fb 100644
--- a/airflow/contrib/operators/bigquery_check_operator.py
+++ b/airflow/contrib/operators/bigquery_check_operator.py
@@ -52,12 +52,12 @@ class BigQueryCheckOperator(CheckOperator):
     without stopping the progress of the DAG.
 
     :param sql: the sql to be executed
-    :type sql: string
+    :type sql: str
     :param bigquery_conn_id: reference to the BigQuery database
-    :type bigquery_conn_id: string
+    :type bigquery_conn_id: str
     :param use_legacy_sql: Whether to use legacy SQL (true)
         or standard SQL (false).
-    :type use_legacy_sql: boolean
+    :type use_legacy_sql: bool
     """
 
     @apply_defaults
@@ -81,10 +81,10 @@ class BigQueryValueCheckOperator(ValueCheckOperator):
     Performs a simple value check using sql code.
 
     :param sql: the sql to be executed
-    :type sql: string
+    :type sql: str
     :param use_legacy_sql: Whether to use legacy SQL (true)
         or standard SQL (false).
-    :type use_legacy_sql: boolean
+    :type use_legacy_sql: bool
     """
 
     @apply_defaults
@@ -126,7 +126,7 @@ class BigQueryIntervalCheckOperator(IntervalCheckOperator):
     :type metrics_threshold: dict
     :param use_legacy_sql: Whether to use legacy SQL (true)
         or standard SQL (false).
-    :type use_legacy_sql: boolean
+    :type use_legacy_sql: bool
     """
 
     @apply_defaults
diff --git a/airflow/contrib/operators/bigquery_get_data.py b/airflow/contrib/operators/bigquery_get_data.py
index 6e0c6c44de..f5e6e50f06 100644
--- a/airflow/contrib/operators/bigquery_get_data.py
+++ b/airflow/contrib/operators/bigquery_get_data.py
@@ -51,21 +51,21 @@ class BigQueryGetDataOperator(BaseOperator):
         )
 
     :param dataset_id: The dataset ID of the requested table. (templated)
-    :type dataset_id: string
+    :type dataset_id: str
     :param table_id: The table ID of the requested table. (templated)
-    :type table_id: string
+    :type table_id: str
     :param max_results: The maximum number of records (rows) to be fetched
         from the table. (templated)
-    :type max_results: string
+    :type max_results: str
     :param selected_fields: List of fields to return (comma-separated). If
         unspecified, all fields are returned.
-    :type selected_fields: string
+    :type selected_fields: str
     :param bigquery_conn_id: reference to a specific BigQuery hook.
-    :type bigquery_conn_id: string
+    :type bigquery_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     """
     template_fields = ('dataset_id', 'table_id', 'max_results')
     ui_color = '#e4f0e8'
diff --git a/airflow/contrib/operators/bigquery_operator.py b/airflow/contrib/operators/bigquery_operator.py
index e9366bf2ef..66ac864c69 100644
--- a/airflow/contrib/operators/bigquery_operator.py
+++ b/airflow/contrib/operators/bigquery_operator.py
@@ -41,35 +41,35 @@ class BigQueryOperator(BaseOperator):
     :param destination_dataset_table: A dotted
         (<project>.|<project>:)<dataset>.<table> that, if set, will store the results
         of the query. (templated)
-    :type destination_dataset_table: string
+    :type destination_dataset_table: str
     :param write_disposition: Specifies the action that occurs if the destination table
         already exists. (default: 'WRITE_EMPTY')
-    :type write_disposition: string
+    :type write_disposition: str
     :param create_disposition: Specifies whether the job is allowed to create new tables.
         (default: 'CREATE_IF_NEEDED')
-    :type create_disposition: string
+    :type create_disposition: str
     :param allow_large_results: Whether to allow large results.
-    :type allow_large_results: boolean
+    :type allow_large_results: bool
     :param flatten_results: If true and query uses legacy SQL dialect, flattens
         all nested and repeated fields in the query results. ``allow_large_results``
         must be ``true`` if this is set to ``false``. For standard SQL queries, this
         flag is ignored and results are never flattened.
-    :type flatten_results: boolean
+    :type flatten_results: bool
     :param bigquery_conn_id: reference to a specific BigQuery hook.
-    :type bigquery_conn_id: string
+    :type bigquery_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param udf_config: The User Defined Function configuration for the query.
         See https://cloud.google.com/bigquery/user-defined-functions for details.
     :type udf_config: list
     :param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
-    :type use_legacy_sql: boolean
+    :type use_legacy_sql: bool
     :param maximum_billing_tier: Positive integer that serves as a multiplier
         of the basic price.
         Defaults to None, in which case it uses the value set in the project.
-    :type maximum_billing_tier: integer
+    :type maximum_billing_tier: int
     :param maximum_bytes_billed: Limits the bytes billed for this job.
         Queries that will have bytes billed beyond this limit will fail
         (without incurring a charge). If unspecified, this will be
@@ -94,7 +94,7 @@ class BigQueryOperator(BaseOperator):
     :param priority: Specifies a priority for the query.
         Possible values include INTERACTIVE and BATCH.
         The default value is INTERACTIVE.
-    :type priority: string
+    :type priority: str
     :param time_partitioning: configure optional time partitioning fields i.e.
         partition by field, type and
         expiration as per API specifications. Note that 'field' is not available in
@@ -213,11 +213,11 @@ class BigQueryCreateEmptyTableOperator(BaseOperator):
     You can also create a table without schema.
 
     :param project_id: The project to create the table into. (templated)
-    :type project_id: string
+    :type project_id: str
     :param dataset_id: The dataset to create the table into. (templated)
-    :type dataset_id: string
+    :type dataset_id: str
     :param table_id: The Name of the table to be created. (templated)
-    :type table_id: string
+    :type table_id: str
     :param schema_fields: If set, the schema field list as defined here:
         https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
 
@@ -230,7 +230,7 @@ class BigQueryCreateEmptyTableOperator(BaseOperator):
     :param gcs_schema_object: Full path to the JSON file containing
         schema (templated). For
         example: ``gs://test-bucket/dir1/dir2/employee_schema.json``
-    :type gcs_schema_object: string
+    :type gcs_schema_object: str
     :param time_partitioning: configure optional time partitioning fields i.e.
         partition by field, type and  expiration as per API specifications.
 
@@ -238,14 +238,14 @@ class BigQueryCreateEmptyTableOperator(BaseOperator):
             https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
     :type time_partitioning: dict
     :param bigquery_conn_id: Reference to a specific BigQuery hook.
-    :type bigquery_conn_id: string
+    :type bigquery_conn_id: str
     :param google_cloud_storage_conn_id: Reference to a specific Google
         cloud storage hook.
-    :type google_cloud_storage_conn_id: string
+    :type google_cloud_storage_conn_id: str
     :param delegate_to: The account to impersonate, if any. For this to
         work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param labels: a dictionary containing labels for the table, passed to BigQuery
 
         **Example (with schema JSON in GCS)**: ::
@@ -362,7 +362,7 @@ class BigQueryCreateExternalTableOperator(BaseOperator):
     Google cloud storage must be a JSON file with the schema fields in it.
 
     :param bucket: The bucket to point the external table to. (templated)
-    :type bucket: string
+    :type bucket: str
     :param source_objects: List of Google cloud storage URIs to point
         table to. (templated)
         If source_format is 'DATASTORE_BACKUP', the list must only contain a single URI.
@@ -370,7 +370,7 @@ class BigQueryCreateExternalTableOperator(BaseOperator):
     :param destination_project_dataset_table: The dotted (<project>.)<dataset>.<table>
         BigQuery table to load data into (templated). If <project> is not included,
         project will be the project defined in the connection json.
-    :type destination_project_dataset_table: string
+    :type destination_project_dataset_table: str
     :param schema_fields: If set, the schema field list as defined here:
         https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
 
@@ -383,26 +383,26 @@ class BigQueryCreateExternalTableOperator(BaseOperator):
     :type schema_fields: list
     :param schema_object: If set, a GCS object path pointing to a .json file that
         contains the schema for the table. (templated)
-    :type schema_object: string
+    :type schema_object: str
     :param source_format: File format of the data.
-    :type source_format: string
+    :type source_format: str
     :param compression: [Optional] The compression type of the data source.
         Possible values include GZIP and NONE.
         The default value is NONE.
         This setting is ignored for Google Cloud Bigtable,
         Google Cloud Datastore backups and Avro formats.
-    :type compression: string
+    :type compression: str
     :param skip_leading_rows: Number of rows to skip when loading from a CSV.
     :type skip_leading_rows: int
     :param field_delimiter: The delimiter to use for the CSV.
-    :type field_delimiter: string
+    :type field_delimiter: str
     :param max_bad_records: The maximum number of bad records that BigQuery can
         ignore when running the job.
     :type max_bad_records: int
     :param quote_character: The value that is used to quote data sections in a CSV file.
-    :type quote_character: string
+    :type quote_character: str
     :param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false).
-    :type allow_quoted_newlines: boolean
+    :type allow_quoted_newlines: bool
     :param allow_jagged_rows: Accept rows that are missing trailing optional columns.
         The missing values are treated as nulls. If false, records with missing trailing
         columns are treated as bad records, and if there are too many bad records, an
@@ -410,14 +410,14 @@ class BigQueryCreateExternalTableOperator(BaseOperator):
         for other formats.
     :type allow_jagged_rows: bool
     :param bigquery_conn_id: Reference to a specific BigQuery hook.
-    :type bigquery_conn_id: string
+    :type bigquery_conn_id: str
     :param google_cloud_storage_conn_id: Reference to a specific Google
         cloud storage hook.
-    :type google_cloud_storage_conn_id: string
+    :type google_cloud_storage_conn_id: str
     :param delegate_to: The account to impersonate, if any. For this to
         work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param src_fmt_configs: configure optional fields specific to the source format
     :type src_fmt_configs: dict
     :param labels a dictionary containing labels for the table, passed to BigQuery
@@ -517,9 +517,9 @@ class BigQueryDeleteDatasetOperator(BaseOperator):
     This operator deletes an existing dataset from your Project in Big query.
     https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/delete
     :param project_id: The project id of the dataset.
-    :type project_id: string
+    :type project_id: str
     :param dataset_id: The dataset to be deleted.
-    :type dataset_id: string
+    :type dataset_id: str
 
     **Example**: ::
 
diff --git a/airflow/contrib/operators/bigquery_table_delete_operator.py b/airflow/contrib/operators/bigquery_table_delete_operator.py
index a16107d8c4..45c481454e 100644
--- a/airflow/contrib/operators/bigquery_table_delete_operator.py
+++ b/airflow/contrib/operators/bigquery_table_delete_operator.py
@@ -29,16 +29,16 @@ class BigQueryTableDeleteOperator(BaseOperator):
     :param deletion_dataset_table: A dotted
         (<project>.|<project>:)<dataset>.<table> that indicates which table
         will be deleted. (templated)
-    :type deletion_dataset_table: string
+    :type deletion_dataset_table: str
     :param bigquery_conn_id: reference to a specific BigQuery hook.
-    :type bigquery_conn_id: string
+    :type bigquery_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param ignore_if_missing: if True, then return success even if the
         requested table does not exist.
-    :type ignore_if_missing: boolean
+    :type ignore_if_missing: bool
     """
     template_fields = ('deletion_dataset_table',)
     ui_color = '#ffd1dc'
diff --git a/airflow/contrib/operators/bigquery_to_bigquery.py b/airflow/contrib/operators/bigquery_to_bigquery.py
index 3a0b44db60..2073cadef1 100644
--- a/airflow/contrib/operators/bigquery_to_bigquery.py
+++ b/airflow/contrib/operators/bigquery_to_bigquery.py
@@ -38,17 +38,17 @@ class BigQueryToBigQueryOperator(BaseOperator):
     :type source_project_dataset_tables: list|string
     :param destination_project_dataset_table: The destination BigQuery
         table. Format is: (project:|project.)<dataset>.<table> (templated)
-    :type destination_project_dataset_table: string
+    :type destination_project_dataset_table: str
     :param write_disposition: The write disposition if the table already exists.
-    :type write_disposition: string
+    :type write_disposition: str
     :param create_disposition: The create disposition if the table doesn't exist.
-    :type create_disposition: string
+    :type create_disposition: str
     :param bigquery_conn_id: reference to a specific BigQuery hook.
-    :type bigquery_conn_id: string
+    :type bigquery_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param labels: a dictionary containing labels for the job/query,
         passed to BigQuery
     :type labels: dict
diff --git a/airflow/contrib/operators/bigquery_to_gcs.py b/airflow/contrib/operators/bigquery_to_gcs.py
index 7cefd7e815..ec6b937d21 100644
--- a/airflow/contrib/operators/bigquery_to_gcs.py
+++ b/airflow/contrib/operators/bigquery_to_gcs.py
@@ -34,26 +34,26 @@ class BigQueryToCloudStorageOperator(BaseOperator):
         ``(<project>.|<project>:)<dataset>.<table>`` BigQuery table to use as the
         source data. If <project> is not included, project will be the project
         defined in the connection json. (templated)
-    :type source_project_dataset_table: string
+    :type source_project_dataset_table: str
     :param destination_cloud_storage_uris: The destination Google Cloud
         Storage URI (e.g. gs://some-bucket/some-file.txt). (templated) Follows
         convention defined here:
         https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
     :type destination_cloud_storage_uris: list
     :param compression: Type of compression to use.
-    :type compression: string
+    :type compression: str
     :param export_format: File format to export.
-    :type export_format: string
+    :type export_format: str
     :param field_delimiter: The delimiter to use when extracting to a CSV.
-    :type field_delimiter: string
+    :type field_delimiter: str
     :param print_header: Whether to print a header for a CSV file extract.
-    :type print_header: boolean
+    :type print_header: bool
     :param bigquery_conn_id: reference to a specific BigQuery hook.
-    :type bigquery_conn_id: string
+    :type bigquery_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param labels: a dictionary containing labels for the job/query,
         passed to BigQuery
     :type labels: dict
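
Editor's note: a sketch of exporting a table to Cloud Storage with the operator above; bucket, table and connection names are placeholders, and the wildcard URI follows the multi-file export convention linked in the docstring.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.operators.bigquery_to_gcs import BigQueryToCloudStorageOperator

    dag = DAG('bq_to_gcs_example', start_date=datetime(2018, 1, 1), schedule_interval=None)

    export_table = BigQueryToCloudStorageOperator(
        task_id='export_table',
        source_project_dataset_table='my-project.my_dataset.events',
        destination_cloud_storage_uris=['gs://my-bucket/exports/events-*.csv'],  # wildcard => sharded export
        export_format='CSV',
        compression='GZIP',
        field_delimiter=',',
        print_header=True,
        bigquery_conn_id='bigquery_default',
        dag=dag,
    )
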
diff --git a/airflow/contrib/operators/cassandra_to_gcs.py b/airflow/contrib/operators/cassandra_to_gcs.py
index 211444b96d..95107a497f 100644
--- a/airflow/contrib/operators/cassandra_to_gcs.py
+++ b/airflow/contrib/operators/cassandra_to_gcs.py
@@ -60,18 +60,18 @@ def __init__(self,
                  **kwargs):
         """
         :param cql: The CQL to execute on the Cassandra table.
-        :type cql: string
+        :type cql: str
         :param bucket: The bucket to upload to.
-        :type bucket: string
+        :type bucket: str
         :param filename: The filename to use as the object name when uploading
             to Google cloud storage. A {} should be specified in the filename
             to allow the operator to inject file numbers in cases where the
             file is split due to size.
-        :type filename: string
+        :type filename: str
         :param schema_filename: If set, the filename to use as the object name
             when uploading a .json file containing the BigQuery schema fields
             for the table that was dumped from Cassandra.

-        :type schema_filename: string
+        :type schema_filename: str
         :param approx_max_file_size_bytes: This operator supports the ability
             to split large table dumps into multiple files (see notes in the
             filenamed param docs above). Google cloud storage allows for files
@@ -79,14 +79,14 @@ def __init__(self,
             file size of the splits.
         :type approx_max_file_size_bytes: long
         :param cassandra_conn_id: Reference to a specific Cassandra hook.
-        :type cassandra_conn_id: string
+        :type cassandra_conn_id: str
         :param google_cloud_storage_conn_id: Reference to a specific Google
             cloud storage hook.
-        :type google_cloud_storage_conn_id: string
+        :type google_cloud_storage_conn_id: str
         :param delegate_to: The account to impersonate, if any. For this to
             work, the service account making the request must have domain-wide
             delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(CassandraToGoogleCloudStorageOperator, self).__init__(*args, **kwargs)
         self.cql = cql
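
Editor's note: an illustrative instantiation of the Cassandra export operator whose __init__ docstring is touched above; the CQL statement, bucket, object names and connection ids are placeholders.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.operators.cassandra_to_gcs import CassandraToGoogleCloudStorageOperator

    dag = DAG('cassandra_to_gcs_example', start_date=datetime(2018, 1, 1), schedule_interval=None)

    dump_users = CassandraToGoogleCloudStorageOperator(
        task_id='dump_users',
        cql='SELECT * FROM my_keyspace.users',
        bucket='my-export-bucket',
        filename='cassandra/users/export_{}.json',      # {} is replaced with the chunk number on split
        schema_filename='cassandra/users/schema.json',  # BigQuery schema for the dumped table
        cassandra_conn_id='cassandra_default',
        google_cloud_storage_conn_id='google_cloud_default',
        dag=dag,
    )
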
diff --git a/airflow/contrib/operators/databricks_operator.py b/airflow/contrib/operators/databricks_operator.py
index 836458c76f..3ebc729f78 100644
--- a/airflow/contrib/operators/databricks_operator.py
+++ b/airflow/contrib/operators/databricks_operator.py
@@ -179,7 +179,7 @@ class DatabricksSubmitRunOperator(BaseOperator):
     :param existing_cluster_id: ID for existing cluster on which to run this task.
         *EITHER* ``new_cluster`` *OR* ``existing_cluster_id`` should be specified.
         This field will be templated.
-    :type existing_cluster_id: string
+    :type existing_cluster_id: str
     :param libraries: Libraries which this run will use.
         This field will be templated.
 
@@ -190,7 +190,7 @@ class DatabricksSubmitRunOperator(BaseOperator):
         By default this will be set to the Airflow ``task_id``. This ``task_id`` is a
         required parameter of the superclass ``BaseOperator``.
         This field will be templated.
-    :type run_name: string
+    :type run_name: str
     :param timeout_seconds: The timeout for this run. By default a value of 0 is used
         which means to have no timeout.
         This field will be templated.
@@ -199,7 +199,7 @@ class DatabricksSubmitRunOperator(BaseOperator):
         By default and in the common case this will be ``databricks_default``. To use
         token based authentication, provide the key ``token`` in the extra field for the
         connection.
-    :type databricks_conn_id: string
+    :type databricks_conn_id: str
     :param polling_period_seconds: Controls the rate which we poll for the result of
         this run. By default the operator will poll every 30 seconds.
     :type polling_period_seconds: int
@@ -210,7 +210,7 @@ class DatabricksSubmitRunOperator(BaseOperator):
             might be a floating point number).
     :type databricks_retry_delay: float
     :param do_xcom_push: Whether we should push run_id and run_page_url to xcom.
-    :type do_xcom_push: boolean
+    :type do_xcom_push: bool
     """
     # Used in airflow.models.BaseOperator
     template_fields = ('json',)
@@ -347,7 +347,7 @@ class DatabricksRunNowOperator(BaseOperator):
         This field will be templated.
         .. seealso::
             https://docs.databricks.com/api/latest/jobs.html#run-now
-    :type job_id: string
+    :type job_id: str
     :param json: A JSON object containing API parameters which will be passed
         directly to the ``api/2.0/jobs/run-now`` endpoint. The other named parameters
         (i.e. ``notebook_params``, ``spark_submit_params``..) to this operator will
@@ -403,7 +403,7 @@ class DatabricksRunNowOperator(BaseOperator):
         By default and in the common case this will be ``databricks_default``. To use
         token based authentication, provide the key ``token`` in the extra field for the
         connection.
-    :type databricks_conn_id: string
+    :type databricks_conn_id: str
     :param polling_period_seconds: Controls the rate which we poll for the result of
         this run. By default the operator will poll every 30 seconds.
     :type polling_period_seconds: int
@@ -411,7 +411,7 @@ class DatabricksRunNowOperator(BaseOperator):
         unreachable. Its value must be greater than or equal to 1.
     :type databricks_retry_limit: int
     :param do_xcom_push: Whether we should push run_id and run_page_url to xcom.
-    :type do_xcom_push: boolean
+    :type do_xcom_push: bool
     """
     # Used in airflow.models.BaseOperator
     template_fields = ('json',)
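
Editor's note: a hedged sketch of DatabricksSubmitRunOperator; the ``json`` payload mirrors the runs/submit API the docstring refers to, and the cluster spec, notebook path and connection id are placeholders, not values taken from this PR.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.operators.databricks_operator import DatabricksSubmitRunOperator

    dag = DAG('databricks_submit_example', start_date=datetime(2018, 1, 1), schedule_interval=None)

    notebook_run = DatabricksSubmitRunOperator(
        task_id='notebook_run',
        databricks_conn_id='databricks_default',
        json={
            'new_cluster': {
                'spark_version': '2.1.0-db3-scala2.11',
                'node_type_id': 'r3.xlarge',
                'num_workers': 2,
            },
            'notebook_task': {'notebook_path': '/Users/someone@example.com/my-notebook'},
        },
        run_name='airflow-notebook-run',
        timeout_seconds=3600,
        polling_period_seconds=30,
        do_xcom_push=True,  # push run_id and run_page_url to XCom
        dag=dag,
    )
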
diff --git a/airflow/contrib/operators/dataflow_operator.py b/airflow/contrib/operators/dataflow_operator.py
index 9d472e810d..1a0d447ef5 100644
--- a/airflow/contrib/operators/dataflow_operator.py
+++ b/airflow/contrib/operators/dataflow_operator.py
@@ -38,25 +38,25 @@ class DataFlowJavaOperator(BaseOperator):
         https://cloud.google.com/dataflow/pipelines/specifying-exec-params
 
     :param jar: The reference to a self executing DataFlow jar.
-    :type jar: string
+    :type jar: str
     :param dataflow_default_options: Map of default job options.
     :type dataflow_default_options: dict
     :param options: Map of job specific options.
     :type options: dict
     :param gcp_conn_id: The connection ID to use connecting to Google Cloud
         Platform.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have
         domain-wide delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param poll_sleep: The time in seconds to sleep between polling Google
         Cloud Platform for the dataflow job status while the job is in the
         JOB_STATE_RUNNING state.
     :type poll_sleep: int
     :param job_class: The name of the dataflow job class to be executed, it
         is often not the main class configured in the dataflow jar file.
-    :type job_class: string
+    :type job_class: str
 
     Both ``jar`` and ``options`` are templated so you can use variables in them.
 
@@ -150,18 +150,18 @@ class DataflowTemplateOperator(BaseOperator):
     will be passed to the job.
 
     :param template: The reference to the DataFlow template.
-    :type template: string
+    :type template: str
     :param dataflow_default_options: Map of default job environment options.
     :type dataflow_default_options: dict
     :param parameters: Map of job specific parameters for the template.
     :type parameters: dict
     :param gcp_conn_id: The connection ID to use connecting to Google Cloud
         Platform.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have
         domain-wide delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param poll_sleep: The time in seconds to sleep between polling Google
         Cloud Platform for the dataflow job status while the job is in the
         JOB_STATE_RUNNING state.
@@ -263,7 +263,7 @@ class DataFlowPythonOperator(BaseOperator):
 
     :param py_file: Reference to the python dataflow pipeline file.py, e.g.,
         /some/local/file/path/to/your/python/pipeline/file.
-    :type py_file: string
+    :type py_file: str
     :param py_options: Additional python options.
     :type py_options: list of strings, e.g., ["-m", "-v"].
     :param dataflow_default_options: Map of default job options.
@@ -272,11 +272,11 @@ class DataFlowPythonOperator(BaseOperator):
     :type options: dict
     :param gcp_conn_id: The connection ID to use connecting to Google Cloud
         Platform.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have
         domain-wide  delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param poll_sleep: The time in seconds to sleep between polling Google
         Cloud Platform for the dataflow job status while the job is in the
         JOB_STATE_RUNNING state.
@@ -346,9 +346,9 @@ def google_cloud_to_local(self, file_name):
         will be returned immediately.
 
         :param file_name: The full path of input file.
-        :type file_name: string
+        :type file_name: str
         :return: The full path of local file.
-        :type: string
+        :type: str
         """
         if not file_name.startswith('gs://'):
             return file_name
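
Editor's note: a minimal sketch of launching a Python pipeline with DataFlowPythonOperator from this module; the pipeline path, bucket locations and option keys are illustrative placeholders only.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.operators.dataflow_operator import DataFlowPythonOperator

    dag = DAG('dataflow_python_example', start_date=datetime(2018, 1, 1), schedule_interval=None)

    run_pipeline = DataFlowPythonOperator(
        task_id='run_pipeline',
        py_file='gs://my-bucket/pipelines/wordcount.py',  # local path or gs:// URI (staged locally first)
        dataflow_default_options={
            'project': 'my-project',
            'staging_location': 'gs://my-bucket/staging',
            'temp_location': 'gs://my-bucket/tmp',
        },
        options={'output': 'gs://my-bucket/wordcount/output'},
        gcp_conn_id='google_cloud_default',
        poll_sleep=10,
        dag=dag,
    )
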
diff --git a/airflow/contrib/operators/dataproc_operator.py b/airflow/contrib/operators/dataproc_operator.py
index 5ce2ff1964..ad74c36b37 100644
--- a/airflow/contrib/operators/dataproc_operator.py
+++ b/airflow/contrib/operators/dataproc_operator.py
@@ -48,46 +48,46 @@ class DataprocClusterCreateOperator(BaseOperator):
     parameters detailed in the link are available as a parameter to this operator.
 
     :param cluster_name: The name of the DataProc cluster to create. (templated)
-    :type cluster_name: string
+    :type cluster_name: str
     :param project_id: The ID of the google cloud project in which
         to create the cluster. (templated)
-    :type project_id: string
+    :type project_id: str
     :param num_workers: The # of workers to spin up
     :type num_workers: int
     :param storage_bucket: The storage bucket to use, setting to None lets dataproc
         generate a custom one for you
-    :type storage_bucket: string
+    :type storage_bucket: str
     :param init_actions_uris: List of GCS URIs containing
         dataproc initialization scripts
     :type init_actions_uris: list[string]
     :param init_action_timeout: Amount of time executable scripts in
         init_actions_uris have to complete
-    :type init_action_timeout: string
+    :type init_action_timeout: str
     :param metadata: dict of key-value google compute engine metadata entries
         to add to all instances
     :type metadata: dict
     :param image_version: the version of software inside the Dataproc cluster
-    :type image_version: string
+    :type image_version: str
     :param properties: dict of properties to set on
         config files (e.g. spark-defaults.conf), see
         https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.clusters#SoftwareConfig
     :type properties: dict
     :param master_machine_type: Compute engine machine type to use for the master node
-    :type master_machine_type: string
+    :type master_machine_type: str
     :param master_disk_type: Type of the boot disk for the master node
         (default is ``pd-standard``).
         Valid values: ``pd-ssd`` (Persistent Disk Solid State Drive) or
         ``pd-standard`` (Persistent Disk Hard Disk Drive).
-    :type master_disk_type: string
+    :type master_disk_type: str
     :param master_disk_size: Disk size for the master node
     :type master_disk_size: int
     :param worker_machine_type: Compute engine machine type to use for the worker nodes
-    :type worker_machine_type: string
+    :type worker_machine_type: str
     :param worker_disk_type: Type of the boot disk for the worker node
         (default is ``pd-standard``).
         Valid values: ``pd-ssd`` (Persistent Disk Solid State Drive) or
         ``pd-standard`` (Persistent Disk Hard Disk Drive).
-    :type worker_disk_type: string
+    :type worker_disk_type: str
     :param worker_disk_size: Disk size for the worker nodes
     :type worker_disk_size: int
     :param num_preemptible_workers: The # of preemptible worker nodes to spin up
@@ -95,13 +95,13 @@ class DataprocClusterCreateOperator(BaseOperator):
     :param labels: dict of labels to add to the cluster
     :type labels: dict
     :param zone: The zone where the cluster will be located. (templated)
-    :type zone: string
+    :type zone: str
     :param network_uri: The network uri to be used for machine communication, cannot be
         specified with subnetwork_uri
-    :type network_uri: string
+    :type network_uri: str
     :param subnetwork_uri: The subnetwork uri to be used for machine communication,
         cannot be specified with network_uri
-    :type subnetwork_uri: string
+    :type subnetwork_uri: str
     :param internal_ip_only: If true, all instances in the cluster will only
         have internal IP addresses. This can only be enabled for subnetwork
         enabled networks
@@ -109,14 +109,15 @@ class DataprocClusterCreateOperator(BaseOperator):
     :param tags: The GCE tags to add to all instances
     :type tags: list[string]
     :param region: leave as 'global', might become relevant in the future. (templated)
+    :type region: str
     :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param service_account: The service account of the dataproc instances.
-    :type service_account: string
+    :type service_account: str
     :param service_account_scopes: The URIs of service account scopes to be included.
     :type service_account_scopes: list[string]
     :param idle_delete_ttl: The longest duration that cluster would keep alive while
@@ -425,25 +426,25 @@ class DataprocClusterScaleOperator(BaseOperator):
         https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters
 
     :param cluster_name: The name of the cluster to scale. (templated)
-    :type cluster_name: string
+    :type cluster_name: str
     :param project_id: The ID of the google cloud project in which
         the cluster runs. (templated)
-    :type project_id: string
+    :type project_id: str
     :param region: The region for the dataproc cluster. (templated)
-    :type region: string
+    :type region: str
     :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
     :param num_workers: The new number of workers
     :type num_workers: int
     :param num_preemptible_workers: The new number of preemptible workers
     :type num_preemptible_workers: int
     :param graceful_decommission_timeout: Timeout for graceful YARN decommissioning.
         Maximum value is 1d
-    :type graceful_decommission_timeout: string
+    :type graceful_decommission_timeout: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     """
 
     template_fields = ['cluster_name', 'project_id', 'region']
@@ -559,18 +560,18 @@ class DataprocClusterDeleteOperator(BaseOperator):
     cluster is destroyed.
 
     :param cluster_name: The name of the cluster to create. (templated)
-    :type cluster_name: string
+    :type cluster_name: str
     :param project_id: The ID of the google cloud project in which
         the cluster runs. (templated)
-    :type project_id: string
+    :type project_id: str
     :param region: leave as 'global', might become relevant in the future. (templated)
-    :type region: string
+    :type region: str
     :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     """
 
     template_fields = ['cluster_name', 'project_id', 'region']
@@ -661,18 +662,18 @@ class DataProcPigOperator(BaseOperator):
 
     :param query: The query or reference to the query
         file (pg or pig extension). (templated)
-    :type query: string
+    :type query: str
     :param query_uri: The uri of a pig script on Cloud Storage.
-    :type query_uri: string
+    :type query_uri: str
     :param variables: Map of named parameters for the query. (templated)
     :type variables: dict
     :param job_name: The job name used in the DataProc cluster. This
         name by default is the task_id appended with the execution date, but can
         be templated. The name will always be appended with a random number to
         avoid name clashes. (templated)
-    :type job_name: string
+    :type job_name: str
     :param cluster_name: The name of the DataProc cluster. (templated)
-    :type cluster_name: string
+    :type cluster_name: str
     :param dataproc_pig_properties: Map for the Pig properties. Ideal to put in
         default arguments
     :type dataproc_pig_properties: dict
@@ -680,18 +681,18 @@ class DataProcPigOperator(BaseOperator):
         UDFs and libs) and are ideal to put in default arguments.
     :type dataproc_pig_jars: list
     :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param region: The specified region where the dataproc cluster is created.
-    :type region: string
+    :type region: str
     :var dataproc_job_id: The actual "jobId" as submitted to the Dataproc API.
         This is useful for identifying or linking to the job in the Google Cloud Console
         Dataproc UI, as the actual "jobId" submitted to the Dataproc API is appended with
         an 8 character random string.
-    :vartype dataproc_job_id: string
+    :vartype dataproc_job_id: str
     """
     template_fields = ['query', 'variables', 'job_name', 'cluster_name', 'dataproc_jars']
     template_ext = ('.pg', '.pig',)
@@ -750,17 +751,17 @@ class DataProcHiveOperator(BaseOperator):
     Start a Hive query Job on a Cloud DataProc cluster.
 
     :param query: The query or reference to the query file (q extension).
-    :type query: string
+    :type query: str
     :param query_uri: The uri of a hive script on Cloud Storage.
-    :type query_uri: string
+    :type query_uri: str
     :param variables: Map of named parameters for the query.
     :type variables: dict
     :param job_name: The job name used in the DataProc cluster. This name by default
         is the task_id appended with the execution date, but can be templated. The
         name will always be appended with a random number to avoid name clashes.
-    :type job_name: string
+    :type job_name: str
     :param cluster_name: The name of the DataProc cluster.
-    :type cluster_name: string
+    :type cluster_name: str
     :param dataproc_hive_properties: Map for the Hive properties. Ideal to put in
         default arguments
     :type dataproc_hive_properties: dict
@@ -768,18 +769,18 @@ class DataProcHiveOperator(BaseOperator):
         UDFs and libs) and are ideal to put in default arguments.
     :type dataproc_hive_jars: list
     :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param region: The specified region where the dataproc cluster is created.
-    :type region: string
+    :type region: str
     :var dataproc_job_id: The actual "jobId" as submitted to the Dataproc API.
         This is useful for identifying or linking to the job in the Google Cloud Console
         Dataproc UI, as the actual "jobId" submitted to the Dataproc API is appended with
         an 8 character random string.
-    :vartype dataproc_job_id: string
+    :vartype dataproc_job_id: str
     """
     template_fields = ['query', 'variables', 'job_name', 'cluster_name', 'dataproc_jars']
     template_ext = ('.q',)
@@ -839,18 +840,18 @@ class DataProcSparkSqlOperator(BaseOperator):
     Start a Spark SQL query Job on a Cloud DataProc cluster.
 
     :param query: The query or reference to the query file (q extension). (templated)
-    :type query: string
+    :type query: str
     :param query_uri: The uri of a spark sql script on Cloud Storage.
-    :type query_uri: string
+    :type query_uri: str
     :param variables: Map of named parameters for the query. (templated)
     :type variables: dict
     :param job_name: The job name used in the DataProc cluster. This
         name by default is the task_id appended with the execution date, but can
         be templated. The name will always be appended with a random number to
         avoid name clashes. (templated)
-    :type job_name: string
+    :type job_name: str
     :param cluster_name: The name of the DataProc cluster. (templated)
-    :type cluster_name: string
+    :type cluster_name: str
     :param dataproc_spark_properties: Map for the Spark properties. Ideal to put in
         default arguments
     :type dataproc_spark_properties: dict
@@ -858,18 +859,18 @@ class DataProcSparkSqlOperator(BaseOperator):
         for UDFs and libs) and are ideal to put in default arguments.
     :type dataproc_spark_jars: list
     :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param region: The specified region where the dataproc cluster is created.
-    :type region: string
+    :type region: str
     :var dataproc_job_id: The actual "jobId" as submitted to the Dataproc API.
         This is useful for identifying or linking to the job in the Google Cloud Console
         Dataproc UI, as the actual "jobId" submitted to the Dataproc API is appended with
         an 8 character random string.
-    :vartype dataproc_job_id: string
+    :vartype dataproc_job_id: str
     """
     template_fields = ['query', 'variables', 'job_name', 'cluster_name', 'dataproc_jars']
     template_ext = ('.q',)
@@ -930,10 +931,10 @@ class DataProcSparkOperator(BaseOperator):
 
     :param main_jar: URI of the job jar provisioned on Cloud Storage. (use this or
             the main_class, not both together).
-    :type main_jar: string
+    :type main_jar: str
     :param main_class: Name of the job class. (use this or the main_jar, not both
         together).
-    :type main_class: string
+    :type main_class: str
     :param arguments: Arguments for the job. (templated)
     :type arguments: list
     :param archives: List of archived files that will be unpacked in the work
@@ -945,9 +946,9 @@ class DataProcSparkOperator(BaseOperator):
         name by default is the task_id appended with the execution date, but can
         be templated. The name will always be appended with a random number to
         avoid name clashes. (templated)
-    :type job_name: string
+    :type job_name: str
     :param cluster_name: The name of the DataProc cluster. (templated)
-    :type cluster_name: string
+    :type cluster_name: str
     :param dataproc_spark_properties: Map for the Spark properties. Ideal to put in
         default arguments
     :type dataproc_spark_properties: dict
@@ -955,18 +956,18 @@ class DataProcSparkOperator(BaseOperator):
         for UDFs and libs) and are ideal to put in default arguments.
     :type dataproc_spark_jars: list
     :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param region: The specified region where the dataproc cluster is created.
-    :type region: string
+    :type region: str
     :var dataproc_job_id: The actual "jobId" as submitted to the Dataproc API.
         This is useful for identifying or linking to the job in the Google Cloud Console
         Dataproc UI, as the actual "jobId" submitted to the Dataproc API is appended with
         an 8 character random string.
-    :vartype dataproc_job_id: string
+    :vartype dataproc_job_id: str
     """
 
     template_fields = ['arguments', 'job_name', 'cluster_name', 'dataproc_jars']
@@ -1029,10 +1030,10 @@ class DataProcHadoopOperator(BaseOperator):
 
     :param main_jar: URI of the job jar provisioned on Cloud Storage. (use this or
             the main_class, not both together).
-    :type main_jar: string
+    :type main_jar: str
     :param main_class: Name of the job class. (use this or the main_jar, not both
         together).
-    :type main_class: string
+    :type main_class: str
     :param arguments: Arguments for the job. (templated)
     :type arguments: list
     :param archives: List of archived files that will be unpacked in the work
@@ -1044,9 +1045,9 @@ class DataProcHadoopOperator(BaseOperator):
         name by default is the task_id appended with the execution date, but can
         be templated. The name will always be appended with a random number to
         avoid name clashes. (templated)
-    :type job_name: string
+    :type job_name: str
     :param cluster_name: The name of the DataProc cluster. (templated)
-    :type cluster_name: string
+    :type cluster_name: str
     :param dataproc_hadoop_properties: Map for the Hadoop properties. Ideal to put in
         default arguments
     :type dataproc_hadoop_properties: dict
@@ -1054,18 +1055,18 @@ class DataProcHadoopOperator(BaseOperator):
         for UDFs and libs) and are ideal to put in default arguments.
     :type dataproc_hadoop_jars: list
     :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param region: The specified region where the dataproc cluster is created.
-    :type region: string
+    :type region: str
     :var dataproc_job_id: The actual "jobId" as submitted to the Dataproc API.
         This is useful for identifying or linking to the job in the Google Cloud Console
         Dataproc UI, as the actual "jobId" submitted to the Dataproc API is appended with
         an 8 character random string.
-    :vartype dataproc_job_id: string
+    :vartype dataproc_job_id: str
     """
 
     template_fields = ['arguments', 'job_name', 'cluster_name', 'dataproc_jars']
@@ -1129,7 +1130,7 @@ class DataProcPySparkOperator(BaseOperator):
 
     :param main: [Required] The Hadoop Compatible Filesystem (HCFS) URI of the main
             Python file to use as the driver. Must be a .py file.
-    :type main: string
+    :type main: str
     :param arguments: Arguments for the job. (templated)
     :type arguments: list
     :param archives: List of archived files that will be unpacked in the work
@@ -1144,9 +1145,9 @@ class DataProcPySparkOperator(BaseOperator):
         name by default is the task_id appended with the execution date, but can
         be templated. The name will always be appended with a random number to
         avoid name clashes. (templated)
-    :type job_name: string
+    :type job_name: str
     :param cluster_name: The name of the DataProc cluster.
-    :type cluster_name: string
+    :type cluster_name: str
     :param dataproc_pyspark_properties: Map for the PySpark properties. Ideal to put in
         default arguments
     :type dataproc_pyspark_properties: dict
@@ -1154,18 +1155,18 @@ class DataProcPySparkOperator(BaseOperator):
         for UDFs and libs) and are ideal to put in default arguments.
     :type dataproc_pyspark_jars: list
     :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have
         domain-wide delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param region: The specified region where the dataproc cluster is created.
-    :type region: string
+    :type region: str
     :var dataproc_job_id: The actual "jobId" as submitted to the Dataproc API.
         This is useful for identifying or linking to the job in the Google Cloud Console
         Dataproc UI, as the actual "jobId" submitted to the Dataproc API is appended with
         an 8 character random string.
-    :vartype dataproc_job_id: string
+    :vartype dataproc_job_id: str
     """
 
     template_fields = ['arguments', 'job_name', 'cluster_name', 'dataproc_jars']
@@ -1299,18 +1300,18 @@ class DataprocWorkflowTemplateInstantiateOperator(DataprocWorkflowTemplateBaseOp
         https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.workflowTemplates/instantiate
 
     :param template_id: The id of the template. (templated)
-    :type template_id: string
+    :type template_id: str
     :param project_id: The ID of the google cloud project in which
         the template runs
-    :type project_id: string
+    :type project_id: str
     :param region: leave as 'global', might become relevant in the future
-    :type region: string
+    :type region: str
     :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     """
 
     template_fields = ['template_id']
@@ -1346,15 +1347,15 @@ class DataprocWorkflowTemplateInstantiateInlineOperator(
     :type template: map
     :param project_id: The ID of the google cloud project in which
         the template runs
-    :type project_id: string
+    :type project_id: str
     :param region: leave as 'global', might become relevant in the future
-    :type region: string
+    :type region: str
     :param gcp_conn_id: The connection ID to use connecting to Google Cloud Platform.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     """
 
     template_fields = ['template']
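
Editor's note: the Dataproc operators touched in this file are commonly chained into a transient-cluster flow (create cluster, run a job, delete cluster). The sketch below is illustrative only; cluster, project, zone, bucket and machine-type values are placeholders, and the region defaults to 'global' as the docstrings note.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.operators.dataproc_operator import (
        DataprocClusterCreateOperator,
        DataProcPySparkOperator,
        DataprocClusterDeleteOperator,
    )

    dag = DAG('dataproc_example', start_date=datetime(2018, 1, 1), schedule_interval=None)

    create_cluster = DataprocClusterCreateOperator(
        task_id='create_cluster',
        cluster_name='transient-cluster-{{ ds_nodash }}',  # cluster_name is templated
        project_id='my-project',
        num_workers=2,
        zone='us-central1-a',
        master_machine_type='n1-standard-4',
        worker_machine_type='n1-standard-4',
        dag=dag,
    )

    pyspark_job = DataProcPySparkOperator(
        task_id='pyspark_job',
        main='gs://my-bucket/jobs/process.py',
        cluster_name='transient-cluster-{{ ds_nodash }}',
        dag=dag,
    )

    delete_cluster = DataprocClusterDeleteOperator(
        task_id='delete_cluster',
        cluster_name='transient-cluster-{{ ds_nodash }}',
        project_id='my-project',
        dag=dag,
    )

    create_cluster >> pyspark_job >> delete_cluster
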
diff --git a/airflow/contrib/operators/datastore_export_operator.py b/airflow/contrib/operators/datastore_export_operator.py
index f6dc7cc571..9d95eadc74 100644
--- a/airflow/contrib/operators/datastore_export_operator.py
+++ b/airflow/contrib/operators/datastore_export_operator.py
@@ -29,19 +29,19 @@ class DatastoreExportOperator(BaseOperator):
     Export entities from Google Cloud Datastore to Cloud Storage
 
     :param bucket: name of the cloud storage bucket to backup data
-    :type bucket: string
+    :type bucket: str
     :param namespace: optional namespace path in the specified Cloud Storage bucket
         to backup data. If this namespace does not exist in GCS, it will be created.
     :type namespace: str
     :param datastore_conn_id: the name of the Datastore connection id to use
-    :type datastore_conn_id: string
+    :type datastore_conn_id: str
     :param cloud_storage_conn_id: the name of the cloud storage connection id to
         force-write backup
-    :type cloud_storage_conn_id: string
+    :type cloud_storage_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param entity_filter: description of what data from the project is included in the
         export, refer to
         https://cloud.google.com/datastore/docs/reference/rest/Shared.Types/EntityFilter
diff --git a/airflow/contrib/operators/datastore_import_operator.py b/airflow/contrib/operators/datastore_import_operator.py
index 401d36e05b..c79767f35e 100644
--- a/airflow/contrib/operators/datastore_import_operator.py
+++ b/airflow/contrib/operators/datastore_import_operator.py
@@ -28,10 +28,10 @@ class DatastoreImportOperator(BaseOperator):
     Import entities from Cloud Storage to Google Cloud Datastore
 
     :param bucket: container in Cloud Storage to store data
-    :type bucket: string
+    :type bucket: str
     :param file: path of the backup metadata file in the specified Cloud Storage bucket.
         It should have the extension .overall_export_metadata
-    :type file: string
+    :type file: str
     :param namespace: optional namespace of the backup metadata file in
         the specified Cloud Storage bucket.
     :type namespace: str
@@ -42,11 +42,11 @@ class DatastoreImportOperator(BaseOperator):
     :param labels: client-assigned labels for cloud storage
     :type labels: dict
     :param datastore_conn_id: the name of the connection id to use
-    :type datastore_conn_id: string
+    :type datastore_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param polling_interval_in_seconds: number of seconds to wait before polling for
         execution status again
     :type polling_interval_in_seconds: int
diff --git a/airflow/contrib/operators/file_to_gcs.py b/airflow/contrib/operators/file_to_gcs.py
index 807385b43c..a392a16891 100644
--- a/airflow/contrib/operators/file_to_gcs.py
+++ b/airflow/contrib/operators/file_to_gcs.py
@@ -28,17 +28,17 @@ class FileToGoogleCloudStorageOperator(BaseOperator):
     Uploads a file to Google Cloud Storage
 
     :param src: Path to the local file. (templated)
-    :type src: string
+    :type src: str
     :param dst: Destination path within the specified bucket. (templated)
-    :type dst: string
+    :type dst: str
     :param bucket: The bucket to upload to. (templated)
-    :type bucket: string
+    :type bucket: str
     :param google_cloud_storage_conn_id: The Airflow connection ID to upload with
-    :type google_cloud_storage_conn_id: string
+    :type google_cloud_storage_conn_id: str
     :param mime_type: The mime-type string
-    :type mime_type: string
+    :type mime_type: str
     :param delegate_to: The account to impersonate, if any
-    :type delegate_to: string
+    :type delegate_to: str
     """
     template_fields = ('src', 'dst', 'bucket')
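
Editor's note: a one-task sketch of the upload operator above; the local path, bucket and destination object are placeholders.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.operators.file_to_gcs import FileToGoogleCloudStorageOperator

    dag = DAG('file_to_gcs_example', start_date=datetime(2018, 1, 1), schedule_interval=None)

    upload_report = FileToGoogleCloudStorageOperator(
        task_id='upload_report',
        src='/tmp/reports/daily_report.csv',
        dst='reports/daily_report.csv',
        bucket='my-reports-bucket',
        mime_type='text/csv',
        google_cloud_storage_conn_id='google_cloud_default',
        dag=dag,
    )
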
 
diff --git a/airflow/contrib/operators/gcs_download_operator.py b/airflow/contrib/operators/gcs_download_operator.py
index ce272aedd5..1d168d4660 100644
--- a/airflow/contrib/operators/gcs_download_operator.py
+++ b/airflow/contrib/operators/gcs_download_operator.py
@@ -29,26 +29,26 @@ class GoogleCloudStorageDownloadOperator(BaseOperator):
     Downloads a file from Google Cloud Storage.
 
     :param bucket: The Google cloud storage bucket where the object is. (templated)
-    :type bucket: string
+    :type bucket: str
     :param object: The name of the object to download in the Google cloud
         storage bucket. (templated)
-    :type object: string
+    :type object: str
     :param filename: The file path on the local file system (where the
         operator is being executed) that the file should be downloaded to. (templated)
         If no filename passed, the downloaded data will not be stored on the local file
         system.
-    :type filename: string
+    :type filename: str
     :param store_to_xcom_key: If this param is set, the operator will push
         the contents of the downloaded file to XCom with the key set in this
         parameter. If not set, the downloaded data will not be pushed to XCom. (templated)
-    :type store_to_xcom_key: string
+    :type store_to_xcom_key: str
     :param google_cloud_storage_conn_id: The connection ID to use when
         connecting to Google cloud storage.
-    :type google_cloud_storage_conn_id: string
+    :type google_cloud_storage_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have
         domain-wide delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     """
     template_fields = ('bucket', 'object', 'filename', 'store_to_xcom_key',)
     ui_color = '#f0eee4'
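
Editor's note: an illustrative download task using the operator above; bucket, object and local filename are placeholders, and store_to_xcom_key could be set instead of (or alongside) filename to push the contents to XCom as the docstring describes.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.operators.gcs_download_operator import GoogleCloudStorageDownloadOperator

    dag = DAG('gcs_download_example', start_date=datetime(2018, 1, 1), schedule_interval=None)

    fetch_config = GoogleCloudStorageDownloadOperator(
        task_id='fetch_config',
        bucket='my-config-bucket',
        object='configs/settings.json',
        filename='/tmp/settings.json',  # omit to skip writing to the local file system
        google_cloud_storage_conn_id='google_cloud_default',
        dag=dag,
    )
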
diff --git a/airflow/contrib/operators/gcs_list_operator.py b/airflow/contrib/operators/gcs_list_operator.py
index 6474453afa..7b37b269a6 100644
--- a/airflow/contrib/operators/gcs_list_operator.py
+++ b/airflow/contrib/operators/gcs_list_operator.py
@@ -30,21 +30,21 @@ class GoogleCloudStorageListOperator(BaseOperator):
      `xcom` in the downstream task.
 
     :param bucket: The Google cloud storage bucket to find the objects. (templated)
-    :type bucket: string
+    :type bucket: str
     :param prefix: Prefix string which filters objects whose name begin with
            this prefix. (templated)
-    :type prefix: string
+    :type prefix: str
     :param delimiter: The delimiter by which you want to filter the objects. (templated)
         For example, to list the CSV files in a directory in GCS you would use
         delimiter='.csv'.
-    :type delimiter: string
+    :type delimiter: str
     :param google_cloud_storage_conn_id: The connection ID to use when
         connecting to Google cloud storage.
-    :type google_cloud_storage_conn_id: string
+    :type google_cloud_storage_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have
         domain-wide delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
 
     **Example**:
         The following Operator would list all the Avro files from ``sales/sales-2017``
diff --git a/airflow/contrib/operators/gcs_operator.py b/airflow/contrib/operators/gcs_operator.py
index ef5e8de504..c685722185 100644
--- a/airflow/contrib/operators/gcs_operator.py
+++ b/airflow/contrib/operators/gcs_operator.py
@@ -33,7 +33,7 @@ class GoogleCloudStorageCreateBucketOperator(BaseOperator):
             https://cloud.google.com/storage/docs/bucketnaming.html#requirements
 
     :param bucket_name: The name of the bucket. (templated)
-    :type bucket_name: string
+    :type bucket_name: str
     :param storage_class: This defines how objects in the bucket are stored
             and determines the SLA and the cost of storage (templated). Values include
 
@@ -44,7 +44,7 @@ class GoogleCloudStorageCreateBucketOperator(BaseOperator):
             - ``COLDLINE``.
             If this value is not specified when the bucket is
             created, it will default to STANDARD.
-    :type storage_class: string
+    :type storage_class: str
     :param location: The location of the bucket. (templated)
         Object data for objects in the bucket resides in physical storage
         within this region. Defaults to US.
@@ -52,18 +52,18 @@ class GoogleCloudStorageCreateBucketOperator(BaseOperator):
         .. seealso::
             https://developers.google.com/storage/docs/bucket-locations
 
-    :type location: string
+    :type location: str
     :param project_id: The ID of the GCP Project. (templated)
-    :type project_id: string
+    :type project_id: str
     :param labels: User-provided labels, in key/value pairs.
     :type labels: dict
     :param google_cloud_storage_conn_id: The connection ID to use when
         connecting to Google cloud storage.
-    :type google_cloud_storage_conn_id: string
+    :type google_cloud_storage_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must
         have domain-wide delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
 
     **Example**:
         The following Operator would create a new bucket ``test-bucket``
diff --git a/airflow/contrib/operators/gcs_to_bq.py b/airflow/contrib/operators/gcs_to_bq.py
index 40b6d2381f..f7f5378f40 100644
--- a/airflow/contrib/operators/gcs_to_bq.py
+++ b/airflow/contrib/operators/gcs_to_bq.py
@@ -35,42 +35,42 @@ class GoogleCloudStorageToBigQueryOperator(BaseOperator):
     Google cloud storage must be a JSON file with the schema fields in it.
 
     :param bucket: The bucket to load from. (templated)
-    :type bucket: string
+    :type bucket: str
     :param source_objects: List of Google cloud storage URIs to load from. (templated)
         If source_format is 'DATASTORE_BACKUP', the list must only contain a single URI.
     :type source_objects: list of str
     :param destination_project_dataset_table: The dotted (<project>.)<dataset>.<table>
         BigQuery table to load data into. If <project> is not included,
         project will be the project defined in the connection json. (templated)
-    :type destination_project_dataset_table: string
+    :type destination_project_dataset_table: str
     :param schema_fields: If set, the schema field list as defined here:
         https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
         Should not be set when source_format is 'DATASTORE_BACKUP'.
     :type schema_fields: list
     :param schema_object: If set, a GCS object path pointing to a .json file that
         contains the schema for the table. (templated)
-    :type schema_object: string
+    :type schema_object: str
     :param source_format: File format to export.
-    :type source_format: string
+    :type source_format: str
     :param compression: [Optional] The compression type of the data source.
         Possible values include GZIP and NONE.
         The default value is NONE.
         This setting is ignored for Google Cloud Bigtable,
         Google Cloud Datastore backups and Avro formats.
-    :type compression: string
+    :type compression: str
     :param create_disposition: The create disposition if the table doesn't exist.
-    :type create_disposition: string
+    :type create_disposition: str
     :param skip_leading_rows: Number of rows to skip when loading from a CSV.
     :type skip_leading_rows: int
     :param write_disposition: The write disposition if the table already exists.
-    :type write_disposition: string
+    :type write_disposition: str
     :param field_delimiter: The delimiter to use when loading from a CSV.
-    :type field_delimiter: string
+    :type field_delimiter: str
     :param max_bad_records: The maximum number of bad records that BigQuery can
         ignore when running the job.
     :type max_bad_records: int
     :param quote_character: The value that is used to quote data sections in a CSV file.
-    :type quote_character: string
+    :type quote_character: str
     :param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
         extra values that are not represented in the table schema.
         If true, the extra values are ignored. If false, records with extra columns
@@ -91,16 +91,16 @@ class GoogleCloudStorageToBigQueryOperator(BaseOperator):
         execute() command, which in turn gets stored in XCom for future
         operators to use. This can be helpful with incremental loads--during
         future executions, you can pick up from the max ID.
-    :type max_id_key: string
+    :type max_id_key: str
     :param bigquery_conn_id: Reference to a specific BigQuery hook.
-    :type bigquery_conn_id: string
+    :type bigquery_conn_id: str
     :param google_cloud_storage_conn_id: Reference to a specific Google
         cloud storage hook.
-    :type google_cloud_storage_conn_id: string
+    :type google_cloud_storage_conn_id: str
     :param delegate_to: The account to impersonate, if any. For this to
         work, the service account making the request must have domain-wide
         delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param schema_update_options: Allows the schema of the destination
         table to be updated as a side effect of the load job.
     :type schema_update_options: list
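
Editor's note: a load-job sketch for the operator above, assuming a JSON schema file already sits in GCS; bucket, object, table and connection names are placeholders.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.operators.gcs_to_bq import GoogleCloudStorageToBigQueryOperator

    dag = DAG('gcs_to_bq_example', start_date=datetime(2018, 1, 1), schedule_interval=None)

    load_events = GoogleCloudStorageToBigQueryOperator(
        task_id='load_events',
        bucket='my-landing-bucket',
        source_objects=['events/2018-09-01/*.csv'],
        destination_project_dataset_table='my-project.analytics.events',
        schema_object='schemas/events.json',  # GCS path to a .json schema file
        source_format='CSV',
        skip_leading_rows=1,
        write_disposition='WRITE_APPEND',
        field_delimiter=',',
        bigquery_conn_id='bigquery_default',
        google_cloud_storage_conn_id='google_cloud_default',
        dag=dag,
    )
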
diff --git a/airflow/contrib/operators/gcs_to_gcs.py b/airflow/contrib/operators/gcs_to_gcs.py
index 523240c909..12fbff5276 100644
--- a/airflow/contrib/operators/gcs_to_gcs.py
+++ b/airflow/contrib/operators/gcs_to_gcs.py
@@ -28,7 +28,7 @@ class GoogleCloudStorageToGoogleCloudStorageOperator(BaseOperator):
 
     :param source_bucket: The source Google cloud storage bucket where the
          object is. (templated)
-    :type source_bucket: string
+    :type source_bucket: str
     :param source_object: The source name of the object to copy in the Google cloud
         storage bucket. (templated)
         If wildcards are used in this argument:
@@ -36,10 +36,10 @@ class GoogleCloudStorageToGoogleCloudStorageOperator(BaseOperator):
             bucket. The wildcard can appear inside the object name or at the
             end of the object name. Appending a wildcard to the bucket name is
             unsupported.
-    :type source_object: string
+    :type source_object: str
     :param destination_bucket: The destination Google cloud storage bucket
         where the object should be. (templated)
-    :type destination_bucket: string
+    :type destination_bucket: str
     :param destination_object: The destination name of the object in the
         destination Google cloud storage bucket. (templated)
         If a wildcard is supplied in the source_object argument, this is the
@@ -50,18 +50,18 @@ class GoogleCloudStorageToGoogleCloudStorageOperator(BaseOperator):
         file ``foo/baz`` will be copied to ``blah/baz``; to retain the prefix write
         the destination_object as e.g. ``blah/foo``, in which case the copied file
         will be named ``blah/foo/baz``.
-    :type destination_object: string
+    :type destination_object: str
     :param move_object: When move object is True, the object is moved instead
         of copied to the new location. This is the equivalent of a mv command
         as opposed to a cp command.
     :type move_object: bool
     :param google_cloud_storage_conn_id: The connection ID to use when
         connecting to Google cloud storage.
-    :type google_cloud_storage_conn_id: string
+    :type google_cloud_storage_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have
         domain-wide delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
 
     **Examples**:
         The following Operator would copy a single file named
diff --git a/airflow/contrib/operators/gcs_to_s3.py b/airflow/contrib/operators/gcs_to_s3.py
index 0df6170eab..d8b180c81a 100644
--- a/airflow/contrib/operators/gcs_to_s3.py
+++ b/airflow/contrib/operators/gcs_to_s3.py
@@ -28,21 +28,21 @@ class GoogleCloudStorageToS3Operator(GoogleCloudStorageListOperator):
     Synchronizes a Google Cloud Storage bucket with an S3 bucket.
 
     :param bucket: The Google Cloud Storage bucket to find the objects. (templated)
-    :type bucket: string
+    :type bucket: str
     :param prefix: Prefix string which filters objects whose name begin with
         this prefix. (templated)
-    :type prefix: string
+    :type prefix: str
     :param delimiter: The delimiter by which you want to filter the objects. (templated)
         For example, to list the CSV files in a directory in GCS you would use
         delimiter='.csv'.
-    :type delimiter: string
+    :type delimiter: str
     :param google_cloud_storage_conn_id: The connection ID to use when
         connecting to Google Cloud Storage.
-    :type google_cloud_storage_conn_id: string
+    :type google_cloud_storage_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have
         domain-wide delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param dest_aws_conn_id: The destination S3 connection
     :type dest_aws_conn_id: str
     :param dest_s3_key: The base S3 key to be used to store the files. (templated)
diff --git a/airflow/contrib/operators/jenkins_job_trigger_operator.py b/airflow/contrib/operators/jenkins_job_trigger_operator.py
index 37185f3e13..3e1aba0c1b 100644
--- a/airflow/contrib/operators/jenkins_job_trigger_operator.py
+++ b/airflow/contrib/operators/jenkins_job_trigger_operator.py
@@ -95,11 +95,11 @@ class JenkinsJobTriggerOperator(BaseOperator):
     version >= 0.4.15 to communicate with the Jenkins server.
     You'll also need to configure a Jenkins connection in the connections screen.
     :param jenkins_connection_id: The jenkins connection to use for this job
-    :type jenkins_connection_id: string
+    :type jenkins_connection_id: str
     :param job_name: The name of the job to trigger
-    :type job_name: string
+    :type job_name: str
     :param parameters: The parameters block to provide to jenkins. (templated)
-    :type parameters: string
+    :type parameters: str
     :param sleep_time: How long the operator will sleep between each status
     request for the job (min 1, default 10)
     :type sleep_time: int
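
Editor's note: a sketch of triggering a parameterized Jenkins job with the operator above; the connection id, job name and parameter values are placeholders.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.operators.jenkins_job_trigger_operator import JenkinsJobTriggerOperator

    dag = DAG('jenkins_trigger_example', start_date=datetime(2018, 1, 1), schedule_interval=None)

    trigger_build = JenkinsJobTriggerOperator(
        task_id='trigger_build',
        jenkins_connection_id='jenkins_default',
        job_name='deploy-my-service',
        parameters='{"GIT_REF": "master", "ENV": "staging"}',  # parameters block handed to Jenkins
        sleep_time=10,  # seconds between status polls
        dag=dag,
    )
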
diff --git a/airflow/contrib/operators/kubernetes_pod_operator.py b/airflow/contrib/operators/kubernetes_pod_operator.py
index bb4bf7fca1..d3c396e668 100644
--- a/airflow/contrib/operators/kubernetes_pod_operator.py
+++ b/airflow/contrib/operators/kubernetes_pod_operator.py
@@ -65,7 +65,7 @@ class KubernetesPodOperator(BaseOperator):
     :type in_cluster: bool
     :param cluster_context: context that points to kubernetes cluster.
         Ignored when in_cluster is True. If None, current-context is used.
-    :type cluster_context: string
+    :type cluster_context: str
     :param get_logs: get the stdout of the container as logs of the tasks
     :type get_logs: bool
     :param affinity: A dict containing a group of affinity scheduling rules
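
Editor's note: a hedged sketch of KubernetesPodOperator usage; only in_cluster, cluster_context and get_logs appear in the docstring excerpt above, while name, namespace, image, cmds and arguments are the operator's usual required arguments and are shown here with placeholder values.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator

    dag = DAG('k8s_pod_example', start_date=datetime(2018, 1, 1), schedule_interval=None)

    run_in_pod = KubernetesPodOperator(
        task_id='run_in_pod',
        name='echo-pod',
        namespace='default',
        image='python:3.6-slim',
        cmds=['python', '-c'],
        arguments=['print("hello from a pod")'],
        in_cluster=False,
        cluster_context='my-kube-context',  # ignored when in_cluster=True
        get_logs=True,  # stream the container's stdout as task logs
        dag=dag,
    )
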
diff --git a/airflow/contrib/operators/mlengine_operator.py b/airflow/contrib/operators/mlengine_operator.py
index 2e2cfb4fe9..c3d9075618 100644
--- a/airflow/contrib/operators/mlengine_operator.py
+++ b/airflow/contrib/operators/mlengine_operator.py
@@ -94,16 +94,16 @@ class MLEngineBatchPredictionOperator(BaseOperator):
 
     :param project_id: The Google Cloud project name where the
         prediction job is submitted. (templated)
-    :type project_id: string
+    :type project_id: str
 
     :param job_id: A unique id for the prediction job on Google Cloud
         ML Engine. (templated)
-    :type job_id: string
+    :type job_id: str
 
     :param data_format: The format of the input data.
         It will default to 'DATA_FORMAT_UNSPECIFIED' if it is not provided
         or is not one of ["TEXT", "TF_RECORD", "TF_RECORD_GZIP"].
-    :type data_format: string
+    :type data_format: str
 
     :param input_paths: A list of GCS paths of input data for batch
         prediction. Accepting wildcard operator *, but only at the end. (templated)
@@ -111,28 +111,28 @@ class MLEngineBatchPredictionOperator(BaseOperator):
 
     :param output_path: The GCS path where the prediction results are
         written to. (templated)
-    :type output_path: string
+    :type output_path: str
 
     :param region: The Google Compute Engine region to run the
         prediction job in. (templated)
-    :type region: string
+    :type region: str
 
     :param model_name: The Google Cloud ML Engine model to use for prediction.
         If version_name is not provided, the default version of this
         model will be used.
         Should not be None if version_name is provided.
         Should be None if uri is provided. (templated)
-    :type model_name: string
+    :type model_name: str
 
     :param version_name: The Google Cloud ML Engine model version to use for
         prediction.
         Should be None if uri is provided. (templated)
-    :type version_name: string
+    :type version_name: str
 
     :param uri: The GCS path of the saved model to use for prediction.
         Should be None if model_name is provided.
         It should be a GCS path pointing to a tensorflow SavedModel. (templated)
-    :type uri: string
+    :type uri: str
 
     :param max_worker_count: The maximum number of workers to be used
         for parallel processing. Defaults to 10 if not specified.
@@ -140,16 +140,16 @@ class MLEngineBatchPredictionOperator(BaseOperator):
 
     :param runtime_version: The Google Cloud ML Engine runtime version to use
         for batch prediction.
-    :type runtime_version: string
+    :type runtime_version: str
 
     :param gcp_conn_id: The connection ID used for connection to Google
         Cloud Platform.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
 
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must
         have domain-wide delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
 
     Raises:
         ``ValueError``: if a unique model/version origin cannot be determined.
@@ -281,7 +281,7 @@ class MLEngineModelOperator(BaseOperator):
 
     :param project_id: The Google Cloud project name to which MLEngine
         model belongs. (templated)
-    :type project_id: string
+    :type project_id: str
     :param model: A dictionary containing the information about the model.
         If the `operation` is `create`, then the `model` parameter should
         contain all the information about this model such as `name`.
@@ -293,13 +293,13 @@ class MLEngineModelOperator(BaseOperator):
 
         * ``create``: Creates a new model as provided by the `model` parameter.
         * ``get``: Gets a particular model where the name is specified in `model`.
-    :type operation: string
+    :type operation: str
     :param gcp_conn_id: The connection ID to use when fetching connection info.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have
         domain-wide delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     """
 
     template_fields = [
@@ -339,17 +339,17 @@ class MLEngineVersionOperator(BaseOperator):
 
     :param project_id: The Google Cloud project name to which MLEngine
         model belongs.
-    :type project_id: string
+    :type project_id: str
 
     :param model_name: The name of the Google Cloud ML Engine model that the version
         belongs to. (templated)
-    :type model_name: string
+    :type model_name: str
 
     :param version_name: A name to use for the version being operated upon.
         If not None and the `version` argument is None or does not have a value for
         the `name` key, then this will be populated in the payload for the
         `name` key. (templated)
-    :type version_name: string
+    :type version_name: str
 
     :param version: A dictionary containing the information about the version.
         If the `operation` is `create`, `version` should contain all the
@@ -378,15 +378,15 @@ class MLEngineVersionOperator(BaseOperator):
             model specified by `model_name`).
             The name of the version should be specified in the `version`
             parameter.
-    :type operation: string
+    :type operation: str
 
     :param gcp_conn_id: The connection ID to use when fetching connection info.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
 
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have
         domain-wide delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     """
 
     template_fields = [
@@ -447,56 +447,56 @@ class MLEngineTrainingOperator(BaseOperator):
 
     :param project_id: The Google Cloud project name within which MLEngine
         training job should run (templated).
-    :type project_id: string
+    :type project_id: str
 
     :param job_id: A unique templated id for the submitted Google MLEngine
         training job. (templated)
-    :type job_id: string
+    :type job_id: str
 
     :param package_uris: A list of package locations for MLEngine training job,
         which should include the main training program + any additional
         dependencies. (templated)
-    :type package_uris: string
+    :type package_uris: str
 
     :param training_python_module: The Python module name to run within MLEngine
         training job after installing 'package_uris' packages. (templated)
-    :type training_python_module: string
+    :type training_python_module: str
 
     :param training_args: A list of templated command line arguments to pass to
         the MLEngine training program. (templated)
-    :type training_args: string
+    :type training_args: str
 
     :param region: The Google Compute Engine region to run the MLEngine training
         job in (templated).
-    :type region: string
+    :type region: str
 
     :param scale_tier: Resource tier for MLEngine training job. (templated)
-    :type scale_tier: string
+    :type scale_tier: str
 
     :param runtime_version: The Google Cloud ML runtime version to use for
         training. (templated)
-    :type runtime_version: string
+    :type runtime_version: str
 
     :param python_version: The version of Python used in training. (templated)
-    :type python_version: string
+    :type python_version: str
 
     :param job_dir: A Google Cloud Storage path in which to store training
         outputs and other data needed for training. (templated)
-    :type job_dir: string
+    :type job_dir: str
 
     :param gcp_conn_id: The connection ID to use when fetching connection info.
-    :type gcp_conn_id: string
+    :type gcp_conn_id: str
 
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have
         domain-wide delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
 
     :param mode: Can be one of 'DRY_RUN'/'CLOUD'. In 'DRY_RUN' mode, no real
         training job will be launched, but the MLEngine training job request
         will be printed out. In 'CLOUD' mode, a real MLEngine training job
         creation request will be issued.
-    :type mode: string
+    :type mode: str
     """
 
     template_fields = [
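
To put the arguments documented above in context, here is a minimal sketch of an MLEngineTrainingOperator task. The project, bucket, and trainer package names are hypothetical, only arguments that appear in the docstring above are used, and mode='DRY_RUN' is chosen so that no real training job would be submitted.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.operators.mlengine_operator import MLEngineTrainingOperator

    dag = DAG('example_mlengine_training', start_date=datetime(2018, 1, 1), schedule_interval=None)

    # DRY_RUN only prints the MLEngine training job request instead of submitting it.
    train = MLEngineTrainingOperator(
        task_id='submit_training',
        project_id='my-gcp-project',                         # hypothetical project
        job_id='train_{{ ds_nodash }}',                      # templated, per the docstring
        package_uris=['gs://my-bucket/trainer-0.1.tar.gz'],  # hypothetical package location
        training_python_module='trainer.task',
        training_args=['--epochs', '10'],
        region='us-central1',
        scale_tier='BASIC',
        mode='DRY_RUN',
        dag=dag,
    )
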
diff --git a/airflow/contrib/operators/mlengine_operator_utils.py b/airflow/contrib/operators/mlengine_operator_utils.py
index 60001daea4..c2951b715d 100644
--- a/airflow/contrib/operators/mlengine_operator_utils.py
+++ b/airflow/contrib/operators/mlengine_operator_utils.py
@@ -108,16 +108,16 @@ def validate_err_and_count(summary):
     :param task_prefix: a prefix for the tasks. Only alphanumeric characters and
         hyphens are allowed (no underscores), since this will be used as the dataflow
         job name, which doesn't allow other characters.
-    :type task_prefix: string
+    :type task_prefix: str
 
     :param data_format: either of 'TEXT', 'TF_RECORD', 'TF_RECORD_GZIP'
-    :type data_format: string
+    :type data_format: str
 
     :param input_paths: a list of input paths to be sent to BatchPrediction.
     :type input_paths: list of strings
 
     :param prediction_path: GCS path to put the prediction results in.
-    :type prediction_path: string
+    :type prediction_path: str
 
     :param metric_fn_and_keys: a tuple of metric_fn and metric_keys:
         - metric_fn is a function that accepts a dictionary (for an instance),
@@ -132,17 +132,17 @@ def validate_err_and_count(summary):
     :param batch_prediction_job_id: the id to use for the Cloud ML Batch
         prediction job. Passed directly to the MLEngineBatchPredictionOperator as
         the job_id argument.
-    :type batch_prediction_job_id: string
+    :type batch_prediction_job_id: str
 
     :param project_id: the Google Cloud Platform project id in which to execute
         Cloud ML Batch Prediction and Dataflow jobs. If None, then the `dag`'s
         `default_args['project_id']` will be used.
-    :type project_id: string
+    :type project_id: str
 
     :param region: the Google Cloud Platform region in which to execute Cloud ML
         Batch Prediction and Dataflow jobs. If None, then the `dag`'s
         `default_args['region']` will be used.
-    :type region: string
+    :type region: str
 
     :param dataflow_options: options to run Dataflow jobs. If None, then the
         `dag`'s `default_args['dataflow_default_options']` will be used.
@@ -152,19 +152,19 @@ def validate_err_and_count(summary):
         tensorflow.estimator.export_savedmodel(). It cannot be used with
         model_name or version_name below. See MLEngineBatchPredictionOperator for
         more detail.
-    :type model_uri: string
+    :type model_uri: str
 
     :param model_name: Used to indicate a model to use for prediction. Can be
         used in combination with version_name, but cannot be used together with
         model_uri. See MLEngineBatchPredictionOperator for more detail. If None,
         then the `dag`'s `default_args['model_name']` will be used.
-    :type model_name: string
+    :type model_name: str
 
     :param version_name: Used to indicate a model version to use for prediction,
         in combination with model_name. Cannot be used together with model_uri.
         See MLEngineBatchPredictionOperator for more detail. If None, then the
         `dag`'s `default_args['version_name']` will be used.
-    :type version_name: string
+    :type version_name: str
 
     :param dag: The `DAG` to use for all Operators.
     :type dag: airflow.DAG
diff --git a/airflow/contrib/operators/mysql_to_gcs.py b/airflow/contrib/operators/mysql_to_gcs.py
index eb47465e8e..d9fc932ff5 100644
--- a/airflow/contrib/operators/mysql_to_gcs.py
+++ b/airflow/contrib/operators/mysql_to_gcs.py
@@ -58,18 +58,18 @@ def __init__(self,
                  **kwargs):
         """
         :param sql: The SQL to execute on the MySQL table.
-        :type sql: string
+        :type sql: str
         :param bucket: The bucket to upload to.
-        :type bucket: string
+        :type bucket: str
         :param filename: The filename to use as the object name when uploading
             to Google cloud storage. A {} should be specified in the filename
             to allow the operator to inject file numbers in cases where the
             file is split due to size.
-        :type filename: string
+        :type filename: str
         :param schema_filename: If set, the filename to use as the object name
             when uploading a .json file containing the BigQuery schema fields
             for the table that was dumped from MySQL.
-        :type schema_filename: string
+        :type schema_filename: str
         :param approx_max_file_size_bytes: This operator supports the ability
             to split large table dumps into multiple files (see notes in the
             filename param docs above). Google cloud storage allows for files
@@ -77,10 +77,10 @@ def __init__(self,
             file size of the splits.
         :type approx_max_file_size_bytes: long
         :param mysql_conn_id: Reference to a specific MySQL hook.
-        :type mysql_conn_id: string
+        :type mysql_conn_id: str
         :param google_cloud_storage_conn_id: Reference to a specific Google
             cloud storage hook.
-        :type google_cloud_storage_conn_id: string
+        :type google_cloud_storage_conn_id: str
         :param schema: The schema to use, if any. Should be a list of dict or
             a str. Pass a string if using Jinja template, otherwise, pass a list of
             dict. Examples could be seen: https://cloud.google.com/bigquery/docs
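
As a rough illustration of the export parameters documented above, the sketch below instantiates the operator defined in mysql_to_gcs.py. The class name MySqlToGoogleCloudStorageOperator is not shown in this hunk and is assumed here, as are the SQL, bucket, object names, and connection IDs.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.operators.mysql_to_gcs import MySqlToGoogleCloudStorageOperator

    dag = DAG('example_mysql_to_gcs', start_date=datetime(2018, 1, 1), schedule_interval=None)

    export = MySqlToGoogleCloudStorageOperator(
        task_id='export_orders',
        sql='SELECT * FROM orders',                    # hypothetical query
        bucket='my-export-bucket',                     # hypothetical bucket
        filename='exports/orders/part-{}.json',        # {} is replaced with a file number if the dump is split
        schema_filename='exports/orders/schema.json',  # BigQuery schema for the dumped table
        mysql_conn_id='mysql_default',
        google_cloud_storage_conn_id='google_cloud_default',
        dag=dag,
    )
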
diff --git a/airflow/contrib/operators/postgres_to_gcs_operator.py b/airflow/contrib/operators/postgres_to_gcs_operator.py
index 88b4d00e39..850d858f94 100644
--- a/airflow/contrib/operators/postgres_to_gcs_operator.py
+++ b/airflow/contrib/operators/postgres_to_gcs_operator.py
@@ -56,18 +56,18 @@ def __init__(self,
                  **kwargs):
         """
         :param sql: The SQL to execute on the Postgres table.
-        :type sql: string
+        :type sql: str
         :param bucket: The bucket to upload to.
-        :type bucket: string
+        :type bucket: str
         :param filename: The filename to use as the object name when uploading
             to Google Cloud Storage. A {} should be specified in the filename
             to allow the operator to inject file numbers in cases where the
             file is split due to size.
-        :type filename: string
+        :type filename: str
         :param schema_filename: If set, the filename to use as the object name
             when uploading a .json file containing the BigQuery schema fields
             for the table that was dumped from Postgres.
-        :type schema_filename: string
+        :type schema_filename: str
         :param approx_max_file_size_bytes: This operator supports the ability
             to split large table dumps into multiple files (see notes in the
             filename param docs above). Google Cloud Storage allows for files
@@ -75,10 +75,10 @@ def __init__(self,
             file size of the splits.
         :type approx_max_file_size_bytes: long
         :param postgres_conn_id: Reference to a specific Postgres hook.
-        :type postgres_conn_id: string
+        :type postgres_conn_id: str
         :param google_cloud_storage_conn_id: Reference to a specific Google
             cloud storage hook.
-        :type google_cloud_storage_conn_id: string
+        :type google_cloud_storage_conn_id: str
         :param delegate_to: The account to impersonate, if any. For this to
             work, the service account making the request must have domain-wide
             delegation enabled.
diff --git a/airflow/contrib/operators/pubsub_operator.py b/airflow/contrib/operators/pubsub_operator.py
index 2d55b19f85..e40828bf92 100644
--- a/airflow/contrib/operators/pubsub_operator.py
+++ b/airflow/contrib/operators/pubsub_operator.py
@@ -67,19 +67,19 @@ def __init__(
             **kwargs):
         """
         :param project: the GCP project ID where the topic will be created
-        :type project: string
+        :type project: str
         :param topic: the topic to create. Do not include the
             full topic path. In other words, instead of
             ``projects/{project}/topics/{topic}``, provide only
             ``{topic}``. (templated)
-        :type topic: string
+        :type topic: str
         :param gcp_conn_id: The connection ID to use connecting to
             Google Cloud Platform.
-        :type gcp_conn_id: string
+        :type gcp_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request
             must have domain-wide delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(PubSubTopicCreateOperator, self).__init__(*args, **kwargs)
 
@@ -163,28 +163,28 @@ def __init__(
             **kwargs):
         """
         :param topic_project: the GCP project ID where the topic exists
-        :type topic_project: string
+        :type topic_project: str
         :param topic: the topic to create. Do not include the
             full topic path. In other words, instead of
             ``projects/{project}/topics/{topic}``, provide only
             ``{topic}``. (templated)
-        :type topic: string
+        :type topic: str
         :param subscription: the Pub/Sub subscription name. If empty, a random
             name will be generated using the uuid module
-        :type subscription: string
+        :type subscription: str
         :param subscription_project: the GCP project ID where the subscription
             will be created. If empty, ``topic_project`` will be used.
-        :type subscription_project: string
+        :type subscription_project: str
         :param ack_deadline_secs: Number of seconds that a subscriber has to
             acknowledge each message pulled from the subscription
         :type ack_deadline_secs: int
         :param gcp_conn_id: The connection ID to use connecting to
             Google Cloud Platform.
-        :type gcp_conn_id: string
+        :type gcp_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request
             must have domain-wide delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(PubSubSubscriptionCreateOperator, self).__init__(*args, **kwargs)
 
@@ -248,22 +248,22 @@ def __init__(
             **kwargs):
         """
         :param project: the GCP project ID in which to work (templated)
-        :type project: string
+        :type project: str
         :param topic: the topic to delete. Do not include the
             full topic path. In other words, instead of
             ``projects/{project}/topics/{topic}``, provide only
             ``{topic}``. (templated)
-        :type topic: string
+        :type topic: str
         :param fail_if_not_exists: If True and the topic does not exist, fail
             the task
         :type fail_if_not_exists: bool
         :param gcp_conn_id: The connection ID to use connecting to
             Google Cloud Platform.
-        :type gcp_conn_id: string
+        :type gcp_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request
             must have domain-wide delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(PubSubTopicDeleteOperator, self).__init__(*args, **kwargs)
 
@@ -324,22 +324,22 @@ def __init__(
             **kwargs):
         """
         :param project: the GCP project ID in which to work (templated)
-        :type project: string
+        :type project: str
         :param subscription: the subscription to delete. Do not include the
             full subscription path. In other words, instead of
             ``projects/{project}/subscription/{subscription}``, provide only
             ``{subscription}``. (templated)
-        :type subscription: string
+        :type subscription: str
         :param fail_if_not_exists: If True and the subscription does not exist,
             fail the task
         :type fail_if_not_exists: bool
         :param gcp_conn_id: The connection ID to use connecting to
             Google Cloud Platform.
-        :type gcp_conn_id: string
+        :type gcp_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request
             must have domain-wide delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(PubSubSubscriptionDeleteOperator, self).__init__(*args, **kwargs)
 
@@ -396,12 +396,12 @@ def __init__(
             **kwargs):
         """
         :param project: the GCP project ID in which to work (templated)
-        :type project: string
+        :type project: str
         :param topic: the topic to which to publish. Do not include the
             full topic path. In other words, instead of
             ``projects/{project}/topics/{topic}``, provide only
             ``{topic}``. (templated)
-        :type topic: string
+        :type topic: str
         :param messages: a list of messages to be published to the
             topic. Each message is a dict with one or more of the
             following keys-value mappings:
@@ -413,11 +413,11 @@ def __init__(
         :type messages: list
         :param gcp_conn_id: The connection ID to use connecting to
             Google Cloud Platform.
-        :type gcp_conn_id: string
+        :type gcp_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request
             must have domain-wide delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(PubSubPublishOperator, self).__init__(*args, **kwargs)
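
For the topic operators documented above, a minimal PubSubTopicCreateOperator task might look like the sketch below; the project and topic names are hypothetical, and only arguments listed in the docstring are passed.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.operators.pubsub_operator import PubSubTopicCreateOperator

    dag = DAG('example_pubsub_topic', start_date=datetime(2018, 1, 1), schedule_interval=None)

    create_topic = PubSubTopicCreateOperator(
        task_id='create_topic',
        project='my-gcp-project',   # hypothetical GCP project ID
        topic='my-topic',           # short name only, not the full projects/.../topics/... path
        gcp_conn_id='google_cloud_default',
        dag=dag,
    )
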
 
diff --git a/airflow/contrib/operators/s3_list_operator.py b/airflow/contrib/operators/s3_list_operator.py
index a9e005eed3..3ca22d5932 100644
--- a/airflow/contrib/operators/s3_list_operator.py
+++ b/airflow/contrib/operators/s3_list_operator.py
@@ -30,14 +30,14 @@ class S3ListOperator(BaseOperator):
     used by `xcom` in the downstream task.
 
     :param bucket: The S3 bucket where to find the objects. (templated)
-    :type bucket: string
+    :type bucket: str
     :param prefix: Prefix string to filter the objects whose names begin with
         such prefix. (templated)
-    :type prefix: string
+    :type prefix: str
     :param delimiter: the delimiter marks key hierarchy. (templated)
-    :type delimiter: string
+    :type delimiter: str
     :param aws_conn_id: The connection ID to use when connecting to S3 storage.
-    :type aws_conn_id: string
+    :type aws_conn_id: str
     :param verify: Whether or not to verify SSL certificates for S3 connection.
         By default SSL certificates are verified.
         You can provide the following values:
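
A short usage sketch for S3ListOperator as documented above; the bucket, prefix, and connection values are hypothetical. The matching keys are returned by the task and can be pulled downstream via XCom, as the docstring notes.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.operators.s3_list_operator import S3ListOperator

    dag = DAG('example_s3_list', start_date=datetime(2018, 1, 1), schedule_interval=None)

    list_keys = S3ListOperator(
        task_id='list_raw_files',
        bucket='my-s3-bucket',      # hypothetical bucket
        prefix='raw/2018/',
        delimiter='/',
        aws_conn_id='aws_default',
        dag=dag,
    )
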
diff --git a/airflow/contrib/operators/s3_to_gcs_operator.py b/airflow/contrib/operators/s3_to_gcs_operator.py
index 35bd1f9371..5dd355a6fd 100644
--- a/airflow/contrib/operators/s3_to_gcs_operator.py
+++ b/airflow/contrib/operators/s3_to_gcs_operator.py
@@ -33,14 +33,14 @@ class S3ToGoogleCloudStorageOperator(S3ListOperator):
     destination path.
 
     :param bucket: The S3 bucket where to find the objects. (templated)
-    :type bucket: string
+    :type bucket: str
     :param prefix: Prefix string which filters objects whose names begin with
         such prefix. (templated)
-    :type prefix: string
+    :type prefix: str
     :param delimiter: the delimiter marks key hierarchy. (templated)
-    :type delimiter: string
+    :type delimiter: str
     :param aws_conn_id: The source S3 connection
-    :type aws_conn_id: string
+    :type aws_conn_id: str
     :param verify: Whether or not to verify SSL certificates for S3 connection.
         By default SSL certificates are verified.
         You can provide the following values:
@@ -53,14 +53,14 @@ class S3ToGoogleCloudStorageOperator(S3ListOperator):
     :type verify: bool or str
     :param dest_gcs_conn_id: The destination connection ID to use
         when connecting to Google Cloud Storage.
-    :type dest_gcs_conn_id: string
+    :type dest_gcs_conn_id: str
     :param dest_gcs: The destination Google Cloud Storage bucket and prefix
         where you want to store the files. (templated)
-    :type dest_gcs: string
+    :type dest_gcs: str
     :param delegate_to: The account to impersonate, if any.
         For this to work, the service account making the request must have
         domain-wide delegation enabled.
-    :type delegate_to: string
+    :type delegate_to: str
     :param replace: Whether you want to replace existing destination files
         or not.
     :type replace: bool
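
A minimal S3ToGoogleCloudStorageOperator sketch based on the docstring above; source and destination buckets plus the connection IDs are hypothetical placeholders.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.operators.s3_to_gcs_operator import S3ToGoogleCloudStorageOperator

    dag = DAG('example_s3_to_gcs', start_date=datetime(2018, 1, 1), schedule_interval=None)

    copy_to_gcs = S3ToGoogleCloudStorageOperator(
        task_id='copy_raw_files',
        bucket='my-s3-bucket',                # hypothetical source bucket
        prefix='raw/2018/',
        aws_conn_id='aws_default',
        dest_gcs_conn_id='google_cloud_default',
        dest_gcs='gs://my-gcs-bucket/raw/',   # destination bucket and prefix
        replace=False,                        # keep existing destination files
        dag=dag,
    )
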
diff --git a/airflow/contrib/operators/sagemaker_create_training_job_operator.py b/airflow/contrib/operators/sagemaker_create_training_job_operator.py
index 409c5f6aa9..5b600c707d 100644
--- a/airflow/contrib/operators/sagemaker_create_training_job_operator.py
+++ b/airflow/contrib/operators/sagemaker_create_training_job_operator.py
@@ -34,9 +34,9 @@ class SageMakerCreateTrainingJobOperator(BaseOperator):
        The configuration necessary to start a training job (templated)
        :type training_job_config: dict
        :param region_name: The AWS region_name
-       :type region_name: string
+       :type region_name: str
        :param sagemaker_conn_id: The SageMaker connection ID to use.
-       :type sagemaker_conn_id: string
+       :type sagemaker_conn_id: str
        :param use_db_config: Whether or not to use db config
        associated with sagemaker_conn_id.
        If set to true, will automatically update the training config
@@ -45,7 +45,7 @@ class SageMakerCreateTrainingJobOperator(BaseOperator):
        in the training_job_config, so be careful
        :type use_db_config: bool
        :param aws_conn_id: The AWS connection ID to use.
-       :type aws_conn_id: string
+       :type aws_conn_id: str
        :param wait_for_completion: if the operator should block
        until training job finishes
        :type wait_for_completion: bool
diff --git a/airflow/contrib/operators/sagemaker_create_tuning_job_operator.py b/airflow/contrib/operators/sagemaker_create_tuning_job_operator.py
index 0c40a9adc9..2dc3a88e97 100644
--- a/airflow/contrib/operators/sagemaker_create_tuning_job_operator.py
+++ b/airflow/contrib/operators/sagemaker_create_tuning_job_operator.py
@@ -31,9 +31,9 @@ class SageMakerCreateTuningJobOperator(BaseOperator):
        This operator returns The ARN of the model created in Amazon SageMaker
 
        :param sagemaker_conn_id: The SageMaker connection ID to use.
-       :type sagemaker_conn_id: string
+       :type sagemaker_conn_id: str
        :param region_name: The AWS region_name
-       :type region_name: string
+       :type region_name: str
        :param tuning_job_config:
        The configuration necessary to start a tuning job (templated)
        :type tuning_job_config: dict
@@ -55,7 +55,7 @@ class SageMakerCreateTuningJobOperator(BaseOperator):
        (Caution: be careful when setting this parameter because tuning can take a very long time)
        :type max_ingestion_time: int
        :param aws_conn_id: The AWS connection ID to use.
-       :type aws_conn_id: string
+       :type aws_conn_id: str
 
        **Example**:
            The following operator would start a tuning job when executed
diff --git a/airflow/contrib/operators/segment_track_event_operator.py b/airflow/contrib/operators/segment_track_event_operator.py
index 04f6ae6d41..5d2f2db539 100644
--- a/airflow/contrib/operators/segment_track_event_operator.py
+++ b/airflow/contrib/operators/segment_track_event_operator.py
@@ -27,16 +27,16 @@ class SegmentTrackEventOperator(BaseOperator):
     Send Track Event to Segment for a specified user_id and event
 
     :param user_id: The ID for this user in your database. (templated)
-    :type user_id: string
+    :type user_id: str
     :param event: The name of the event you're tracking. (templated)
-    :type event: string
+    :type event: str
     :param properties: A dictionary of properties for the event. (templated)
     :type properties: dict
     :param segment_conn_id: The connection ID to use when connecting to Segment.
-    :type segment_conn_id: string
+    :type segment_conn_id: str
     :param segment_debug_mode: Determines whether Segment should run in debug mode.
         Defaults to False
-    :type segment_debug_mode: boolean
+    :type segment_debug_mode: bool
     """
     template_fields = ('user_id', 'event', 'properties')
     ui_color = '#ffd700'
diff --git a/airflow/contrib/operators/snowflake_operator.py b/airflow/contrib/operators/snowflake_operator.py
index 39d7d496ea..6c3f403912 100644
--- a/airflow/contrib/operators/snowflake_operator.py
+++ b/airflow/contrib/operators/snowflake_operator.py
@@ -26,16 +26,16 @@ class SnowflakeOperator(BaseOperator):
     Executes sql code in a Snowflake database
 
     :param snowflake_conn_id: reference to specific snowflake connection id
-    :type snowflake_conn_id: string
+    :type snowflake_conn_id: str
     :param sql: the sql code to be executed. (templated)
     :type sql: Can receive a str representing a sql statement,
         a list of str (sql statements), or reference to a template file.
         Template references are recognized by str ending in '.sql'
     :param warehouse: name of warehouse which overwrite defined
         one in connection
-    :type warehouse: string
+    :type warehouse: str
     :param database: name of database which overwrite defined one in connection
-    :type database: string
+    :type database: str
     """
 
     template_fields = ('sql',)
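
A minimal SnowflakeOperator sketch following the docstring above; the warehouse, database, and the .sql template file name are hypothetical, and the sql argument could equally be a literal statement or a list of statements.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.operators.snowflake_operator import SnowflakeOperator

    dag = DAG('example_snowflake', start_date=datetime(2018, 1, 1), schedule_interval=None)

    load = SnowflakeOperator(
        task_id='run_load',
        snowflake_conn_id='snowflake_default',
        sql='load_orders.sql',      # recognized as a template file because it ends in '.sql'
        warehouse='LOAD_WH',        # hypothetical; overrides the warehouse defined in the connection
        database='ANALYTICS',       # hypothetical; overrides the database defined in the connection
        dag=dag,
    )
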
diff --git a/airflow/contrib/operators/vertica_operator.py b/airflow/contrib/operators/vertica_operator.py
index 41072ff82b..37ccd134e8 100644
--- a/airflow/contrib/operators/vertica_operator.py
+++ b/airflow/contrib/operators/vertica_operator.py
@@ -26,7 +26,7 @@ class VerticaOperator(BaseOperator):
     Executes sql code in a specific Vertica database
 
     :param vertica_conn_id: reference to a specific Vertica database
-    :type vertica_conn_id: string
+    :type vertica_conn_id: str
     :param sql: the sql code to be executed. (templated)
     :type sql: Can receive a str representing a sql statement,
         a list of str (sql statements), or reference to a template file.
diff --git a/airflow/contrib/sensors/bash_sensor.py b/airflow/contrib/sensors/bash_sensor.py
index 26fbb06c18..7372a419b6 100644
--- a/airflow/contrib/sensors/bash_sensor.py
+++ b/airflow/contrib/sensors/bash_sensor.py
@@ -33,7 +33,7 @@ class BashSensor(BaseSensorOperator):
 
     :param bash_command: The command, set of commands or reference to a
         bash script (must be '.sh') to be executed.
-    :type bash_command: string
+    :type bash_command: str
 
     :param env: If env is not None, it must be a mapping that defines the
         environment variables for the new process; these are used instead
@@ -41,7 +41,7 @@ class BashSensor(BaseSensorOperator):
         behavior. (templated)
     :type env: dict
     :param output_encoding: output encoding of bash command.
-    :type output_encoding: string
+    :type output_encoding: str
     """
 
     template_fields = ('bash_command', 'env')
diff --git a/airflow/contrib/sensors/bigquery_sensor.py b/airflow/contrib/sensors/bigquery_sensor.py
index 2e496f6889..89ec476d9d 100644
--- a/airflow/contrib/sensors/bigquery_sensor.py
+++ b/airflow/contrib/sensors/bigquery_sensor.py
@@ -28,19 +28,19 @@ class BigQueryTableSensor(BaseSensorOperator):
         :param project_id: The Google cloud project in which to look for the table.
             The connection supplied to the hook must provide
             access to the specified project.
-        :type project_id: string
+        :type project_id: str
         :param dataset_id: The name of the dataset in which to look for the table.
-        :type dataset_id: string
+        :type dataset_id: str
         :param table_id: The name of the table to check the existence of.
-        :type table_id: string
+        :type table_id: str
         :param bigquery_conn_id: The connection ID to use when connecting to
             Google BigQuery.
-        :type bigquery_conn_id: string
+        :type bigquery_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must
             have domain-wide delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
     """
     template_fields = ('project_id', 'dataset_id', 'table_id',)
     ui_color = '#f0eee4'
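
A sketch of BigQueryTableSensor using the arguments documented above, plus the standard poke_interval inherited from BaseSensorOperator (an assumption, since it is not shown in this hunk); the project, dataset, and table names are hypothetical.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.sensors.bigquery_sensor import BigQueryTableSensor

    dag = DAG('example_bq_sensor', start_date=datetime(2018, 1, 1), schedule_interval=None)

    wait_for_table = BigQueryTableSensor(
        task_id='wait_for_table',
        project_id='my-gcp-project',        # hypothetical project
        dataset_id='reporting',
        table_id='daily_summary',
        bigquery_conn_id='bigquery_default',
        poke_interval=300,                  # check every five minutes
        dag=dag,
    )
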
diff --git a/airflow/contrib/sensors/cassandra_record_sensor.py b/airflow/contrib/sensors/cassandra_record_sensor.py
index 493a6ba6b1..bbb2f50990 100644
--- a/airflow/contrib/sensors/cassandra_record_sensor.py
+++ b/airflow/contrib/sensors/cassandra_record_sensor.py
@@ -43,12 +43,12 @@ def __init__(self, table, keys, cassandra_conn_id, *args, **kwargs):
 
         :param table: Target Cassandra table.
                       Use dot notation to target a specific keyspace.
-        :type table: string
+        :type table: str
         :param keys: The keys and their values to be monitored
         :type keys: dict
         :param cassandra_conn_id: The connection ID to use
                                   when connecting to Cassandra cluster
-        :type cassandra_conn_id: string
+        :type cassandra_conn_id: str
         """
         super(CassandraRecordSensor, self).__init__(*args, **kwargs)
         self.cassandra_conn_id = cassandra_conn_id
diff --git a/airflow/contrib/sensors/cassandra_table_sensor.py b/airflow/contrib/sensors/cassandra_table_sensor.py
index 5a85995aca..088c82bdd3 100644
--- a/airflow/contrib/sensors/cassandra_table_sensor.py
+++ b/airflow/contrib/sensors/cassandra_table_sensor.py
@@ -41,10 +41,10 @@ def __init__(self, table, cassandra_conn_id, *args, **kwargs):
 
         :param table: Target Cassandra table.
                       Use dot notation to target a specific keyspace.
-        :type table: string
+        :type table: str
         :param cassandra_conn_id: The connection ID to use
                                   when connecting to Cassandra cluster
-        :type cassandra_conn_id: string
+        :type cassandra_conn_id: str
         """
         super(CassandraTableSensor, self).__init__(*args, **kwargs)
         self.cassandra_conn_id = cassandra_conn_id
diff --git a/airflow/contrib/sensors/datadog_sensor.py b/airflow/contrib/sensors/datadog_sensor.py
index 817fee061a..01b7b9fc51 100644
--- a/airflow/contrib/sensors/datadog_sensor.py
+++ b/airflow/contrib/sensors/datadog_sensor.py
@@ -32,7 +32,7 @@ class DatadogSensor(BaseSensorOperator):
     Airflow runs.
 
     :param datadog_conn_id: The connection to datadog, containing metadata for api keys.
-    :param datadog_conn_id: string
+    :type datadog_conn_id: str
     """
     ui_color = '#66c3dd'
 
diff --git a/airflow/contrib/sensors/emr_job_flow_sensor.py b/airflow/contrib/sensors/emr_job_flow_sensor.py
index 5a17a012d4..ba8a018134 100644
--- a/airflow/contrib/sensors/emr_job_flow_sensor.py
+++ b/airflow/contrib/sensors/emr_job_flow_sensor.py
@@ -27,7 +27,7 @@ class EmrJobFlowSensor(EmrBaseSensor):
     If it fails, the sensor errors, failing the task.
 
     :param job_flow_id: job_flow_id to check the state of
-    :type job_flow_id: string
+    :type job_flow_id: str
     """
 
     NON_TERMINAL_STATES = ['STARTING', 'BOOTSTRAPPING', 'RUNNING',
diff --git a/airflow/contrib/sensors/emr_step_sensor.py b/airflow/contrib/sensors/emr_step_sensor.py
index 6e79f6353f..65bb7c95b1 100644
--- a/airflow/contrib/sensors/emr_step_sensor.py
+++ b/airflow/contrib/sensors/emr_step_sensor.py
@@ -27,9 +27,9 @@ class EmrStepSensor(EmrBaseSensor):
     If it fails, the sensor errors, failing the task.
 
     :param job_flow_id: job_flow_id which contains the step check the state of
-    :type job_flow_id: string
+    :type job_flow_id: str
     :param step_id: step to check the state of
-    :type step_id: string
+    :type step_id: str
     """
 
     NON_TERMINAL_STATES = ['PENDING', 'RUNNING', 'CONTINUE']
diff --git a/airflow/contrib/sensors/file_sensor.py b/airflow/contrib/sensors/file_sensor.py
index 3e49abdfb5..048dd06258 100644
--- a/airflow/contrib/sensors/file_sensor.py
+++ b/airflow/contrib/sensors/file_sensor.py
@@ -35,10 +35,10 @@ class FileSensor(BaseSensorOperator):
 
     :param fs_conn_id: reference to the File (path)
         connection id
-    :type fs_conn_id: string
+    :type fs_conn_id: str
     :param filepath: File or folder name (relative to
         the base path set within the connection)
-    :type fs_conn_id: string
+    :type filepath: str
     """
     template_fields = ('filepath',)
     ui_color = '#91818a'
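
A minimal FileSensor sketch tied to the two parameters documented above; the connection ID and the relative path are hypothetical placeholders.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.sensors.file_sensor import FileSensor

    dag = DAG('example_file_sensor', start_date=datetime(2018, 1, 1), schedule_interval=None)

    wait_for_drop = FileSensor(
        task_id='wait_for_drop',
        fs_conn_id='fs_default',        # a File (path) connection that defines the base path
        filepath='incoming/data.csv',   # relative to the base path set in the connection
        dag=dag,
    )
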
diff --git a/airflow/contrib/sensors/gcs_sensor.py b/airflow/contrib/sensors/gcs_sensor.py
index 23cd760a41..5a4f73e61e 100644
--- a/airflow/contrib/sensors/gcs_sensor.py
+++ b/airflow/contrib/sensors/gcs_sensor.py
@@ -27,17 +27,17 @@ class GoogleCloudStorageObjectSensor(BaseSensorOperator):
     Create a new GoogleCloudStorageObjectSensor.
 
         :param bucket: The Google cloud storage bucket where the object is.
-        :type bucket: string
+        :type bucket: str
         :param object: The name of the object to check in the Google cloud
             storage bucket.
-        :type object: string
+        :type object: str
         :param google_cloud_storage_conn_id: The connection ID to use when
             connecting to Google cloud storage.
-        :type google_cloud_storage_conn_id: string
+        :type google_cloud_storage_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have
             domain-wide delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
     """
     template_fields = ('bucket', 'object')
     ui_color = '#f0eee4'
@@ -79,21 +79,21 @@ class GoogleCloudStorageObjectUpdatedSensor(BaseSensorOperator):
     Create a new GoogleCloudStorageObjectUpdatedSensor.
 
         :param bucket: The Google cloud storage bucket where the object is.
-        :type bucket: string
+        :type bucket: str
         :param object: The name of the object to download in the Google cloud
             storage bucket.
-        :type object: string
+        :type object: str
         :param ts_func: Callback for defining the update condition. The default callback
             returns execution_date + schedule_interval. The callback takes the context
             as parameter.
         :type ts_func: function
         :param google_cloud_storage_conn_id: The connection ID to use when
             connecting to Google cloud storage.
-        :type google_cloud_storage_conn_id: string
+        :type google_cloud_storage_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have domain-wide
             delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
     """
     template_fields = ('bucket', 'object')
     template_ext = ('.sql',)
@@ -129,17 +129,17 @@ class GoogleCloudStoragePrefixSensor(BaseSensorOperator):
     Create a new GoogleCloudStorageObjectSensor.
 
         :param bucket: The Google cloud storage bucket where the object is.
-        :type bucket: string
+        :type bucket: str
         :param prefix: The name of the prefix to check in the Google cloud
             storage bucket.
-        :type prefix: string
+        :type prefix: str
         :param google_cloud_storage_conn_id: The connection ID to use when
             connecting to Google cloud storage.
-        :type google_cloud_storage_conn_id: string
+        :type google_cloud_storage_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request must have
             domain-wide delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
     """
     template_fields = ('bucket', 'prefix')
     ui_color = '#f0eee4'
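
A sketch of GoogleCloudStoragePrefixSensor with a hypothetical bucket and a templated prefix (bucket and prefix are both listed in template_fields above); only documented arguments are used.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.sensors.gcs_sensor import GoogleCloudStoragePrefixSensor

    dag = DAG('example_gcs_prefix_sensor', start_date=datetime(2018, 1, 1), schedule_interval=None)

    wait_for_export = GoogleCloudStoragePrefixSensor(
        task_id='wait_for_export',
        bucket='my-gcs-bucket',           # hypothetical bucket
        prefix='exports/{{ ds }}/',       # templated prefix to check for
        google_cloud_storage_conn_id='google_cloud_default',
        dag=dag,
    )
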
diff --git a/airflow/contrib/sensors/mongo_sensor.py b/airflow/contrib/sensors/mongo_sensor.py
index 3ed49a6f9d..8fd32a0775 100644
--- a/airflow/contrib/sensors/mongo_sensor.py
+++ b/airflow/contrib/sensors/mongo_sensor.py
@@ -39,12 +39,12 @@ def __init__(self, collection, query, mongo_conn_id="mongo_default", *args, **kw
         Create a new MongoSensor
 
         :param collection: Target MongoDB collection.
-        :type collection: string
+        :type collection: str
         :param query: The query to find the target document.
         :type query: dict
         :param mongo_conn_id: The connection ID to use
                               when connecting to MongoDB.
-        :type mongo_conn_id: string
+        :type mongo_conn_id: str
         """
         super(MongoSensor, self).__init__(*args, **kwargs)
         self.mongo_conn_id = mongo_conn_id
diff --git a/airflow/contrib/sensors/pubsub_sensor.py b/airflow/contrib/sensors/pubsub_sensor.py
index 7d1721570d..8104fb5699 100644
--- a/airflow/contrib/sensors/pubsub_sensor.py
+++ b/airflow/contrib/sensors/pubsub_sensor.py
@@ -54,10 +54,10 @@ def __init__(
             **kwargs):
         """
         :param project: the GCP project ID for the subscription (templated)
-        :type project: string
+        :type project: str
         :param subscription: the Pub/Sub subscription name. Do not include the
             full subscription path.
-        :type subscription: string
+        :type subscription: str
         :param max_messages: The maximum number of messages to retrieve per
             PubSub pull request
         :type max_messages: int
@@ -69,11 +69,11 @@ def __init__(
         :type ack_messages: bool
         :param gcp_conn_id: The connection ID to use connecting to
             Google Cloud Platform.
-        :type gcp_conn_id: string
+        :type gcp_conn_id: str
         :param delegate_to: The account to impersonate, if any.
             For this to work, the service account making the request
             must have domain-wide delegation enabled.
-        :type delegate_to: string
+        :type delegate_to: str
         """
         super(PubSubPullSensor, self).__init__(*args, **kwargs)
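
A minimal PubSubPullSensor sketch with a hypothetical project and subscription, acknowledging messages as they are pulled; only arguments listed in the docstring above are passed.

    from datetime import datetime
    from airflow import DAG
    from airflow.contrib.sensors.pubsub_sensor import PubSubPullSensor

    dag = DAG('example_pubsub_pull', start_date=datetime(2018, 1, 1), schedule_interval=None)

    pull_messages = PubSubPullSensor(
        task_id='pull_messages',
        project='my-gcp-project',         # hypothetical GCP project ID
        subscription='my-subscription',   # short name, not the full subscription path
        max_messages=5,
        ack_messages=True,
        gcp_conn_id='google_cloud_default',
        dag=dag,
    )
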
 
diff --git a/airflow/contrib/sensors/qubole_sensor.py b/airflow/contrib/sensors/qubole_sensor.py
index d67fa8557e..f79f58746b 100644
--- a/airflow/contrib/sensors/qubole_sensor.py
+++ b/airflow/contrib/sensors/qubole_sensor.py
@@ -31,7 +31,7 @@ class QuboleSensor(BaseSensorOperator):
     Base class for all Qubole Sensors
 
     :param qubole_conn_id: The qubole connection to run the sensor against
-    :type qubole_conn_id: string
+    :type qubole_conn_id: str
     :param data: a JSON object containing payload, whose presence needs to be checked
     :type data: a JSON object
 
diff --git a/airflow/contrib/sensors/redis_key_sensor.py b/airflow/contrib/sensors/redis_key_sensor.py
index baf3e161f5..a2d190baae 100644
--- a/airflow/contrib/sensors/redis_key_sensor.py
+++ b/airflow/contrib/sensors/redis_key_sensor.py
@@ -34,9 +34,9 @@ def __init__(self, key, redis_conn_id, *args, **kwargs):
         Create a new RedisKeySensor
 
         :param key: The key to be monitored
-        :type key: string
+        :type key: str
         :param redis_conn_id: The connection ID to use when connecting to Redis DB.
-        :type redis_conn_id: string
+        :type redis_conn_id: str
         """
         super(RedisKeySensor, self).__init__(*args, **kwargs)
         self.redis_conn_id = redis_conn_id
diff --git a/airflow/contrib/sensors/sagemaker_training_sensor.py b/airflow/contrib/sensors/sagemaker_training_sensor.py
index 90c62ce988..963ab7d29f 100644
--- a/airflow/contrib/sensors/sagemaker_training_sensor.py
+++ b/airflow/contrib/sensors/sagemaker_training_sensor.py
@@ -28,7 +28,7 @@ class SageMakerTrainingSensor(SageMakerBaseSensor):
     If it fails the sensor errors, failing the task.
 
     :param job_name: job_name of the training instance to check the state of
-    :type job_name: string
+    :type job_name: str
     """
 
     template_fields = ['job_name']
diff --git a/airflow/contrib/sensors/sagemaker_tuning_sensor.py b/airflow/contrib/sensors/sagemaker_tuning_sensor.py
index bc74e3a5c5..b2dd4604c0 100644
--- a/airflow/contrib/sensors/sagemaker_tuning_sensor.py
+++ b/airflow/contrib/sensors/sagemaker_tuning_sensor.py
@@ -29,9 +29,9 @@ class SageMakerTuningSensor(SageMakerBaseSensor):
     containing the failure reason.
 
     :param job_name: job_name of the tuning instance to check the state of
-    :type job_name: string
+    :type job_name: str
     :param region_name: The AWS region_name
-    :type region_name: string
+    :type region_name: str
     """
 
     template_fields = ['job_name']
diff --git a/airflow/executors/local_executor.py b/airflow/executors/local_executor.py
index 291d6e1277..3e622da053 100644
--- a/airflow/executors/local_executor.py
+++ b/airflow/executors/local_executor.py
@@ -77,7 +77,7 @@ def execute_work(self, key, command):
         :param key: the key to identify the TI
         :type key: Tuple(dag_id, task_id, execution_date)
         :param command: the command to execute
-        :type command: string
+        :type command: str
         """
         if key is None:
             return
@@ -146,7 +146,7 @@ def execute_async(self, key, command):
             :param key: the key to identify the TI
             :type key: Tuple(dag_id, task_id, execution_date)
             :param command: the command to execute
-            :type command: string
+            :type command: str
             """
             local_worker = LocalWorker(self.executor.result_queue)
             local_worker.key = key
@@ -191,7 +191,7 @@ def execute_async(self, key, command):
             :param key: the key to identify the TI
             :type key: Tuple(dag_id, task_id, execution_date)
             :param command: the command to execute
-            :type command: string
+            :type command: str
             """
             self.executor.queue.put((key, command))
 
diff --git a/airflow/hooks/S3_hook.py b/airflow/hooks/S3_hook.py
index 2d64b31534..68670d3635 100644
--- a/airflow/hooks/S3_hook.py
+++ b/airflow/hooks/S3_hook.py
@@ -326,7 +326,7 @@ def load_string(self,
         This is provided as a convenience to drop a string in S3. It uses the
         boto infrastructure to ship a file to s3.
 
-        :param string_data: string to set as content for the key.
+        :param string_data: the string to set as content for the key.
         :type string_data: str
         :param key: S3 key that will point to the file
         :type key: str
diff --git a/airflow/hooks/druid_hook.py b/airflow/hooks/druid_hook.py
index cafba9c6c1..75111fad2e 100644
--- a/airflow/hooks/druid_hook.py
+++ b/airflow/hooks/druid_hook.py
@@ -35,7 +35,7 @@ class DruidHook(BaseHook):
 
     :param druid_ingest_conn_id: The connection id to the Druid overlord machine
                                  which accepts index jobs
-    :type druid_ingest_conn_id: string
+    :type druid_ingest_conn_id: str
     :param timeout: The interval between polling
                     the Druid job for the status of the ingestion job.
                     Must be greater than or equal to 1
diff --git a/airflow/hooks/hdfs_hook.py b/airflow/hooks/hdfs_hook.py
index 3c9136b1fd..b00bab42ce 100644
--- a/airflow/hooks/hdfs_hook.py
+++ b/airflow/hooks/hdfs_hook.py
@@ -39,9 +39,9 @@ class HDFSHook(BaseHook):
     Interact with HDFS. This class is a wrapper around the snakebite library.
 
     :param hdfs_conn_id: Connection id to fetch connection info
-    :type conn_id: string
+    :type hdfs_conn_id: str
     :param proxy_user: effective user for HDFS operations
-    :type proxy_user: string
+    :type proxy_user: str
     :param autoconfig: use snakebite's automatically configured client
     :type autoconfig: bool
     """
diff --git a/airflow/hooks/hive_hooks.py b/airflow/hooks/hive_hooks.py
index bd6a01a1e6..ffa1c863a2 100644
--- a/airflow/hooks/hive_hooks.py
+++ b/airflow/hooks/hive_hooks.py
@@ -72,13 +72,13 @@ class HiveCliHook(BaseHook):
     connection string as is.
 
     :param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
-    :type  mapred_queue: string
+    :type  mapred_queue: str
     :param mapred_queue_priority: priority within the job queue.
         Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
-    :type  mapred_queue_priority: string
+    :type  mapred_queue_priority: str
     :param mapred_job_name: This name will appear in the jobtracker.
         This can make monitoring easier.
-    :type  mapred_job_name: string
+    :type  mapred_job_name: str
     """
 
     def __init__(
@@ -324,7 +324,7 @@ def load_df(
         :type field_dict: OrderedDict
         :param delimiter: field delimiter in the file
         :type delimiter: str
-        :param encoding: string encoding to use when writing DataFrame to file
+        :param encoding: the string encoding to use when writing DataFrame to file
         :type encoding: str
         :param pandas_kwargs: passed to DataFrame.to_csv
         :type pandas_kwargs: dict
@@ -536,13 +536,13 @@ def check_for_partition(self, schema, table, partition):
         Checks whether a partition exists
 
         :param schema: Name of hive schema (database) @table belongs to
-        :type schema: string
+        :type schema: str
         :param table: Name of hive table @partition belongs to
-        :type schema: string
+        :type table: str
         :param partition: Expression that matches the partitions to check for
             (eg `a = 'b' AND c = 'd'`)
-        :type schema: string
-        :rtype: boolean
+        :type partition: str
+        :rtype: bool
 
         >>> hh = HiveMetastoreHook()
         >>> t = 'static_babynames_partitioned'
@@ -563,12 +563,12 @@ def check_for_named_partition(self, schema, table, partition_name):
         Checks whether a partition with a given name exists
 
         :param schema: Name of hive schema (database) @table belongs to
-        :type schema: string
+        :type schema: str
         :param table: Name of hive table @partition belongs to
-        :type schema: string
+        :type table: str
         :param partition_name: Name of the partitions to check for (eg `a=b/c=d`)
-        :type schema: string
-        :rtype: boolean
+        :type partition_name: str
+        :rtype: bool
 
         >>> hh = HiveMetastoreHook()
         >>> t = 'static_babynames_partitioned'
@@ -652,7 +652,7 @@ def _get_max_partition_from_part_specs(part_specs, partition_key, filter_map):
         :param part_specs: list of partition specs.
         :type part_specs: list
         :param partition_key: partition key name.
-        :type partition_key: string
+        :type partition_key: str
         :param filter_map: partition_key:partition_value map used for partition filtering,
                            e.g. {'key1': 'value1', 'key2': 'value2'}.
                            Only partitions matching all partition_key:partition_value
@@ -692,11 +692,11 @@ def max_partition(self, schema, table_name, field=None, filter_map=None):
         filter out partitions.
 
         :param schema: schema name.
-        :type schema: string
+        :type schema: str
         :param table_name: table name.
-        :type table_name: string
+        :type table_name: str
         :param field: partition key to get max partition from.
-        :type field: string
+        :type field: str
         :param filter_map: partition_key:partition_value map used for partition filtering.
         :type filter_map: map
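
Following the doctests embedded in the docstrings above, a rough HiveMetastoreHook usage sketch might look like the following; the schema and table reuse the doctest example, and the partition expression follows the documented format.

    from airflow.hooks.hive_hooks import HiveMetastoreHook

    hh = HiveMetastoreHook(metastore_conn_id='metastore_default')

    # Partition expressions take the documented form, e.g. "a = 'b' AND c = 'd'".
    if hh.check_for_partition('airflow', 'static_babynames_partitioned', "ds='2015-01-01'"):
        latest = hh.max_partition(schema='airflow',
                                  table_name='static_babynames_partitioned',
                                  field='ds')
        print(latest)
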
 
diff --git a/airflow/hooks/slack_hook.py b/airflow/hooks/slack_hook.py
index 17006fc9a5..953ce319b1 100644
--- a/airflow/hooks/slack_hook.py
+++ b/airflow/hooks/slack_hook.py
@@ -34,9 +34,9 @@ def __init__(self, token=None, slack_conn_id=None):
         If both supplied, Slack API token will be used.
 
         :param token: Slack API token
-        :type token: string
+        :type token: str
         :param slack_conn_id: connection that has Slack API token in the password field
-        :type slack_conn_id: string
+        :type slack_conn_id: str
         """
         self.token = self.__get_token(token, slack_conn_id)
 
diff --git a/airflow/macros/hive.py b/airflow/macros/hive.py
index bb60203e95..f8e57bb515 100644
--- a/airflow/macros/hive.py
+++ b/airflow/macros/hive.py
@@ -27,14 +27,14 @@ def max_partition(
     Gets the max partition for a table.
 
     :param schema: The hive schema the table lives in
-    :type schema: string
+    :type schema: str
     :param table: The hive table you are interested in, supports the dot
         notation as in "my_database.my_table", if a dot is found,
         the schema param is disregarded
-    :type table: string
+    :type table: str
     :param metastore_conn_id: The hive connection you are interested in.
         If your default is set you don't need to use this parameter.
-    :type metastore_conn_id: string
+    :type metastore_conn_id: str
     :param filter_map: partition_key:partition_value map used for partition filtering,
                        e.g. {'key1': 'value1', 'key2': 'value2'}.
                        Only partitions matching all partition_key:partition_value
diff --git a/airflow/models.py b/airflow/models.py
index 4aa00074f5..86a9cc2fc6 100755
--- a/airflow/models.py
+++ b/airflow/models.py
@@ -1068,15 +1068,15 @@ def generate_command(dag_id,
         :type mark_success: bool
         :param ignore_all_deps: Ignore all ignorable dependencies.
             Overrides the other ignore_* parameters.
-        :type ignore_all_deps: boolean
+        :type ignore_all_deps: bool
         :param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs
             (e.g. for Backfills)
-        :type ignore_depends_on_past: boolean
+        :type ignore_depends_on_past: bool
         :param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past
             and trigger rule
-        :type ignore_task_deps: boolean
+        :type ignore_task_deps: bool
         :param ignore_ti_state: Ignore the task instance's previous failure/success
-        :type ignore_ti_state: boolean
+        :type ignore_ti_state: bool
         :param local: Whether to run the task locally
         :type local: bool
         :param pickle_id: If the DAG was serialized to the DB, the ID
@@ -1327,7 +1327,7 @@ def are_dependencies_met(
         :type session: Session
         :param verbose: whether to log details of failed dependencies at
             info or debug log level
-        :type verbose: boolean
+        :type verbose: bool
         """
         dep_context = dep_context or DepContext()
         failed = False
@@ -1467,19 +1467,19 @@ def _check_and_change_state_before_execution(
         executed, in preparation for _run_raw_task
 
         :param verbose: whether to turn on more verbose logging
-        :type verbose: boolean
+        :type verbose: bool
         :param ignore_all_deps: Ignore all of the non-critical dependencies, just runs
-        :type ignore_all_deps: boolean
+        :type ignore_all_deps: bool
         :param ignore_depends_on_past: Ignore depends_on_past DAG attribute
-        :type ignore_depends_on_past: boolean
+        :type ignore_depends_on_past: bool
         :param ignore_task_deps: Don't check the dependencies of this TI's task
-        :type ignore_task_deps: boolean
+        :type ignore_task_deps: bool
         :param ignore_ti_state: Disregards previous task instance state
-        :type ignore_ti_state: boolean
+        :type ignore_ti_state: bool
         :param mark_success: Don't run the task, mark its state as success
-        :type mark_success: boolean
+        :type mark_success: bool
         :param test_mode: Doesn't record success or failure in the DB
-        :type test_mode: boolean
+        :type test_mode: bool
         :param pool: specifies the pool to use to run the task instance
         :type pool: str
         :return: whether the state was changed to running or not
@@ -1597,9 +1597,9 @@ def _run_raw_task(
         only after another function changes the state to running.
 
         :param mark_success: Don't run the task, mark its state as success
-        :type mark_success: boolean
+        :type mark_success: bool
         :param test_mode: Doesn't record success or failure in the DB
-        :type test_mode: boolean
+        :type test_mode: bool
         :param pool: specifies the pool to use to run the task instance
         :type pool: str
         """
@@ -1978,7 +1978,7 @@ def xcom_push(
         Make an XCom available for tasks to pull.
 
         :param key: A key for the XCom
-        :type key: string
+        :type key: str
         :param value: A value for the XCom. The value is pickled and stored
             in the database.
         :type value: any pickleable object
@@ -2024,13 +2024,13 @@ def xcom_pull(
             available as a constant XCOM_RETURN_KEY. This key is automatically
             given to XComs returned by tasks (as opposed to being pushed
             manually). To remove the filter, pass key=None.
-        :type key: string
+        :type key: str
         :param task_ids: Only XComs from tasks with matching ids will be
             pulled. Can pass None to remove the filter.
-        :type task_ids: string or iterable of strings (representing task_ids)
+        :type task_ids: str or iterable of strings (representing task_ids)
         :param dag_id: If provided, only pulls XComs from this DAG.
             If None (default), the DAG of the calling task is used.
-        :type dag_id: string
+        :type dag_id: str
         :param include_prior_dates: If False, only XComs from the current
             execution_date are returned. If True, XComs from previous dates
             are returned as well.
@@ -2209,9 +2209,9 @@ class derived from this one results in the creation of a task object,
     be set by using the set_upstream and/or set_downstream methods.
 
     :param task_id: a unique, meaningful id for the task
-    :type task_id: string
+    :type task_id: str
     :param owner: the owner of the task, using the unix username is recommended
-    :type owner: string
+    :type owner: str
     :param retries: the number of retries that should be performed before
         failing the task
     :type retries: int
@@ -3127,9 +3127,9 @@ class DAG(BaseDag, LoggingMixin):
     added once to a DAG.
 
     :param dag_id: The id of the DAG
-    :type dag_id: string
+    :type dag_id: str
     :param description: The description for the DAG to e.g. be shown on the webserver
-    :type description: string
+    :type description: str
     :param schedule_interval: Defines how often that DAG runs, this
         timedelta object gets added to your latest task instance's
         execution_date to figure out the next schedule
@@ -3146,7 +3146,7 @@ class DAG(BaseDag, LoggingMixin):
         defines where jinja will look for your templates. Order matters.
         Note that jinja/airflow includes the path of your DAG file by
         default
-    :type template_searchpath: string or list of stings
+    :type template_searchpath: str or list of strings
     :param user_defined_macros: a dictionary of macros that will be exposed
         in your jinja templates. For example, passing ``dict(foo='bar')``
         to this argument allows you to ``{{ foo }}`` in all jinja
@@ -3185,9 +3185,9 @@ class DAG(BaseDag, LoggingMixin):
     :type sla_miss_callback: types.FunctionType
     :param default_view: Specify DAG default view (tree, graph, duration,
                                                    gantt, landing_times)
-    :type default_view: string
+    :type default_view: str
     :param orientation: Specify DAG orientation in graph view (LR, TB, RL, BT)
-    :type orientation: string
+    :type orientation: str
     :param catchup: Perform scheduler catchup (or only run latest)? Defaults to True
     :type catchup: bool
     :param on_failure_callback: A function to be called when a DagRun of this dag fails.
@@ -4120,12 +4120,12 @@ def run(
             dependencies for the first set of tasks only
         :type ignore_first_depends_on_past: bool
         :param pool: Resource pool to use
-        :type pool: string
+        :type pool: str
         :param delay_on_limit_secs: Time in seconds to wait before next attempt to run
             dag run when max_active_runs limit has been reached
         :type delay_on_limit_secs: float
         :param verbose: Make logging output more verbose
-        :type verbose: boolean
+        :type verbose: bool
         :param conf: user defined dictionary passed from CLI
         :type conf: dict
         """
@@ -4174,7 +4174,7 @@ def create_dagrun(self,
         Returns the dag run.
 
         :param run_id: defines the run id for this dag run
-        :type run_id: string
+        :type run_id: str
         :param execution_date: the execution date of this dag run
         :type execution_date: datetime
         :param state: the state of the dag run
@@ -4903,9 +4903,9 @@ def find(dag_id=None, run_id=None, execution_date=None,
         Returns a set of dag runs for the given search criteria.
 
         :param dag_id: the dag_id to find dag runs for
-        :type dag_id: integer, list
+        :type dag_id: int, list
         :param run_id: defines the run id for this dag run
-        :type run_id: string
+        :type run_id: str
         :param execution_date: the execution date
         :type execution_date: datetime
         :param state: the state of the dag run
diff --git a/airflow/operators/bash_operator.py b/airflow/operators/bash_operator.py
index 17de0145a3..5ca63deeab 100644
--- a/airflow/operators/bash_operator.py
+++ b/airflow/operators/bash_operator.py
@@ -38,7 +38,7 @@ class BashOperator(BaseOperator):
 
     :param bash_command: The command, set of commands or reference to a
         bash script (must be '.sh') to be executed. (templated)
-    :type bash_command: string
+    :type bash_command: str
     :param xcom_push: If xcom_push is True, the last line written to stdout
         will also be pushed to an XCom when the bash command completes.
     :type xcom_push: bool
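
A minimal sketch wiring up the two BashOperator parameters documented above; the dag id, task id and command are hypothetical:

    from datetime import datetime

    from airflow import DAG
    from airflow.operators.bash_operator import BashOperator

    dag = DAG(dag_id='bash_example', start_date=datetime(2018, 1, 1),
              schedule_interval=None)

    # bash_command is a plain str; with xcom_push=True the last line of stdout
    # is pushed to an XCom when the command completes.
    print_date = BashOperator(task_id='print_date',
                              bash_command='date',
                              xcom_push=True,
                              dag=dag)
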
diff --git a/airflow/operators/check_operator.py b/airflow/operators/check_operator.py
index a0d213cf66..98af08dd3a 100644
--- a/airflow/operators/check_operator.py
+++ b/airflow/operators/check_operator.py
@@ -59,7 +59,7 @@ class CheckOperator(BaseOperator):
     single record from an external source.
 
     :param sql: the sql to be executed. (templated)
-    :type sql: string
+    :type sql: str
     """
 
     template_fields = ('sql',)
@@ -114,7 +114,7 @@ class ValueCheckOperator(BaseOperator):
     single record from an external source.
 
     :param sql: the sql to be executed. (templated)
-    :type sql: string
+    :type sql: str
     """
 
     __mapper_args__ = {
diff --git a/airflow/operators/druid_check_operator.py b/airflow/operators/druid_check_operator.py
index 73f7915ca5..39674fdd39 100644
--- a/airflow/operators/druid_check_operator.py
+++ b/airflow/operators/druid_check_operator.py
@@ -51,9 +51,9 @@ class DruidCheckOperator(CheckOperator):
     without stopping the progress of the DAG.
 
     :param sql: the sql to be executed
-    :type sql: string
+    :type sql: str
     :param druid_broker_conn_id: reference to the druid broker
-    :type druid_broker_conn_id: string
+    :type druid_broker_conn_id: str
     """
 
     @apply_defaults
diff --git a/airflow/operators/email_operator.py b/airflow/operators/email_operator.py
index 7b4109c3a5..d6af588d06 100644
--- a/airflow/operators/email_operator.py
+++ b/airflow/operators/email_operator.py
@@ -29,10 +29,10 @@ class EmailOperator(BaseOperator):
     :param to: list of emails to send the email to. (templated)
     :type to: list or string (comma or semicolon delimited)
     :param subject: subject line for the email. (templated)
-    :type subject: string
+    :type subject: str
     :param html_content: content of the email, html markup
         is allowed. (templated)
-    :type html_content: string
+    :type html_content: str
     :param files: file names to attach in email
     :type files: list
     :param cc: list of recipients to be added in CC field
@@ -40,10 +40,10 @@ class EmailOperator(BaseOperator):
     :param bcc: list of recipients to be added in BCC field
     :type bcc: list or string (comma or semicolon delimited)
     :param mime_subtype: MIME sub content type
-    :type mime_subtype: string
+    :type mime_subtype: str
     :param mime_charset: character set parameter added to the Content-Type
         header.
-    :type mime_charset: string
+    :type mime_charset: str
     """
 
     template_fields = ('to', 'subject', 'html_content')
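
A minimal EmailOperator sketch using the parameters above; the addresses and dag id are hypothetical, and the Jinja templating in subject/html_content relies on the fields listed in template_fields:

    from datetime import datetime

    from airflow import DAG
    from airflow.operators.email_operator import EmailOperator

    dag = DAG(dag_id='email_example', start_date=datetime(2018, 1, 1),
              schedule_interval='@daily')

    # to/cc accept a list or a comma-delimited string; subject and
    # html_content are plain str and both are templated.
    notify = EmailOperator(task_id='notify',
                           to=['data-team@example.com'],
                           cc='oncall@example.com',
                           subject='DAG run {{ ds }} finished',
                           html_content='<p>All tasks for {{ ds }} succeeded.</p>',
                           mime_charset='utf-8',
                           dag=dag)
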
diff --git a/airflow/operators/hive_operator.py b/airflow/operators/hive_operator.py
index 6b06fd4ba0..cf3f655e59 100644
--- a/airflow/operators/hive_operator.py
+++ b/airflow/operators/hive_operator.py
@@ -35,9 +35,9 @@ class HiveOperator(BaseOperator):
     :param hql: the hql to be executed. Note that you may also use
         a relative path from the dag file of a (template) hive
         script. (templated)
-    :type hql: string
+    :type hql: str
     :param hive_cli_conn_id: reference to the Hive database. (templated)
-    :type hive_cli_conn_id: string
+    :type hive_cli_conn_id: str
     :param hiveconfs: if defined, these key value pairs will be passed
         to hive as ``-hiveconf "key"="value"``
     :type hiveconfs: dict
@@ -47,18 +47,18 @@ class HiveOperator(BaseOperator):
         Note that you may want to use this along with the
         ``DAG(user_defined_macros=myargs)`` parameter. View the DAG
         object documentation for more details.
-    :type hiveconf_jinja_translate: boolean
+    :type hiveconf_jinja_translate: bool
     :param script_begin_tag: If defined, the operator will get rid of the
         part of the script before the first occurrence of `script_begin_tag`
     :type script_begin_tag: str
     :param mapred_queue: queue used by the Hadoop CapacityScheduler. (templated)
-    :type  mapred_queue: string
+    :type  mapred_queue: str
     :param mapred_queue_priority: priority within CapacityScheduler queue.
         Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
-    :type  mapred_queue_priority: string
+    :type  mapred_queue_priority: str
     :param mapred_job_name: This name will appear in the jobtracker.
         This can make monitoring easier.
-    :type  mapred_job_name: string
+    :type  mapred_job_name: str
     """
 
     template_fields = ('hql', 'schema', 'hive_cli_conn_id', 'mapred_queue',
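
A minimal HiveOperator sketch based on the docstring above; the table, queue and dag id are hypothetical, and 'hive_cli_default' is simply the conventional default connection id:

    from datetime import datetime

    from airflow import DAG
    from airflow.operators.hive_operator import HiveOperator

    dag = DAG(dag_id='hive_example', start_date=datetime(2018, 1, 1),
              schedule_interval='@daily')

    # hql is a plain str (or a relative path to a .hql template);
    # hive_cli_conn_id, mapred_queue and mapred_job_name are all str.
    load_partition = HiveOperator(
        task_id='load_partition',
        hql="ALTER TABLE events ADD IF NOT EXISTS PARTITION (ds='{{ ds }}')",
        hive_cli_conn_id='hive_cli_default',
        hiveconfs={'hive.exec.dynamic.partition': 'true'},
        mapred_queue='etl',
        mapred_job_name='airflow_load_partition_{{ ds }}',
        dag=dag)
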
diff --git a/airflow/operators/hive_to_samba_operator.py b/airflow/operators/hive_to_samba_operator.py
index fa2a9619eb..7963524a10 100644
--- a/airflow/operators/hive_to_samba_operator.py
+++ b/airflow/operators/hive_to_samba_operator.py
@@ -32,11 +32,11 @@ class Hive2SambaOperator(BaseOperator):
     results of the query as a csv to a Samba location.
 
     :param hql: the hql to be exported. (templated)
-    :type hql: string
+    :type hql: str
     :param hiveserver2_conn_id: reference to the hiveserver2 service
-    :type hiveserver2_conn_id: string
+    :type hiveserver2_conn_id: str
     :param samba_conn_id: reference to the samba destination
-    :type samba_conn_id: string
+    :type samba_conn_id: str
     """
 
     template_fields = ('hql', 'destination_filepath')
diff --git a/airflow/operators/http_operator.py b/airflow/operators/http_operator.py
index e757118fc0..0585a92a86 100644
--- a/airflow/operators/http_operator.py
+++ b/airflow/operators/http_operator.py
@@ -28,11 +28,11 @@ class SimpleHttpOperator(BaseOperator):
     Calls an endpoint on an HTTP system to execute an action
 
     :param http_conn_id: The connection to run the operator against
-    :type http_conn_id: string
+    :type http_conn_id: str
     :param endpoint: The relative part of the full url. (templated)
-    :type endpoint: string
+    :type endpoint: str
     :param method: The HTTP method to use, default = "POST"
-    :type method: string
+    :type method: str
     :param data: The data to pass. POST-data in POST/PUT and params
         in the URL for a GET request. (templated)
     :type data: For POST/PUT, depends on the content-type parameter,
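
A minimal SimpleHttpOperator sketch using the str parameters above; the endpoint and payload are hypothetical and the connection id is the conventional default:

    import json
    from datetime import datetime

    from airflow import DAG
    from airflow.operators.http_operator import SimpleHttpOperator

    dag = DAG(dag_id='http_example', start_date=datetime(2018, 1, 1),
              schedule_interval='@daily')

    # http_conn_id, endpoint and method are plain str; for a POST the data
    # payload is sent as the request body.
    trigger_job = SimpleHttpOperator(
        task_id='trigger_job',
        http_conn_id='http_default',
        endpoint='api/jobs/run',
        method='POST',
        data=json.dumps({'date': '{{ ds }}'}),
        dag=dag)
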
diff --git a/airflow/operators/jdbc_operator.py b/airflow/operators/jdbc_operator.py
index 9e7f24dbf6..a10bf30c5c 100644
--- a/airflow/operators/jdbc_operator.py
+++ b/airflow/operators/jdbc_operator.py
@@ -29,7 +29,7 @@ class JdbcOperator(BaseOperator):
     Requires jaydebeapi.
 
     :param jdbc_conn_id: reference to a predefined database
-    :type jdbc_conn_id: string
+    :type jdbc_conn_id: str
     :param sql: the sql code to be executed. (templated)
     :type sql: Can receive a str representing a sql statement,
         a list of str (sql statements), or reference to a template file.
diff --git a/airflow/operators/mssql_operator.py b/airflow/operators/mssql_operator.py
index 1309be94e4..f27e2145eb 100644
--- a/airflow/operators/mssql_operator.py
+++ b/airflow/operators/mssql_operator.py
@@ -27,12 +27,12 @@ class MsSqlOperator(BaseOperator):
     Executes sql code in a specific Microsoft SQL database
 
     :param mssql_conn_id: reference to a specific mssql database
-    :type mssql_conn_id: string
+    :type mssql_conn_id: str
     :param sql: the sql code to be executed
-    :type sql: string or string pointing to a template file with .sql
+    :type sql: str or string pointing to a template file with .sql
         extension. (templated)
     :param database: name of database which overwrites the one defined in the connection
-    :type database: string
+    :type database: str
     """
 
     template_fields = ('sql',)
diff --git a/airflow/operators/mysql_operator.py b/airflow/operators/mysql_operator.py
index 2b940c785b..e5bd788bd1 100644
--- a/airflow/operators/mysql_operator.py
+++ b/airflow/operators/mysql_operator.py
@@ -27,13 +27,13 @@ class MySqlOperator(BaseOperator):
     Executes sql code in a specific MySQL database
 
     :param mysql_conn_id: reference to a specific mysql database
-    :type mysql_conn_id: string
+    :type mysql_conn_id: str
     :param sql: the sql code to be executed. (templated)
     :type sql: Can receive a str representing a sql statement,
         a list of str (sql statements), or reference to a template file.
         Template references are recognized by str ending in '.sql'
     :param database: name of database which overwrites the one defined in the connection
-    :type database: string
+    :type database: str
     """
 
     template_fields = ('sql',)
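
A minimal MySqlOperator sketch based on the docstring above; the table, database and dag id are hypothetical:

    from datetime import datetime

    from airflow import DAG
    from airflow.operators.mysql_operator import MySqlOperator

    dag = DAG(dag_id='mysql_example', start_date=datetime(2018, 1, 1),
              schedule_interval='@daily')

    # sql may be a single str, a list of str, or a path ending in '.sql';
    # database (str) overrides the schema defined on the connection.
    cleanup = MySqlOperator(
        task_id='cleanup',
        mysql_conn_id='mysql_default',
        sql="DELETE FROM staging_events WHERE ds < '{{ ds }}'",
        database='analytics',
        dag=dag)
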
diff --git a/airflow/operators/oracle_operator.py b/airflow/operators/oracle_operator.py
index 275165f1c7..b97129715f 100644
--- a/airflow/operators/oracle_operator.py
+++ b/airflow/operators/oracle_operator.py
@@ -27,7 +27,7 @@ class OracleOperator(BaseOperator):
     Executes sql code in a specific Oracle database
 
     :param oracle_conn_id: reference to a specific Oracle database
-    :type oracle_conn_id: string
+    :type oracle_conn_id: str
     :param sql: the sql code to be executed. (templated)
     :type sql: Can receive a str representing a sql statement,
         a list of str (sql statements), or reference to a template file.
diff --git a/airflow/operators/pig_operator.py b/airflow/operators/pig_operator.py
index d22f19f709..69a54b7f06 100644
--- a/airflow/operators/pig_operator.py
+++ b/airflow/operators/pig_operator.py
@@ -29,15 +29,15 @@ class PigOperator(BaseOperator):
     Executes pig script.
 
     :param pig: the pig latin script to be executed. (templated)
-    :type pig: string
+    :type pig: str
     :param pig_cli_conn_id: reference to the Hive database
-    :type pig_cli_conn_id: string
+    :type pig_cli_conn_id: str
     :param pigparams_jinja_translate: when True, pig params-type templating
         ${var} gets translated into jinja-type templating {{ var }}. Note that
         you may want to use this along with the
         ``DAG(user_defined_macros=myargs)`` parameter. View the DAG
         object documentation for more details.
-    :type pigparams_jinja_translate: boolean
+    :type pigparams_jinja_translate: bool
     """
 
     template_fields = ('pig',)
diff --git a/airflow/operators/postgres_operator.py b/airflow/operators/postgres_operator.py
index 5ff6e9e643..e329100aa2 100644
--- a/airflow/operators/postgres_operator.py
+++ b/airflow/operators/postgres_operator.py
@@ -26,13 +26,13 @@ class PostgresOperator(BaseOperator):
     Executes sql code in a specific Postgres database
 
     :param postgres_conn_id: reference to a specific postgres database
-    :type postgres_conn_id: string
+    :type postgres_conn_id: str
     :param sql: the sql code to be executed. (templated)
     :type sql: Can receive a str representing a sql statement,
         a list of str (sql statements), or reference to a template file.
         Template references are recognized by str ending in '.sql'
     :param database: name of database which overwrites the one defined in the connection
-    :type database: string
+    :type database: str
     """
 
     template_fields = ('sql',)
diff --git a/airflow/operators/presto_check_operator.py b/airflow/operators/presto_check_operator.py
index 608aebfe29..16f5bc0212 100644
--- a/airflow/operators/presto_check_operator.py
+++ b/airflow/operators/presto_check_operator.py
@@ -52,9 +52,9 @@ class PrestoCheckOperator(CheckOperator):
     without stopping the progress of the DAG.
 
     :param sql: the sql to be executed
-    :type sql: string
+    :type sql: str
     :param presto_conn_id: reference to the Presto database
-    :type presto_conn_id: string
+    :type presto_conn_id: str
     """
 
     @apply_defaults
@@ -76,9 +76,9 @@ class PrestoValueCheckOperator(ValueCheckOperator):
     Performs a simple value check using sql code.
 
     :param sql: the sql to be executed
-    :type sql: string
+    :type sql: str
     :param presto_conn_id: reference to the Presto database
-    :type presto_conn_id: string
+    :type presto_conn_id: str
     """
 
     @apply_defaults
@@ -108,7 +108,7 @@ class PrestoIntervalCheckOperator(IntervalCheckOperator):
     :param metrics_threshold: a dictionary of ratios indexed by metrics
     :type metrics_threshold: dict
     :param presto_conn_id: reference to the Presto database
-    :type presto_conn_id: string
+    :type presto_conn_id: str
     """
 
     @apply_defaults
diff --git a/airflow/operators/redshift_to_s3_operator.py b/airflow/operators/redshift_to_s3_operator.py
index e6682c78df..b1fee56e95 100644
--- a/airflow/operators/redshift_to_s3_operator.py
+++ b/airflow/operators/redshift_to_s3_operator.py
@@ -28,17 +28,17 @@ class RedshiftToS3Transfer(BaseOperator):
     Executes an UNLOAD command to s3 as a CSV with headers
 
     :param schema: reference to a specific schema in redshift database
-    :type schema: string
+    :type schema: str
     :param table: reference to a specific table in redshift database
-    :type table: string
+    :type table: str
     :param s3_bucket: reference to a specific S3 bucket
-    :type s3_bucket: string
+    :type s3_bucket: str
     :param s3_key: reference to a specific S3 key
-    :type s3_key: string
+    :type s3_key: str
     :param redshift_conn_id: reference to a specific redshift database
-    :type redshift_conn_id: string
+    :type redshift_conn_id: str
     :param aws_conn_id: reference to a specific S3 connection
-    :type aws_conn_id: string
+    :type aws_conn_id: str
     :param verify: Whether or not to verify SSL certificates for S3 connection.
         By default SSL certificates are verified.
         You can provide the following values:
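
A minimal RedshiftToS3Transfer sketch using the str parameters above; schema, table, bucket and key are hypothetical and the connection ids are the conventional defaults:

    from datetime import datetime

    from airflow import DAG
    from airflow.operators.redshift_to_s3_operator import RedshiftToS3Transfer

    dag = DAG(dag_id='redshift_unload_example', start_date=datetime(2018, 1, 1),
              schedule_interval='@daily')

    # schema, table, s3_bucket, s3_key, redshift_conn_id and aws_conn_id
    # are all plain str.
    unload_events = RedshiftToS3Transfer(
        task_id='unload_events',
        schema='public',
        table='events',
        s3_bucket='example-data-lake',
        s3_key='exports/events',
        redshift_conn_id='redshift_default',
        aws_conn_id='aws_default',
        dag=dag)
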
diff --git a/airflow/operators/s3_to_redshift_operator.py b/airflow/operators/s3_to_redshift_operator.py
index 8c83f44372..265d6e2563 100644
--- a/airflow/operators/s3_to_redshift_operator.py
+++ b/airflow/operators/s3_to_redshift_operator.py
@@ -28,17 +28,17 @@ class S3ToRedshiftTransfer(BaseOperator):
     Executes a COPY command to load files from s3 to Redshift
 
     :param schema: reference to a specific schema in redshift database
-    :type schema: string
+    :type schema: str
     :param table: reference to a specific table in redshift database
-    :type table: string
+    :type table: str
     :param s3_bucket: reference to a specific S3 bucket
-    :type s3_bucket: string
+    :type s3_bucket: str
     :param s3_key: reference to a specific S3 key
-    :type s3_key: string
+    :type s3_key: str
     :param redshift_conn_id: reference to a specific redshift database
-    :type redshift_conn_id: string
+    :type redshift_conn_id: str
     :param aws_conn_id: reference to a specific S3 connection
-    :type aws_conn_id: string
+    :type aws_conn_id: str
     :param verify: Whether or not to verify SSL certificates for S3 connection.
         By default SSL certificates are verified.
         You can provide the following values:
diff --git a/airflow/operators/slack_operator.py b/airflow/operators/slack_operator.py
index 3382bc2788..ddf8788e9b 100644
--- a/airflow/operators/slack_operator.py
+++ b/airflow/operators/slack_operator.py
@@ -32,11 +32,11 @@ class SlackAPIOperator(BaseOperator):
     In the future additional Slack API Operators will be derived from this class as well
 
     :param slack_conn_id: Slack connection ID which its password is Slack API token
-    :type slack_conn_id: string
+    :type slack_conn_id: str
     :param token: Slack API token (https://api.slack.com/web)
-    :type token: string
+    :type token: str
     :param method: The Slack API Method to Call (https://api.slack.com/methods)
-    :type method: string
+    :type method: str
     :param api_params: API Method call parameters (https://api.slack.com/methods)
     :type api_params: dict
     """
@@ -93,13 +93,13 @@ class SlackAPIPostOperator(SlackAPIOperator):
 
     :param channel: channel in which to post message on slack name (#general) or
         ID (C12318391). (templated)
-    :type channel: string
+    :type channel: str
     :param username: Username that airflow will be posting to Slack as. (templated)
-    :type username: string
+    :type username: str
     :param text: message to send to slack. (templated)
-    :type text: string
+    :type text: str
     :param icon_url: url to icon used for this message
-    :type icon_url: string
+    :type icon_url: str
     :param attachments: extra formatting details. (templated)
         - see https://api.slack.com/docs/attachments.
     :type attachments: array of hashes
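
A minimal SlackAPIPostOperator sketch based on the docstring above; the token shown is a placeholder (in practice it would come from a Slack connection or a secret store) and the channel and text are hypothetical:

    from datetime import datetime

    from airflow import DAG
    from airflow.operators.slack_operator import SlackAPIPostOperator

    dag = DAG(dag_id='slack_example', start_date=datetime(2018, 1, 1),
              schedule_interval='@daily')

    # channel, username and text are plain str.
    notify_slack = SlackAPIPostOperator(
        task_id='notify_slack',
        token='xoxb-not-a-real-token',
        channel='#data-alerts',
        username='airflow',
        text='DAG run for {{ ds }} completed.',
        dag=dag)
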
diff --git a/airflow/operators/sqlite_operator.py b/airflow/operators/sqlite_operator.py
index 5b7213ed81..8280075a32 100644
--- a/airflow/operators/sqlite_operator.py
+++ b/airflow/operators/sqlite_operator.py
@@ -27,9 +27,9 @@ class SqliteOperator(BaseOperator):
     Executes sql code in a specific Sqlite database
 
     :param sqlite_conn_id: reference to a specific sqlite database
-    :type sqlite_conn_id: string
+    :type sqlite_conn_id: str
     :param sql: the sql code to be executed. (templated)
-    :type sql: string or string pointing to a template file. File must have
+    :type sql: str or string pointing to a template file. File must have
         a '.sql' extensions.
     """
 
diff --git a/airflow/sensors/external_task_sensor.py b/airflow/sensors/external_task_sensor.py
index eda1a2d9d2..c80986fa9f 100644
--- a/airflow/sensors/external_task_sensor.py
+++ b/airflow/sensors/external_task_sensor.py
@@ -30,10 +30,10 @@ class ExternalTaskSensor(BaseSensorOperator):
 
     :param external_dag_id: The dag_id that contains the task you want to
         wait for
-    :type external_dag_id: string
+    :type external_dag_id: str
     :param external_task_id: The task_id that contains the task you want to
         wait for
-    :type external_task_id: string
+    :type external_task_id: str
     :param allowed_states: list of allowed states, default is ``['success']``
     :type allowed_states: list
     :param execution_delta: time difference with the previous execution to
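
A minimal ExternalTaskSensor sketch using the parameters above; the upstream dag and task ids are hypothetical, and execution_delta here assumes the upstream DAG runs one hour earlier than this one:

    from datetime import datetime, timedelta

    from airflow import DAG
    from airflow.sensors.external_task_sensor import ExternalTaskSensor

    dag = DAG(dag_id='downstream_dag', start_date=datetime(2018, 1, 1),
              schedule_interval='@daily')

    # external_dag_id and external_task_id are plain str; execution_delta
    # shifts the execution_date being waited on.
    wait_for_upstream = ExternalTaskSensor(
        task_id='wait_for_upstream',
        external_dag_id='upstream_dag',
        external_task_id='final_task',
        allowed_states=['success'],
        execution_delta=timedelta(hours=1),
        dag=dag)
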
diff --git a/airflow/sensors/hive_partition_sensor.py b/airflow/sensors/hive_partition_sensor.py
index c8eddc3443..ca10c863b4 100644
--- a/airflow/sensors/hive_partition_sensor.py
+++ b/airflow/sensors/hive_partition_sensor.py
@@ -31,12 +31,12 @@ class HivePartitionSensor(BaseSensorOperator):
 
     :param table: The name of the table to wait for, supports the dot
         notation (my_database.my_table)
-    :type table: string
+    :type table: str
     :param partition: The partition clause to wait for. This is passed as
         is to the metastore Thrift client ``get_partitions_by_filter`` method,
         and apparently supports SQL like notation as in ``ds='2015-01-01'
         AND type='value'`` and comparison operators as in ``"ds>=2015-01-01"``
-    :type partition: string
+    :type partition: str
     :param metastore_conn_id: reference to the metastore thrift service
         connection id
     :type metastore_conn_id: str
diff --git a/airflow/sensors/http_sensor.py b/airflow/sensors/http_sensor.py
index f665737e96..e60b175ed9 100644
--- a/airflow/sensors/http_sensor.py
+++ b/airflow/sensors/http_sensor.py
@@ -35,11 +35,11 @@ class HttpSensor(BaseSensorOperator):
     would fail the sensor itself directly (no more poking).
 
     :param http_conn_id: The connection to run the sensor against
-    :type http_conn_id: string
+    :type http_conn_id: str
     :param method: The HTTP request method to use
-    :type method: string
+    :type method: str
     :param endpoint: The relative part of the full url
-    :type endpoint: string
+    :type endpoint: str
     :param request_params: The parameters to be added to the GET url
     :type request_params: a dictionary of string key/value pairs
     :param headers: The HTTP headers to be added to the GET request
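
A minimal HttpSensor sketch using the parameters above; the endpoint and the response_check callable (a parameter not shown in this hunk) are hypothetical:

    from datetime import datetime

    from airflow import DAG
    from airflow.sensors.http_sensor import HttpSensor

    dag = DAG(dag_id='http_sensor_example', start_date=datetime(2018, 1, 1),
              schedule_interval='@daily')

    # http_conn_id, method and endpoint are plain str; response_check
    # decides when the sensor succeeds.
    wait_for_export = HttpSensor(
        task_id='wait_for_export',
        http_conn_id='http_default',
        method='GET',
        endpoint='exports/{{ ds }}/status',
        request_params={'format': 'json'},
        response_check=lambda response: 'done' in response.text,
        dag=dag)
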
diff --git a/airflow/sensors/sql_sensor.py b/airflow/sensors/sql_sensor.py
index e9208fa890..de46e6d3db 100644
--- a/airflow/sensors/sql_sensor.py
+++ b/airflow/sensors/sql_sensor.py
@@ -30,7 +30,7 @@ class SqlSensor(BaseSensorOperator):
     sql returns no row, or if the first cell is in (0, '0', '').
 
     :param conn_id: The connection to run the sensor against
-    :type conn_id: string
+    :type conn_id: str
     :param sql: The sql to run. To pass, it needs to return at least one cell
         that contains a non-zero / non-empty string value.
     """
diff --git a/airflow/ti_deps/dep_context.py b/airflow/ti_deps/dep_context.py
index 6d39998988..a0f30831e5 100644
--- a/airflow/ti_deps/dep_context.py
+++ b/airflow/ti_deps/dep_context.py
@@ -49,20 +49,20 @@ class DepContext(object):
         creation while checking to see whether the task instance is runnable. It was the
         shortest path to add the feature. This is bad since this class should be pure (no
         side effects).
-    :type flag_upstream_failed: boolean
+    :type flag_upstream_failed: bool
     :param ignore_all_deps: Whether or not the context should ignore all ignoreable
         dependencies. Overrides the other ignore_* parameters
-    :type ignore_all_deps: boolean
+    :type ignore_all_deps: bool
     :param ignore_depends_on_past: Ignore depends_on_past parameter of DAGs (e.g. for
         Backfills)
-    :type ignore_depends_on_past: boolean
+    :type ignore_depends_on_past: bool
     :param ignore_in_retry_period: Ignore the retry period for task instances
-    :type ignore_in_retry_period: boolean
+    :type ignore_in_retry_period: bool
     :param ignore_task_deps: Ignore task-specific dependencies such as depends_on_past and
         trigger rule
-    :type ignore_task_deps: boolean
+    :type ignore_task_deps: bool
     :param ignore_ti_state: Ignore the task instance's previous failure/success
-    :type ignore_ti_state: boolean
+    :type ignore_ti_state: bool
     """
     def __init__(
             self,
diff --git a/airflow/ti_deps/deps/trigger_rule_dep.py b/airflow/ti_deps/deps/trigger_rule_dep.py
index 76e5e1324f..8c9505db71 100644
--- a/airflow/ti_deps/deps/trigger_rule_dep.py
+++ b/airflow/ti_deps/deps/trigger_rule_dep.py
@@ -105,20 +105,20 @@ def _evaluate_trigger_rule(
         :param ti: the task instance to evaluate the trigger rule of
         :type ti: TaskInstance
         :param successes: Number of successful upstream tasks
-        :type successes: boolean
+        :type successes: bool
         :param skipped: Number of skipped upstream tasks
-        :type skipped: boolean
+        :type skipped: bool
         :param failed: Number of failed upstream tasks
-        :type failed: boolean
+        :type failed: bool
         :param upstream_failed: Number of upstream_failed upstream tasks
-        :type upstream_failed: boolean
+        :type upstream_failed: bool
         :param done: Number of completed upstream tasks
-        :type done: boolean
+        :type done: bool
         :param flag_upstream_failed: This is a hack to generate
             the upstream_failed state creation while checking to see
             whether the task instance is runnable. It was the shortest
             path to add the feature
-        :type flag_upstream_failed: boolean
+        :type flag_upstream_failed: bool
         :param session: database session
         :type session: Session
         """
diff --git a/airflow/utils/helpers.py b/airflow/utils/helpers.py
index d6b1d93b38..c264056ced 100644
--- a/airflow/utils/helpers.py
+++ b/airflow/utils/helpers.py
@@ -281,10 +281,10 @@ def __init__(self, parent_module, module_attributes):
         """
         :param parent_module: The string package name of the parent module. For
             example, 'airflow.operators'
-        :type parent_module: string
+        :type parent_module: str
         :param module_attributes: The file to class mappings for all importable
             classes.
-        :type module_attributes: string
+        :type module_attributes: str
         """
         self._parent_module = parent_module
         self._attribute_modules = self._build_attribute_modules(module_attributes)
diff --git a/airflow/utils/log/gcs_task_handler.py b/airflow/utils/log/gcs_task_handler.py
index e768882ac5..a9502ef8db 100644
--- a/airflow/utils/log/gcs_task_handler.py
+++ b/airflow/utils/log/gcs_task_handler.py
@@ -126,7 +126,7 @@ def gcs_read(self, remote_log_location):
         """
         Returns the log found at the remote_log_location.
         :param remote_log_location: the log's location in remote storage
-        :type remote_log_location: string (path)
+        :type remote_log_location: str (path)
         """
         bkt, blob = self.parse_gcs_url(remote_log_location)
         return self.hook.download(bkt, blob).decode()
@@ -136,9 +136,9 @@ def gcs_write(self, log, remote_log_location, append=True):
         Writes the log to the remote_log_location. Fails silently if no hook
         was created.
         :param log: the log to write to the remote_log_location
-        :type log: string
+        :type log: str
         :param remote_log_location: the log's location in remote storage
-        :type remote_log_location: string (path)
+        :type remote_log_location: str (path)
         :param append: if False, any existing log file is overwritten. If True,
             the new log is appended to any existing logs.
         :type append: bool
diff --git a/airflow/utils/log/s3_task_handler.py b/airflow/utils/log/s3_task_handler.py
index bdbf94b6bb..196aec50f1 100644
--- a/airflow/utils/log/s3_task_handler.py
+++ b/airflow/utils/log/s3_task_handler.py
@@ -132,7 +132,7 @@ def s3_read(self, remote_log_location, return_error=False):
         Returns the log found at the remote_log_location. Returns '' if no
         logs are found or there is an error.
         :param remote_log_location: the log's location in remote storage
-        :type remote_log_location: string (path)
+        :type remote_log_location: str (path)
         :param return_error: if True, returns a string error message if an
             error occurs. Otherwise returns '' when an error occurs.
         :type return_error: bool
@@ -151,9 +151,9 @@ def s3_write(self, log, remote_log_location, append=True):
         Writes the log to the remote_log_location. Fails silently if no hook
         was created.
         :param log: the log to write to the remote_log_location
-        :type log: string
+        :type log: str
         :param remote_log_location: the log's location in remote storage
-        :type remote_log_location: string (path)
+        :type remote_log_location: str (path)
         :param append: if False, any existing log file is overwritten. If True,
             the new log is appended to any existing logs.
         :type append: bool
diff --git a/airflow/utils/log/wasb_task_handler.py b/airflow/utils/log/wasb_task_handler.py
index a2a0c0daee..b7f726d41d 100644
--- a/airflow/utils/log/wasb_task_handler.py
+++ b/airflow/utils/log/wasb_task_handler.py
@@ -140,7 +140,7 @@ def wasb_read(self, remote_log_location, return_error=False):
         Returns the log found at the remote_log_location. Returns '' if no
         logs are found or there is an error.
         :param remote_log_location: the log's location in remote storage
-        :type remote_log_location: string (path)
+        :type remote_log_location: str (path)
         :param return_error: if True, returns a string error message if an
             error occurs. Otherwise returns '' when an error occurs.
         :type return_error: bool
@@ -159,9 +159,9 @@ def wasb_write(self, log, remote_log_location, append=True):
         Writes the log to the remote_log_location. Fails silently if no hook
         was created.
         :param log: the log to write to the remote_log_location
-        :type log: string
+        :type log: str
         :param remote_log_location: the log's location in remote storage
-        :type remote_log_location: string (path)
+        :type remote_log_location: str (path)
         :param append: if False, any existing log file is overwritten. If True,
             the new log is appended to any existing logs.
         :type append: bool
diff --git a/airflow/utils/operator_resources.py b/airflow/utils/operator_resources.py
index 649961340e..44df83eafb 100644
--- a/airflow/utils/operator_resources.py
+++ b/airflow/utils/operator_resources.py
@@ -33,10 +33,10 @@ class Resource(object):
     Represents a resource requirement in an execution environment for an operator.
 
     :param name: Name of the resource
-    :type name: string
+    :type name: str
     :param units_str: The string representing the units of a resource (e.g. MB for a CPU
         resource) to be used for display purposes
-    :type units_str: string
+    :type units_str: str
     :param qty: The number of units of the specified resource that are required for
         execution of the operator.
     :type qty: long


 
