Posted to commits@superset.apache.org by jo...@apache.org on 2019/06/25 20:34:58 UTC

[incubator-superset] branch master updated: [format] Using Black (#7769)

This is an automated email from the ASF dual-hosted git repository.

johnbodley pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-superset.git


The following commit(s) were added to refs/heads/master by this push:
     new 5c58fd1  [format] Using Black (#7769)
5c58fd1 is described below

commit 5c58fd18026fa8ab947a8ff5985da469c84a6a6a
Author: John Bodley <45...@users.noreply.github.com>
AuthorDate: Tue Jun 25 13:34:48 2019 -0700

    [format] Using Black (#7769)
---
 requirements-dev.txt => .pre-commit-config.yaml    |   25 +-
 .pylintrc                                          |    4 +-
 CONTRIBUTING.md                                    |   19 +-
 requirements-dev.txt                               |    4 +-
 setup.py                                           |  145 +-
 superset/__init__.py                               |   99 +-
 superset/cli.py                                    |  355 +--
 superset/common/query_context.py                   |  145 +-
 superset/common/query_object.py                    |   84 +-
 superset/config.py                                 |  194 +-
 superset/connectors/base/models.py                 |  197 +-
 superset/connectors/base/views.py                  |   11 +-
 superset/connectors/connector_registry.py          |   22 +-
 superset/connectors/druid/models.py                | 1062 ++++----
 superset/connectors/druid/views.py                 |  473 ++--
 superset/connectors/sqla/models.py                 |  605 +++--
 superset/connectors/sqla/views.py                  |  493 ++--
 superset/data/__init__.py                          |    2 +-
 superset/data/bart_lines.py                        |   29 +-
 superset/data/birth_names.py                       |  445 ++--
 superset/data/countries.py                         |  498 ++--
 superset/data/country_map.py                       |  100 +-
 superset/data/css_templates.py                     |   23 +-
 superset/data/deck.py                              |  597 ++---
 superset/data/energy.py                            |   73 +-
 superset/data/flights.py                           |   37 +-
 superset/data/helpers.py                           |   19 +-
 superset/data/long_lat.py                          |   95 +-
 superset/data/misc_dashboard.py                    |   24 +-
 superset/data/multi_line.py                        |   45 +-
 superset/data/multiformat_time_series.py           |   85 +-
 superset/data/paris.py                             |   23 +-
 superset/data/random_time_series.py                |   62 +-
 superset/data/sf_population_polygons.py            |   23 +-
 superset/data/tabbed_dashboard.py                  |   22 +-
 superset/data/unicode_test_data.py                 |   84 +-
 superset/data/world_bank.py                        |  334 +--
 superset/dataframe.py                              |  109 +-
 superset/db_engine_specs/__init__.py               |    9 +-
 superset/db_engine_specs/athena.py                 |   33 +-
 superset/db_engine_specs/base.py                   |  220 +-
 superset/db_engine_specs/bigquery.py               |   85 +-
 superset/db_engine_specs/clickhouse.py             |   37 +-
 superset/db_engine_specs/db2.py                    |   46 +-
 superset/db_engine_specs/drill.py                  |   40 +-
 superset/db_engine_specs/druid.py                  |   23 +-
 superset/db_engine_specs/gsheets.py                |    3 +-
 superset/db_engine_specs/hive.py                   |  215 +-
 superset/db_engine_specs/impala.py                 |   33 +-
 superset/db_engine_specs/kylin.py                  |   33 +-
 superset/db_engine_specs/mssql.py                  |   34 +-
 superset/db_engine_specs/mysql.py                  |   56 +-
 superset/db_engine_specs/oracle.py                 |   26 +-
 superset/db_engine_specs/pinot.py                  |   33 +-
 superset/db_engine_specs/postgres.py               |   24 +-
 superset/db_engine_specs/presto.py                 |  515 ++--
 superset/db_engine_specs/redshift.py               |    2 +-
 superset/db_engine_specs/snowflake.py              |   36 +-
 superset/db_engine_specs/sqlite.py                 |   55 +-
 superset/db_engine_specs/teradata.py               |   27 +-
 superset/db_engine_specs/vertica.py                |    2 +-
 superset/db_engines/hive.py                        |   16 +-
 superset/extract_table_names.py                    |    4 +-
 superset/forms.py                                  |  171 +-
 superset/jinja_context.py                          |   66 +-
 superset/legacy.py                                 |    7 +-
 superset/migrations/env.py                         |   43 +-
 .../0b1f1ab473c0_add_extra_column_to_query.py      |   12 +-
 .../0c5070e96b57_add_user_attributes_table.py      |   33 +-
 ...9ee0e3_fix_wrong_constraint_on_table_columns.py |   43 +-
 .../versions/1296d28ec131_druid_exports.py         |   10 +-
 .../versions/12d55656cbca_is_featured.py           |    9 +-
 .../versions/130915240929_is_sqllab_viz_flow.py    |   13 +-
 superset/migrations/versions/18dc26817ad2_.py      |    4 +-
 .../versions/18e88e1cc004_making_audit_nullable.py |  161 +-
 .../19a814813610_adding_metric_warning_text.py     |   16 +-
 .../versions/1a1d627ebd8e_position_json.py         |   12 +-
 .../versions/1a48a5411020_adding_slug_to_dash.py   |   13 +-
 .../migrations/versions/1d2ddd543133_log_dt.py     |    8 +-
 superset/migrations/versions/1d9e835a84f9_.py      |   14 +-
 superset/migrations/versions/1e2841a4128_.py       |    9 +-
 .../versions/21e88bc06c02_annotation_migration.py  |   49 +-
 .../migrations/versions/2591d77e9831_user_id.py    |   16 +-
 .../versions/27ae655e4247_make_creator_owners.py   |   40 +-
 .../289ce07647b_add_encrypted_password_field.py    |   15 +-
 .../2929af7925ed_tz_offsets_in_data_sources.py     |   13 +-
 .../versions/2fcdcb35e487_saved_queries.py         |   40 +-
 superset/migrations/versions/30bb17c0dc76_.py      |   12 +-
 .../versions/315b3f4da9b0_adding_log_model.py      |   23 +-
 .../versions/33d996bcc382_update_slice_model.py    |   18 +-
 .../versions/3b626e2a6783_sync_db_with_models.py   |   91 +-
 .../3c3ffe173e4f_add_sql_string_to_table.py        |    8 +-
 ...1c4c6_migrate_num_period_compare_and_period_.py |  100 +-
 ...e1b21cd94a4_change_owner_to_m2m_relation_on_.py |  112 +-
 .../41f6a59a61f2_database_options_for_sql_lab.py   |   19 +-
 .../migrations/versions/430039611635_log_more.py   |   12 +-
 .../migrations/versions/43df8de3a5f4_dash_json.py  |    8 +-
 .../4451805bbaa1_remove_double_percents.py         |   30 +-
 .../versions/4500485bde7d_allow_run_sync_async.py  |   13 +-
 superset/migrations/versions/45e7da7cfeba_.py      |    4 +-
 superset/migrations/versions/46ba6aaaac97_.py      |    4 +-
 ...8b9b7_remove_coordinator_from_druid_cluster_.py |   23 +-
 superset/migrations/versions/472d2f73dfd4_.py      |    4 +-
 superset/migrations/versions/4736ec66ce19_.py      |  141 +-
 ...08545_migrate_time_range_for_default_filters.py |   60 +-
 superset/migrations/versions/4e6a06bad7a8_init.py  |  370 +--
 .../versions/4fa88fe24e94_owners_many_to_many.py   |   38 +-
 .../versions/525c854f0005_log_this_plus.py         |   12 +-
 .../migrations/versions/55179c7f25c7_sqla_descr.py |    8 +-
 ...826_add_metadata_column_to_annotation_model_.py |    8 +-
 superset/migrations/versions/5a7bad26f2a7_.py      |   12 +-
 superset/migrations/versions/5ccf602336a0_.py      |    4 +-
 .../5e4a03ef0bf0_add_request_access_model.py       |   28 +-
 superset/migrations/versions/6414e83d82b7_.py      |    4 +-
 .../migrations/versions/65903709c321_allow_dml.py  |    8 +-
 .../versions/67a6ac9b727b_update_spatial_params.py |   22 +-
 .../6c7537a6004a_models_for_email_reports.py       |  119 +-
 superset/migrations/versions/705732c70154_.py      |    4 +-
 .../732f1c06bcbf_add_fetch_values_predicate.py     |   19 +-
 .../versions/7467e77870e4_remove_aggs.py           |   51 +-
 .../versions/763d4b211ec9_fixing_audit_fk.py       |  284 +--
 .../versions/7dbf98566af7_slice_description.py     |   10 +-
 .../versions/7e3ddad2a00b_results_key_to_query.py  |   10 +-
 .../versions/7f2635b51f5d_update_base_columns.py   |   58 +-
 superset/migrations/versions/7fcdcde0761c_.py      |   28 +-
 .../80a67c5192fa_single_pie_chart_metric.py        |   26 +-
 ...a3f04bc82_add_parent_ids_in_dashboard_layout.py |   57 +-
 .../versions/836c0bf75904_cache_timeouts.py        |   22 +-
 ...4f117f9_adding_extra_field_to_database_model.py |    8 +-
 superset/migrations/versions/8b70aa3d0f87_.py      |    4 +-
 superset/migrations/versions/8e80a26a31db_.py      |   27 +-
 .../versions/937d04c16b64_update_datasources.py    |   16 +-
 .../versions/956a063c52b3_adjusting_key_length.py  |  184 +-
 superset/migrations/versions/960c69cb1f5b_.py      |   18 +-
 superset/migrations/versions/979c03af3341_.py      |    4 +-
 .../versions/a2d606a761d9_adding_favstar_model.py  |   23 +-
 .../a33a03f16c4a_add_extra_column_to_savedquery.py |   12 +-
 .../versions/a61b40f9f57f_remove_allow_run_sync.py |   15 +-
 ...a65458420354_add_result_backend_time_logging.py |   14 +-
 .../a6c18f869a4e_query_start_running_time.py       |   12 +-
 ...7c195a_rewriting_url_from_shortner_with_new_.py |   29 +-
 .../a9c47e2c1547_add_impersonate_user_to_dbs.py    |    8 +-
 ...d66c4246e_add_cache_timeout_to_druid_cluster.py |    9 +-
 .../versions/ad4d656d92bc_add_avg_metric.py        |   17 +-
 .../versions/ad82a75afd82_add_query_model.py       |   73 +-
 .../versions/afb7730f6a9c_remove_empty_filters.py  |   13 +-
 ...er_sql_column_data_type_in_query_mysql_table.py |   26 +-
 ...dfe5fb6c_adding_verbose_name_to_druid_column.py |   10 +-
 superset/migrations/versions/b347b202819b_.py      |    4 +-
 .../b4456560d4f3_change_table_unique_constraint.py |   13 +-
 .../versions/b46fa1b0b39e_add_params_to_tables.py  |   10 +-
 .../bb51420eaf83_add_schema_to_table_model.py      |    8 +-
 .../versions/bcf3126872fc_add_keyvalue.py          |   15 +-
 .../versions/bddc498dd179_adhoc_filters.py         |   14 +-
 .../bebcf3fed1fe_convert_dashboard_v1_positions.py |  472 ++--
 .../bf706ae5eb46_cal_heatmap_metric_to_metrics.py  |   19 +-
 superset/migrations/versions/c18bd4186f15_.py      |    4 +-
 .../c3a8f8611885_materializing_permission.py       |   17 +-
 .../versions/c5756bec8b47_time_grain_sqla.py       |   14 +-
 .../migrations/versions/c611f2b591b8_dim_spec.py   |    8 +-
 .../versions/c617da68de7d_form_nullable.py         |   28 +-
 superset/migrations/versions/c829ff0b37d0_.py      |    4 +-
 .../versions/c82ee8a39623_add_implicit_tags.py     |   23 +-
 superset/migrations/versions/c9495751e314_.py      |    4 +-
 .../versions/ca69c70ec99b_tracking_url.py          |    8 +-
 ...d38_increase_size_of_name_column_in_ab_view_.py |   18 +-
 superset/migrations/versions/d2424a248d63_.py      |    4 +-
 superset/migrations/versions/d39b1e37131d_.py      |    4 +-
 superset/migrations/versions/d6db5a5cdb5d_.py      |    4 +-
 ...1a0d6f2da_remove_limit_used_from_query_model.py |   10 +-
 .../versions/d827694c7555_css_templates.py         |   29 +-
 ...d8bc074f7aad_add_new_field_is_restricted_to_.py |   30 +-
 .../migrations/versions/d94d33dbe938_form_strip.py |   28 +-
 .../db0c65b146bd_update_slice_model_json.py        |   13 +-
 .../versions/db527d8c4c78_add_db_verbose_name.py   |   23 +-
 .../versions/ddd6ebdd853b_annotations.py           |   69 +-
 superset/migrations/versions/de021a1ca60d_.py      |    4 +-
 superset/migrations/versions/e3970889f38e_.py      |    4 +-
 .../versions/e46f2d27a08e_materialize_perms.py     |   19 +-
 .../e502db2af7be_add_template_params_to_tables.py  |    9 +-
 .../versions/e553e78e90c5_add_druid_auth_py_py.py  |   17 +-
 ...8c4473c581_allow_multi_schema_metadata_fetch.py |   10 +-
 .../versions/e866bd2d4976_smaller_grid.py          |   42 +-
 .../versions/e9df189e5c7e_update_base_metrics.py   |   90 +-
 superset/migrations/versions/ea033256294a_.py      |    4 +-
 superset/migrations/versions/ec1f88a35cc6_.py      |    4 +-
 .../eca4694defa7_sqllab_setting_defaults.py        |    8 +-
 superset/migrations/versions/ef8843b41dac_.py      |    4 +-
 ...bf6129e13_adding_verbose_name_to_tablecolumn.py |   12 +-
 .../versions/f162a1dea4c4_d3format_by_metric.py    |   16 +-
 .../f18570e03440_add_query_result_key_index.py     |   10 +-
 superset/migrations/versions/f1f2d4af5b90_.py      |   19 +-
 superset/migrations/versions/f231d82b9b26_.py      |   33 +-
 superset/migrations/versions/f959a6652acd_.py      |    4 +-
 .../versions/fb13d49b72f9_better_filters.py        |   51 +-
 superset/migrations/versions/fbd55e0f83eb_.py      |    4 +-
 superset/migrations/versions/fc480c87706c_.py      |    4 +-
 superset/migrations/versions/fee7b758c130_.py      |    4 +-
 superset/models/__init__.py                        |    2 +-
 superset/models/annotations.py                     |   30 +-
 superset/models/core.py                            |  654 ++---
 superset/models/helpers.py                         |  179 +-
 superset/models/schedules.py                       |   50 +-
 superset/models/sql_lab.py                         |  119 +-
 superset/models/sql_types/presto_sql_types.py      |   54 +-
 superset/models/tags.py                            |   78 +-
 superset/models/user_attributes.py                 |   12 +-
 superset/security.py                               |  351 +--
 superset/sql_lab.py                                |  168 +-
 superset/sql_parse.py                              |   59 +-
 superset/sql_validators/__init__.py                |    4 +-
 superset/sql_validators/base.py                    |   33 +-
 superset/sql_validators/presto_db.py               |   72 +-
 superset/stats_logger.py                           |   34 +-
 superset/tasks/__init__.py                         |    4 +-
 superset/tasks/cache.py                            |  125 +-
 superset/tasks/schedules.py                        |  151 +-
 superset/translations/utils.py                     |    4 +-
 superset/utils/cache.py                            |   17 +-
 superset/utils/core.py                             |  593 +++--
 superset/utils/dashboard_import_export.py          |    9 +-
 superset/utils/decorators.py                       |   18 +-
 superset/utils/dict_import_export.py               |   59 +-
 superset/utils/import_datasource.py                |   30 +-
 superset/views/__init__.py                         |   12 +-
 superset/views/annotations.py                      |  101 +-
 superset/views/api.py                              |   12 +-
 superset/views/base.py                             |  168 +-
 superset/views/core.py                             | 2666 +++++++++++---------
 superset/views/dashboard.py                        |    9 +-
 superset/views/datasource.py                       |   56 +-
 superset/views/schedules.py                        |  231 +-
 superset/views/sql_lab.py                          |  142 +-
 superset/views/tags.py                             |  115 +-
 superset/views/utils.py                            |  104 +-
 superset/viz.py                                    | 1948 +++++++-------
 tests/access_tests.py                              |  509 ++--
 tests/base_tests.py                                |  125 +-
 tests/cache_tests.py                               |   26 +-
 tests/celery_tests.py                              |  149 +-
 tests/core_tests.py                                |  718 +++---
 tests/dashboard_tests.py                           |  397 ++-
 tests/dataframe_test.py                            |  132 +-
 tests/datasource_tests.py                          |   66 +-
 tests/db_engine_specs_test.py                      |  776 +++---
 tests/dict_import_export_tests.py                  |  302 +--
 tests/druid_func_tests.py                          | 1121 ++++----
 tests/druid_tests.py                               |  467 ++--
 tests/email_tests.py                               |  123 +-
 tests/fixtures/datasource.py                       |  230 +-
 tests/fixtures/pyodbcRow.py                        |    2 +-
 tests/form_tests.py                                |   18 +-
 tests/import_export_tests.py                       |  486 ++--
 tests/load_examples_test.py                        |    1 -
 tests/macro_tests.py                               |   65 +-
 tests/migration_tests.py                           |   21 +-
 tests/model_tests.py                               |  189 +-
 tests/schedules_test.py                            |  177 +-
 tests/security_tests.py                            |  366 +--
 tests/sql_parse_tests.py                           |  225 +-
 tests/sql_validator_tests.py                       |  120 +-
 tests/sqla_models_tests.py                         |   11 +-
 tests/sqllab_tests.py                              |  311 ++-
 tests/stats_logger_tests.py                        |   19 +-
 tests/strategy_tests.py                            |  170 +-
 tests/superset_test_config.py                      |   29 +-
 tests/utils.py                                     |    4 +-
 tests/utils_tests.py                               |  736 +++---
 tests/viz_tests.py                                 | 1087 ++++----
 tox.ini                                            |   53 +-
 270 files changed, 15627 insertions(+), 14842 deletions(-)
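
For context, the hunks below wire Black into the development workflow rather than documenting how to run it by hand. A minimal sketch of how a contributor would pick this up, assuming a checkout of the repository (commands inferred from the .pre-commit-config.yaml, requirements-dev.txt, and CONTRIBUTING.md changes that follow, not recorded in the commit itself):

```bash
# Install the dev requirements, which now pin black==19.3b0 and
# pre-commit==1.17.0, then register the Git hook so Black runs on commit.
pip3 install -r requirements-dev.txt
pre-commit install

# One-off reformat of the Python tree; Black's default 88-character line
# length matches the updated max-line-length in .pylintrc.
black superset tests setup.py
```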

diff --git a/requirements-dev.txt b/.pre-commit-config.yaml
similarity index 70%
copy from requirements-dev.txt
copy to .pre-commit-config.yaml
index 1b008ad..eb6274e 100644
--- a/requirements-dev.txt
+++ b/.pre-commit-config.yaml
@@ -14,22 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-coverage==4.5.3
-flake8-commas==2.0.0
-flake8-import-order==0.18.1
-flake8-mypy==17.8.0
-flake8-quotes==2.0.1
-flake8==3.7.7
-flask-cors==3.0.7
-ipdb==0.12
-mypy==0.670
-nose==1.3.7
-pip-tools==3.7.0
-psycopg2-binary==2.7.5
-pycodestyle==2.5.0
-pyhive==0.6.1
-pylint==1.9.2
-python-dotenv==0.10.1
-redis==2.10.6
-statsd==3.3.0
-tox==3.11.1
+repos:
+  - repo: https://github.com/ambv/black
+    rev: stable
+    hooks:
+      - id: black
+        language_version: python3.6
diff --git a/.pylintrc b/.pylintrc
index ec88ef0..04a53e0 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -81,7 +81,7 @@ confidence=
 # --enable=similarities". If you want to run only the classes checker, but have
 # no Warning level messages displayed, use"--disable=all --enable=classes
 # --disable=W"
-disable=standarderror-builtin,long-builtin,dict-view-method,intern-builtin,suppressed-message,no-absolute-import,unpacking-in-except,apply-builtin,delslice-method,indexing-exception,old-raise-syntax,print-statement,cmp-builtin,reduce-builtin,useless-suppression,coerce-method,input-builtin,cmp-method,raw_input-builtin,nonzero-method,backtick,basestring-builtin,setslice-method,reload-builtin,oct-method,map-builtin-not-iterating,execfile-builtin,old-octal-literal,zip-builtin-not-iterating,b [...]
+disable=standarderror-builtin,long-builtin,dict-view-method,intern-builtin,suppressed-message,no-absolute-import,unpacking-in-except,apply-builtin,delslice-method,indexing-exception,old-raise-syntax,print-statement,cmp-builtin,reduce-builtin,useless-suppression,coerce-method,input-builtin,cmp-method,raw_input-builtin,nonzero-method,backtick,basestring-builtin,setslice-method,reload-builtin,oct-method,map-builtin-not-iterating,execfile-builtin,old-octal-literal,zip-builtin-not-iterating,b [...]
 
 
 [REPORTS]
@@ -209,7 +209,7 @@ max-nested-blocks=5
 [FORMAT]
 
 # Maximum number of characters on a single line.
-max-line-length=90
+max-line-length=88
 
 # Regexp for a line that is allowed to be longer than the limit.
 ignore-long-lines=^\s*(# )?<?https?://\S+>?$
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 569cc91..d165ef4 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -296,9 +296,9 @@ python setup.py build_sphinx
 
 #### OS Dependencies
 
-Make sure your machine meets the [OS dependencies](https://superset.incubator.apache.org/installation.html#os-dependencies) before following these steps. 
+Make sure your machine meets the [OS dependencies](https://superset.incubator.apache.org/installation.html#os-dependencies) before following these steps.
 
-Developers should use a virtualenv. 
+Developers should use a virtualenv.
 
 ```
 pip install virtualenv
@@ -447,6 +447,15 @@ export enum FeatureFlag {
 those specified under FEATURE_FLAGS in `superset_config.py`. For example, `DEFAULT_FEATURE_FLAGS = { 'FOO': True, 'BAR': False }` in `superset/config.py` and `FEATURE_FLAGS = { 'BAR': True, 'BAZ': True }` in `superset_config.py` will result
 in combined feature flags of `{ 'FOO': True, 'BAR': True, 'BAZ': True }`.
 
+## Git Hooks
+
+Superset uses Git pre-commit hooks courtesy of [pre-commit](https://pre-commit.com/). To install run the following:
+
+```bash
+pip3 install -r requirements-dev.txt
+pre-commit install
+```
+
 ## Linting
 
 Lint the project with:
@@ -461,6 +470,10 @@ npm ci
 npm run lint
 ```
 
+The Python code is auto-formatted using [Black](https://github.com/python/black) which
+is configured as a pre-commit hook. There are also numerous [editor integrations](https://black.readthedocs.io/en/stable/editor_integration.html).
+
+
 ## Testing
 
 ### Python Testing
@@ -736,7 +749,7 @@ to work on `async` related features.
 
 To do this, you'll need to:
 * Add an additional database entry. We recommend you copy the connection
-  string from the database labeled `main`, and then enable `SQL Lab` and the 
+  string from the database labeled `main`, and then enable `SQL Lab` and the
   features you want to use. Don't forget to check the `Async` box
 * Configure a results backend, here's a local `FileSystemCache` example,
   not recommended for production,
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 1b008ad..52b341c 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -14,17 +14,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+black==19.3b0
 coverage==4.5.3
-flake8-commas==2.0.0
 flake8-import-order==0.18.1
 flake8-mypy==17.8.0
-flake8-quotes==2.0.1
 flake8==3.7.7
 flask-cors==3.0.7
 ipdb==0.12
 mypy==0.670
 nose==1.3.7
 pip-tools==3.7.0
+pre-commit==1.17.0
 psycopg2-binary==2.7.5
 pycodestyle==2.5.0
 pyhive==0.6.1
diff --git a/setup.py b/setup.py
index 8d3fcc6..dc21c38 100644
--- a/setup.py
+++ b/setup.py
@@ -23,113 +23,100 @@ import sys
 from setuptools import find_packages, setup
 
 if sys.version_info < (3, 6):
-    sys.exit('Sorry, Python < 3.6 is not supported')
+    sys.exit("Sorry, Python < 3.6 is not supported")
 
 BASE_DIR = os.path.abspath(os.path.dirname(__file__))
-PACKAGE_DIR = os.path.join(BASE_DIR, 'superset', 'static', 'assets')
-PACKAGE_FILE = os.path.join(PACKAGE_DIR, 'package.json')
+PACKAGE_DIR = os.path.join(BASE_DIR, "superset", "static", "assets")
+PACKAGE_FILE = os.path.join(PACKAGE_DIR, "package.json")
 with open(PACKAGE_FILE) as package_file:
-    version_string = json.load(package_file)['version']
+    version_string = json.load(package_file)["version"]
 
-with io.open('README.md', encoding='utf-8') as f:
+with io.open("README.md", encoding="utf-8") as f:
     long_description = f.read()
 
 
 def get_git_sha():
     try:
-        s = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
+        s = subprocess.check_output(["git", "rev-parse", "HEAD"])
         return s.decode().strip()
     except Exception:
-        return ''
+        return ""
 
 
 GIT_SHA = get_git_sha()
-version_info = {
-    'GIT_SHA': GIT_SHA,
-    'version': version_string,
-}
-print('-==-' * 15)
-print('VERSION: ' + version_string)
-print('GIT SHA: ' + GIT_SHA)
-print('-==-' * 15)
+version_info = {"GIT_SHA": GIT_SHA, "version": version_string}
+print("-==-" * 15)
+print("VERSION: " + version_string)
+print("GIT SHA: " + GIT_SHA)
+print("-==-" * 15)
 
-with open(os.path.join(PACKAGE_DIR, 'version_info.json'), 'w') as version_file:
+with open(os.path.join(PACKAGE_DIR, "version_info.json"), "w") as version_file:
     json.dump(version_info, version_file)
 
 
 setup(
-    name='apache-superset',
-    description=(
-        'A modern, enterprise-ready business intelligence web application'),
+    name="apache-superset",
+    description=("A modern, enterprise-ready business intelligence web application"),
     long_description=long_description,
-    long_description_content_type='text/markdown',
+    long_description_content_type="text/markdown",
     version=version_string,
     packages=find_packages(),
     include_package_data=True,
     zip_safe=False,
-    scripts=['superset/bin/superset'],
+    scripts=["superset/bin/superset"],
     install_requires=[
-        'bleach>=3.0.2, <4.0.0',
-        'celery>=4.2.0, <5.0.0',
-        'click>=6.0, <7.0.0',  # `click`>=7 forces "-" instead of "_"
-        'colorama',
-        'contextlib2',
-        'croniter>=0.3.28',
-        'cryptography>=2.4.2',
-        'flask>=1.0.0, <2.0.0',
-        'flask-appbuilder>=2.1.5, <2.3.0',
-        'flask-caching',
-        'flask-compress',
-        'flask-talisman',
-        'flask-migrate',
-        'flask-wtf',
-        'geopy',
-        'gunicorn',  # deprecated
-        'humanize',
-        'idna',
-        'isodate',
-        'markdown>=3.0',
-        'pandas>=0.18.0, <0.24.0',  # `pandas`>=0.24.0 changes datetimelike API
-        'parsedatetime',
-        'pathlib2',
-        'polyline',
-        'pydruid>=0.5.2',
-        'python-dateutil',
-        'python-dotenv',
-        'python-geohash',
-        'pyyaml>=5.1',
-        'requests>=2.22.0',
-        'retry>=0.9.2',
-        'selenium>=3.141.0',
-        'simplejson>=3.15.0',
-        'sqlalchemy>=1.3.5,<2.0',
-        'sqlalchemy-utils>=0.33.2',
-        'sqlparse',
-        'wtforms-json',
+        "bleach>=3.0.2, <4.0.0",
+        "celery>=4.2.0, <5.0.0",
+        "click>=6.0, <7.0.0",  # `click`>=7 forces "-" instead of "_"
+        "colorama",
+        "contextlib2",
+        "croniter>=0.3.28",
+        "cryptography>=2.4.2",
+        "flask>=1.0.0, <2.0.0",
+        "flask-appbuilder>=2.1.5, <2.3.0",
+        "flask-caching",
+        "flask-compress",
+        "flask-talisman",
+        "flask-migrate",
+        "flask-wtf",
+        "geopy",
+        "gunicorn",  # deprecated
+        "humanize",
+        "idna",
+        "isodate",
+        "markdown>=3.0",
+        "pandas>=0.18.0, <0.24.0",  # `pandas`>=0.24.0 changes datetimelike API
+        "parsedatetime",
+        "pathlib2",
+        "polyline",
+        "pydruid>=0.5.2",
+        "python-dateutil",
+        "python-dotenv",
+        "python-geohash",
+        "pyyaml>=5.1",
+        "requests>=2.22.0",
+        "retry>=0.9.2",
+        "selenium>=3.141.0",
+        "simplejson>=3.15.0",
+        "sqlalchemy>=1.3.5,<2.0",
+        "sqlalchemy-utils>=0.33.2",
+        "sqlparse",
+        "wtforms-json",
     ],
     extras_require={
-        'bigquery': [
-            'pybigquery>=0.4.10',
-            'pandas_gbq>=0.10.0',
-        ],
-        'cors': ['flask-cors>=2.0.0'],
-        'gsheets': ['gsheetsdb>=0.1.9'],
-        'hive': [
-            'pyhive[hive]>=0.6.1',
-            'tableschema',
-            'thrift>=0.11.0, <1.0.0',
-        ],
-        'mysql': ['mysqlclient==1.4.2.post1'],
-        'postgres': ['psycopg2-binary==2.7.5'],
-        'presto': ['pyhive[presto]>=0.4.0'],
+        "bigquery": ["pybigquery>=0.4.10", "pandas_gbq>=0.10.0"],
+        "cors": ["flask-cors>=2.0.0"],
+        "gsheets": ["gsheetsdb>=0.1.9"],
+        "hive": ["pyhive[hive]>=0.6.1", "tableschema", "thrift>=0.11.0, <1.0.0"],
+        "mysql": ["mysqlclient==1.4.2.post1"],
+        "postgres": ["psycopg2-binary==2.7.5"],
+        "presto": ["pyhive[presto]>=0.4.0"],
     },
-    author='Apache Software Foundation',
-    author_email='dev@superset.incubator.apache.org',
-    url='https://superset.apache.org/',
+    author="Apache Software Foundation",
+    author_email="dev@superset.incubator.apache.org",
+    url="https://superset.apache.org/",
     download_url=(
-        'https://dist.apache.org/repos/dist/release/superset/' + version_string
+        "https://dist.apache.org/repos/dist/release/superset/" + version_string
     ),
-    classifiers=[
-        'Programming Language :: Python :: 3.6',
-    ],
+    classifiers=["Programming Language :: Python :: 3.6"],
 )
diff --git a/superset/__init__.py b/superset/__init__.py
index d03f28e..2f13660 100644
--- a/superset/__init__.py
+++ b/superset/__init__.py
@@ -40,7 +40,7 @@ from superset.utils.core import pessimistic_connection_handling, setup_cache
 wtforms_json.init()
 
 APP_DIR = os.path.dirname(__file__)
-CONFIG_MODULE = os.environ.get('SUPERSET_CONFIG', 'superset.config')
+CONFIG_MODULE = os.environ.get("SUPERSET_CONFIG", "superset.config")
 
 if not os.path.exists(config.DATA_DIR):
     os.makedirs(config.DATA_DIR)
@@ -52,18 +52,18 @@ conf = app.config
 #################################################################
 # Handling manifest file logic at app start
 #################################################################
-MANIFEST_FILE = APP_DIR + '/static/assets/dist/manifest.json'
+MANIFEST_FILE = APP_DIR + "/static/assets/dist/manifest.json"
 manifest = {}
 
 
 def parse_manifest_json():
     global manifest
     try:
-        with open(MANIFEST_FILE, 'r') as f:
+        with open(MANIFEST_FILE, "r") as f:
             # the manifest includes non-entry files
             # we only need entries in templates
             full_manifest = json.load(f)
-            manifest = full_manifest.get('entrypoints', {})
+            manifest = full_manifest.get("entrypoints", {})
     except Exception:
         pass
 
@@ -72,14 +72,14 @@ def get_js_manifest_files(filename):
     if app.debug:
         parse_manifest_json()
     entry_files = manifest.get(filename, {})
-    return entry_files.get('js', [])
+    return entry_files.get("js", [])
 
 
 def get_css_manifest_files(filename):
     if app.debug:
         parse_manifest_json()
     entry_files = manifest.get(filename, {})
-    return entry_files.get('css', [])
+    return entry_files.get("css", [])
 
 
 def get_unloaded_chunks(files, loaded_chunks):
@@ -104,16 +104,16 @@ def get_manifest():
 
 #################################################################
 
-for bp in conf.get('BLUEPRINTS'):
+for bp in conf.get("BLUEPRINTS"):
     try:
         print("Registering blueprint: '{}'".format(bp.name))
         app.register_blueprint(bp)
     except Exception as e:
-        print('blueprint registration failed')
+        print("blueprint registration failed")
         logging.exception(e)
 
-if conf.get('SILENCE_FAB'):
-    logging.getLogger('flask_appbuilder').setLevel(logging.ERROR)
+if conf.get("SILENCE_FAB"):
+    logging.getLogger("flask_appbuilder").setLevel(logging.ERROR)
 
 if app.debug:
     app.logger.setLevel(logging.DEBUG)  # pylint: disable=no-member
@@ -121,44 +121,46 @@ else:
     # In production mode, add log handler to sys.stderr.
     app.logger.addHandler(logging.StreamHandler())  # pylint: disable=no-member
     app.logger.setLevel(logging.INFO)  # pylint: disable=no-member
-logging.getLogger('pyhive.presto').setLevel(logging.INFO)
+logging.getLogger("pyhive.presto").setLevel(logging.INFO)
 
 db = SQLA(app)
 
-if conf.get('WTF_CSRF_ENABLED'):
+if conf.get("WTF_CSRF_ENABLED"):
     csrf = CSRFProtect(app)
-    csrf_exempt_list = conf.get('WTF_CSRF_EXEMPT_LIST', [])
+    csrf_exempt_list = conf.get("WTF_CSRF_EXEMPT_LIST", [])
     for ex in csrf_exempt_list:
         csrf.exempt(ex)
 
 pessimistic_connection_handling(db.engine)
 
-cache = setup_cache(app, conf.get('CACHE_CONFIG'))
-tables_cache = setup_cache(app, conf.get('TABLE_NAMES_CACHE_CONFIG'))
+cache = setup_cache(app, conf.get("CACHE_CONFIG"))
+tables_cache = setup_cache(app, conf.get("TABLE_NAMES_CACHE_CONFIG"))
 
-migrate = Migrate(app, db, directory=APP_DIR + '/migrations')
+migrate = Migrate(app, db, directory=APP_DIR + "/migrations")
 
 # Logging configuration
-logging.basicConfig(format=app.config.get('LOG_FORMAT'))
-logging.getLogger().setLevel(app.config.get('LOG_LEVEL'))
+logging.basicConfig(format=app.config.get("LOG_FORMAT"))
+logging.getLogger().setLevel(app.config.get("LOG_LEVEL"))
 
-if app.config.get('ENABLE_TIME_ROTATE'):
-    logging.getLogger().setLevel(app.config.get('TIME_ROTATE_LOG_LEVEL'))
+if app.config.get("ENABLE_TIME_ROTATE"):
+    logging.getLogger().setLevel(app.config.get("TIME_ROTATE_LOG_LEVEL"))
     handler = TimedRotatingFileHandler(
-        app.config.get('FILENAME'),
-        when=app.config.get('ROLLOVER'),
-        interval=app.config.get('INTERVAL'),
-        backupCount=app.config.get('BACKUP_COUNT'))
+        app.config.get("FILENAME"),
+        when=app.config.get("ROLLOVER"),
+        interval=app.config.get("INTERVAL"),
+        backupCount=app.config.get("BACKUP_COUNT"),
+    )
     logging.getLogger().addHandler(handler)
 
-if app.config.get('ENABLE_CORS'):
+if app.config.get("ENABLE_CORS"):
     from flask_cors import CORS
-    CORS(app, **app.config.get('CORS_OPTIONS'))
 
-if app.config.get('ENABLE_PROXY_FIX'):
+    CORS(app, **app.config.get("CORS_OPTIONS"))
+
+if app.config.get("ENABLE_PROXY_FIX"):
     app.wsgi_app = ProxyFix(app.wsgi_app)
 
-if app.config.get('ENABLE_CHUNK_ENCODING'):
+if app.config.get("ENABLE_CHUNK_ENCODING"):
 
     class ChunkedEncodingFix(object):
         def __init__(self, app):
@@ -167,40 +169,41 @@ if app.config.get('ENABLE_CHUNK_ENCODING'):
         def __call__(self, environ, start_response):
             # Setting wsgi.input_terminated tells werkzeug.wsgi to ignore
             # content-length and read the stream till the end.
-            if environ.get('HTTP_TRANSFER_ENCODING', '').lower() == u'chunked':
-                environ['wsgi.input_terminated'] = True
+            if environ.get("HTTP_TRANSFER_ENCODING", "").lower() == u"chunked":
+                environ["wsgi.input_terminated"] = True
             return self.app(environ, start_response)
 
     app.wsgi_app = ChunkedEncodingFix(app.wsgi_app)
 
-if app.config.get('UPLOAD_FOLDER'):
+if app.config.get("UPLOAD_FOLDER"):
     try:
-        os.makedirs(app.config.get('UPLOAD_FOLDER'))
+        os.makedirs(app.config.get("UPLOAD_FOLDER"))
     except OSError:
         pass
 
-for middleware in app.config.get('ADDITIONAL_MIDDLEWARE'):
+for middleware in app.config.get("ADDITIONAL_MIDDLEWARE"):
     app.wsgi_app = middleware(app.wsgi_app)
 
 
 class MyIndexView(IndexView):
-    @expose('/')
+    @expose("/")
     def index(self):
-        return redirect('/superset/welcome')
+        return redirect("/superset/welcome")
 
 
-custom_sm = app.config.get('CUSTOM_SECURITY_MANAGER') or SupersetSecurityManager
+custom_sm = app.config.get("CUSTOM_SECURITY_MANAGER") or SupersetSecurityManager
 if not issubclass(custom_sm, SupersetSecurityManager):
     raise Exception(
         """Your CUSTOM_SECURITY_MANAGER must now extend SupersetSecurityManager,
          not FAB's security manager.
-         See [4565] in UPDATING.md""")
+         See [4565] in UPDATING.md"""
+    )
 
 with app.app_context():
     appbuilder = AppBuilder(
         app,
         db.session,
-        base_template='superset/base.html',
+        base_template="superset/base.html",
         indexview=MyIndexView,
         security_manager_class=custom_sm,
         update_perms=False,  # Run `superset init` to update FAB's perms
@@ -208,15 +211,15 @@ with app.app_context():
 
 security_manager = appbuilder.sm
 
-results_backend = app.config.get('RESULTS_BACKEND')
+results_backend = app.config.get("RESULTS_BACKEND")
 
 # Merge user defined feature flags with default feature flags
-_feature_flags = app.config.get('DEFAULT_FEATURE_FLAGS') or {}
-_feature_flags.update(app.config.get('FEATURE_FLAGS') or {})
+_feature_flags = app.config.get("DEFAULT_FEATURE_FLAGS") or {}
+_feature_flags.update(app.config.get("FEATURE_FLAGS") or {})
 
 
 def get_feature_flags():
-    GET_FEATURE_FLAGS_FUNC = app.config.get('GET_FEATURE_FLAGS_FUNC')
+    GET_FEATURE_FLAGS_FUNC = app.config.get("GET_FEATURE_FLAGS_FUNC")
     if GET_FEATURE_FLAGS_FUNC:
         return GET_FEATURE_FLAGS_FUNC(deepcopy(_feature_flags))
     return _feature_flags
@@ -228,22 +231,22 @@ def is_feature_enabled(feature):
 
 
 # Flask-Compress
-if conf.get('ENABLE_FLASK_COMPRESS'):
+if conf.get("ENABLE_FLASK_COMPRESS"):
     Compress(app)
 
-if app.config['TALISMAN_ENABLED']:
-    talisman_config = app.config.get('TALISMAN_CONFIG')
+if app.config["TALISMAN_ENABLED"]:
+    talisman_config = app.config.get("TALISMAN_CONFIG")
     Talisman(app, **talisman_config)
 
 # Hook that provides administrators a handle on the Flask APP
 # after initialization
-flask_app_mutator = app.config.get('FLASK_APP_MUTATOR')
+flask_app_mutator = app.config.get("FLASK_APP_MUTATOR")
 if flask_app_mutator:
     flask_app_mutator(app)
 
 from superset import views  # noqa
 
 # Registering sources
-module_datasource_map = app.config.get('DEFAULT_MODULE_DS_MAP')
-module_datasource_map.update(app.config.get('ADDITIONAL_MODULE_DS_MAP'))
+module_datasource_map = app.config.get("DEFAULT_MODULE_DS_MAP")
+module_datasource_map.update(app.config.get("ADDITIONAL_MODULE_DS_MAP"))
 ConnectorRegistry.register_sources(module_datasource_map)
diff --git a/superset/cli.py b/superset/cli.py
index 6691b01..cb363c2 100755
--- a/superset/cli.py
+++ b/superset/cli.py
@@ -26,11 +26,8 @@ from colorama import Fore, Style
 from pathlib2 import Path
 import yaml
 
-from superset import (
-    app, appbuilder, data, db, security_manager,
-)
-from superset.utils import (
-    core as utils, dashboard_import_export, dict_import_export)
+from superset import app, appbuilder, data, db, security_manager
+from superset.utils import core as utils, dashboard_import_export, dict_import_export
 
 config = app.config
 celery_app = utils.get_celery_app(config)
@@ -54,114 +51,128 @@ def init():
 
 
 @app.cli.command()
-@click.option('--verbose', '-v', is_flag=True, help='Show extra information')
+@click.option("--verbose", "-v", is_flag=True, help="Show extra information")
 def version(verbose):
     """Prints the current version number"""
-    print(Fore.BLUE + '-=' * 15)
-    print(Fore.YELLOW + 'Superset ' + Fore.CYAN + '{version}'.format(
-        version=config.get('VERSION_STRING')))
-    print(Fore.BLUE + '-=' * 15)
+    print(Fore.BLUE + "-=" * 15)
+    print(
+        Fore.YELLOW
+        + "Superset "
+        + Fore.CYAN
+        + "{version}".format(version=config.get("VERSION_STRING"))
+    )
+    print(Fore.BLUE + "-=" * 15)
     if verbose:
-        print('[DB] : ' + '{}'.format(db.engine))
+        print("[DB] : " + "{}".format(db.engine))
     print(Style.RESET_ALL)
 
 
 def load_examples_run(load_test_data):
-    print('Loading examples into {}'.format(db))
+    print("Loading examples into {}".format(db))
 
     data.load_css_templates()
 
-    print('Loading energy related dataset')
+    print("Loading energy related dataset")
     data.load_energy()
 
     print("Loading [World Bank's Health Nutrition and Population Stats]")
     data.load_world_bank_health_n_pop()
 
-    print('Loading [Birth names]')
+    print("Loading [Birth names]")
     data.load_birth_names()
 
-    print('Loading [Unicode test data]')
+    print("Loading [Unicode test data]")
     data.load_unicode_test_data()
 
     if not load_test_data:
-        print('Loading [Random time series data]')
+        print("Loading [Random time series data]")
         data.load_random_time_series_data()
 
-        print('Loading [Random long/lat data]')
+        print("Loading [Random long/lat data]")
         data.load_long_lat_data()
 
-        print('Loading [Country Map data]')
+        print("Loading [Country Map data]")
         data.load_country_map_data()
 
-        print('Loading [Multiformat time series]')
+        print("Loading [Multiformat time series]")
         data.load_multiformat_time_series()
 
-        print('Loading [Paris GeoJson]')
+        print("Loading [Paris GeoJson]")
         data.load_paris_iris_geojson()
 
-        print('Loading [San Francisco population polygons]')
+        print("Loading [San Francisco population polygons]")
         data.load_sf_population_polygons()
 
-        print('Loading [Flights data]')
+        print("Loading [Flights data]")
         data.load_flights()
 
-        print('Loading [BART lines]')
+        print("Loading [BART lines]")
         data.load_bart_lines()
 
-        print('Loading [Multi Line]')
+        print("Loading [Multi Line]")
         data.load_multi_line()
 
-        print('Loading [Misc Charts] dashboard')
+        print("Loading [Misc Charts] dashboard")
         data.load_misc_dashboard()
 
-        print('Loading DECK.gl demo')
+        print("Loading DECK.gl demo")
         data.load_deck_dash()
 
-    print('Loading [Tabbed dashboard]')
+    print("Loading [Tabbed dashboard]")
     data.load_tabbed_dashboard()
 
 
 @app.cli.command()
-@click.option('--load-test-data', '-t', is_flag=True, help='Load additional test data')
+@click.option("--load-test-data", "-t", is_flag=True, help="Load additional test data")
 def load_examples(load_test_data):
     """Loads a set of Slices and Dashboards and a supporting dataset """
     load_examples_run(load_test_data)
 
 
 @app.cli.command()
-@click.option('--datasource', '-d', help='Specify which datasource name to load, if '
-                                         'omitted, all datasources will be refreshed')
-@click.option('--merge', '-m', is_flag=True, default=False,
-              help="Specify using 'merge' property during operation. "
-                   'Default value is False.')
+@click.option(
+    "--datasource",
+    "-d",
+    help="Specify which datasource name to load, if "
+    "omitted, all datasources will be refreshed",
+)
+@click.option(
+    "--merge",
+    "-m",
+    is_flag=True,
+    default=False,
+    help="Specify using 'merge' property during operation. " "Default value is False.",
+)
 def refresh_druid(datasource, merge):
     """Refresh druid datasources"""
     session = db.session()
     from superset.connectors.druid.models import DruidCluster
+
     for cluster in session.query(DruidCluster).all():
         try:
-            cluster.refresh_datasources(datasource_name=datasource,
-                                        merge_flag=merge)
+            cluster.refresh_datasources(datasource_name=datasource, merge_flag=merge)
         except Exception as e:
-            print(
-                "Error while processing cluster '{}'\n{}".format(
-                    cluster, str(e)))
+            print("Error while processing cluster '{}'\n{}".format(cluster, str(e)))
             logging.exception(e)
         cluster.metadata_last_refreshed = datetime.now()
-        print(
-            'Refreshed metadata from cluster '
-            '[' + cluster.cluster_name + ']')
+        print("Refreshed metadata from cluster " "[" + cluster.cluster_name + "]")
     session.commit()
 
 
 @app.cli.command()
 @click.option(
-    '--path', '-p',
-    help='Path to a single JSON file or path containing multiple JSON files'
-         'files to import (*.json)')
+    "--path",
+    "-p",
+    help="Path to a single JSON file or path containing multiple JSON files"
+    "files to import (*.json)",
+)
 @click.option(
-    '--recursive', '-r', is_flag=True, default=False,
-    help='recursively search the path for json files')
+    "--recursive",
+    "-r",
+    is_flag=True,
+    default=False,
+    help="recursively search the path for json files",
+)
 def import_dashboards(path, recursive):
     """Import dashboards from JSON"""
     p = Path(path)
@@ -169,114 +180,135 @@ def import_dashboards(path, recursive):
     if p.is_file():
         files.append(p)
     elif p.exists() and not recursive:
-        files.extend(p.glob('*.json'))
+        files.extend(p.glob("*.json"))
     elif p.exists() and recursive:
-        files.extend(p.rglob('*.json'))
+        files.extend(p.rglob("*.json"))
     for f in files:
-        logging.info('Importing dashboard from file %s', f)
+        logging.info("Importing dashboard from file %s", f)
         try:
             with f.open() as data_stream:
-                dashboard_import_export.import_dashboards(
-                    db.session, data_stream)
+                dashboard_import_export.import_dashboards(db.session, data_stream)
         except Exception as e:
-            logging.error('Error when importing dashboard from file %s', f)
+            logging.error("Error when importing dashboard from file %s", f)
             logging.error(e)
 
 
 @app.cli.command()
 @click.option(
-    '--dashboard-file', '-f', default=None,
-    help='Specify the file to export to')
+    "--dashboard-file", "-f", default=None, help="Specify the file to export to"
+)
 @click.option(
-    '--print_stdout', '-p', is_flag=True, default=False,
-    help='Print JSON to stdout')
+    "--print_stdout", "-p", is_flag=True, default=False, help="Print JSON to stdout"
+)
 def export_dashboards(print_stdout, dashboard_file):
     """Export dashboards to JSON"""
     data = dashboard_import_export.export_dashboards(db.session)
     if print_stdout or not dashboard_file:
         print(data)
     if dashboard_file:
-        logging.info('Exporting dashboards to %s', dashboard_file)
-        with open(dashboard_file, 'w') as data_stream:
+        logging.info("Exporting dashboards to %s", dashboard_file)
+        with open(dashboard_file, "w") as data_stream:
             data_stream.write(data)
 
 
 @app.cli.command()
 @click.option(
-    '--path', '-p',
-    help='Path to a single YAML file or path containing multiple YAML '
-         'files to import (*.yaml or *.yml)')
+    "--path",
+    "-p",
+    help="Path to a single YAML file or path containing multiple YAML "
+    "files to import (*.yaml or *.yml)",
+)
 @click.option(
-    '--sync', '-s', 'sync', default='',
-    help='comma separated list of element types to synchronize '
-         'e.g. "metrics,columns" deletes metrics and columns in the DB '
-         'that are not specified in the YAML file')
+    "--sync",
+    "-s",
+    "sync",
+    default="",
+    help="comma seperated list of element types to synchronize "
+    'e.g. "metrics,columns" deletes metrics and columns in the DB '
+    "that are not specified in the YAML file",
+)
 @click.option(
-    '--recursive', '-r', is_flag=True, default=False,
-    help='recursively search the path for yaml files')
+    "--recursive",
+    "-r",
+    is_flag=True,
+    default=False,
+    help="recursively search the path for yaml files",
+)
 def import_datasources(path, sync, recursive):
     """Import datasources from YAML"""
-    sync_array = sync.split(',')
+    sync_array = sync.split(",")
     p = Path(path)
     files = []
     if p.is_file():
         files.append(p)
     elif p.exists() and not recursive:
-        files.extend(p.glob('*.yaml'))
-        files.extend(p.glob('*.yml'))
+        files.extend(p.glob("*.yaml"))
+        files.extend(p.glob("*.yml"))
     elif p.exists() and recursive:
-        files.extend(p.rglob('*.yaml'))
-        files.extend(p.rglob('*.yml'))
+        files.extend(p.rglob("*.yaml"))
+        files.extend(p.rglob("*.yml"))
     for f in files:
-        logging.info('Importing datasources from file %s', f)
+        logging.info("Importing datasources from file %s", f)
         try:
             with f.open() as data_stream:
                 dict_import_export.import_from_dict(
-                    db.session,
-                    yaml.safe_load(data_stream),
-                    sync=sync_array)
+                    db.session, yaml.safe_load(data_stream), sync=sync_array
+                )
         except Exception as e:
-            logging.error('Error when importing datasources from file %s', f)
+            logging.error("Error when importing datasources from file %s", f)
             logging.error(e)
 
 
 @app.cli.command()
 @click.option(
-    '--datasource-file', '-f', default=None,
-    help='Specify the file to export to')
+    "--datasource-file", "-f", default=None, help="Specify the file to export to"
+)
 @click.option(
-    '--print_stdout', '-p', is_flag=True, default=False,
-    help='Print YAML to stdout')
+    "--print_stdout", "-p", is_flag=True, default=False, help="Print YAML to stdout"
+)
 @click.option(
-    '--back-references', '-b', is_flag=True, default=False,
-    help='Include parent back references')
+    "--back-references",
+    "-b",
+    is_flag=True,
+    default=False,
+    help="Include parent back references",
+)
 @click.option(
-    '--include-defaults', '-d', is_flag=True, default=False,
-    help='Include fields containing defaults')
-def export_datasources(print_stdout, datasource_file,
-                       back_references, include_defaults):
+    "--include-defaults",
+    "-d",
+    is_flag=True,
+    default=False,
+    help="Include fields containing defaults",
+)
+def export_datasources(
+    print_stdout, datasource_file, back_references, include_defaults
+):
     """Export datasources to YAML"""
     data = dict_import_export.export_to_dict(
         session=db.session,
         recursive=True,
         back_references=back_references,
-        include_defaults=include_defaults)
+        include_defaults=include_defaults,
+    )
     if print_stdout or not datasource_file:
         yaml.safe_dump(data, stdout, default_flow_style=False)
     if datasource_file:
-        logging.info('Exporting datasources to %s', datasource_file)
-        with open(datasource_file, 'w') as data_stream:
+        logging.info("Exporting datasources to %s", datasource_file)
+        with open(datasource_file, "w") as data_stream:
             yaml.safe_dump(data, data_stream, default_flow_style=False)
 
 
 @app.cli.command()
 @click.option(
-    '--back-references', '-b', is_flag=True, default=False,
-    help='Include parent back references')
+    "--back-references",
+    "-b",
+    is_flag=True,
+    default=False,
+    help="Include parent back references",
+)
 def export_datasource_schema(back_references):
     """Export datasource YAML schema to stdout"""
-    data = dict_import_export.export_schema_to_dict(
-        back_references=back_references)
+    data = dict_import_export.export_schema_to_dict(back_references=back_references)
     yaml.safe_dump(data, stdout, default_flow_style=False)
 
 
@@ -284,47 +316,49 @@ def export_datasource_schema(back_references):
 def update_datasources_cache():
     """Refresh sqllab datasources cache"""
     from superset.models.core import Database
+
     for database in db.session.query(Database).all():
         if database.allow_multi_schema_metadata_fetch:
-            print('Fetching {} datasources ...'.format(database.name))
+            print("Fetching {} datasources ...".format(database.name))
             try:
                 database.get_all_table_names_in_database(
-                    force=True, cache=True, cache_timeout=24 * 60 * 60)
+                    force=True, cache=True, cache_timeout=24 * 60 * 60
+                )
                 database.get_all_view_names_in_database(
-                    force=True, cache=True, cache_timeout=24 * 60 * 60)
+                    force=True, cache=True, cache_timeout=24 * 60 * 60
+                )
             except Exception as e:
-                print('{}'.format(str(e)))
+                print("{}".format(str(e)))
 
 
 @app.cli.command()
 @click.option(
-    '--workers', '-w',
-    type=int,
-    help='Number of celery server workers to fire up')
+    "--workers", "-w", type=int, help="Number of celery server workers to fire up"
+)
 def worker(workers):
     """Starts a Superset worker for async SQL query execution."""
     logging.info(
         "The 'superset worker' command is deprecated. Please use the 'celery "
-        "worker' command instead.")
+        "worker' command instead."
+    )
     if workers:
         celery_app.conf.update(CELERYD_CONCURRENCY=workers)
-    elif config.get('SUPERSET_CELERY_WORKERS'):
+    elif config.get("SUPERSET_CELERY_WORKERS"):
         celery_app.conf.update(
-            CELERYD_CONCURRENCY=config.get('SUPERSET_CELERY_WORKERS'))
+            CELERYD_CONCURRENCY=config.get("SUPERSET_CELERY_WORKERS")
+        )
 
-    worker = celery_app.Worker(optimization='fair')
+    worker = celery_app.Worker(optimization="fair")
     worker.start()
 
 
 @app.cli.command()
 @click.option(
-    '-p', '--port',
-    default='5555',
-    help='Port on which to start the Flower process')
+    "-p", "--port", default="5555", help="Port on which to start the Flower process"
+)
 @click.option(
-    '-a', '--address',
-    default='localhost',
-    help='Address on which to run the service')
+    "-a", "--address", default="localhost", help="Address on which to run the service"
+)
 def flower(port, address):
     """Runs a Celery Flower web server
 
@@ -332,18 +366,19 @@ def flower(port, address):
     broker"""
     BROKER_URL = celery_app.conf.BROKER_URL
     cmd = (
-        'celery flower '
-        f'--broker={BROKER_URL} '
-        f'--port={port} '
-        f'--address={address} '
+        "celery flower "
+        f"--broker={BROKER_URL} "
+        f"--port={port} "
+        f"--address={address} "
     )
     logging.info(
         "The 'superset flower' command is deprecated. Please use the 'celery "
-        "flower' command instead.")
-    print(Fore.GREEN + 'Starting a Celery Flower instance')
-    print(Fore.BLUE + '-=' * 40)
+        "flower' command instead."
+    )
+    print(Fore.GREEN + "Starting a Celery Flower instance")
+    print(Fore.BLUE + "-=" * 40)
     print(Fore.YELLOW + cmd)
-    print(Fore.BLUE + '-=' * 40)
+    print(Fore.BLUE + "-=" * 40)
     Popen(cmd, shell=True).wait()
 
 
@@ -354,7 +389,7 @@ def load_test_users():
 
     Syncs permissions for those users/roles
     """
-    print(Fore.GREEN + 'Loading a set of users for unit tests')
+    print(Fore.GREEN + "Loading a set of users for unit tests")
     load_test_users_run()
 
 
@@ -364,51 +399,73 @@ def load_test_users_run():
 
     Syncs permissions for those users/roles
     """
-    if config.get('TESTING'):
+    if config.get("TESTING"):
         security_manager.sync_role_definitions()
-        gamma_sqllab_role = security_manager.add_role('gamma_sqllab')
-        for perm in security_manager.find_role('Gamma').permissions:
+        gamma_sqllab_role = security_manager.add_role("gamma_sqllab")
+        for perm in security_manager.find_role("Gamma").permissions:
             security_manager.add_permission_role(gamma_sqllab_role, perm)
         utils.get_or_create_main_db()
         db_perm = utils.get_main_database(security_manager.get_session).perm
-        security_manager.add_permission_view_menu('database_access', db_perm)
+        security_manager.add_permission_view_menu("database_access", db_perm)
         db_pvm = security_manager.find_permission_view_menu(
-            view_menu_name=db_perm, permission_name='database_access')
+            view_menu_name=db_perm, permission_name="database_access"
+        )
         gamma_sqllab_role.permissions.append(db_pvm)
-        for perm in security_manager.find_role('sql_lab').permissions:
+        for perm in security_manager.find_role("sql_lab").permissions:
             security_manager.add_permission_role(gamma_sqllab_role, perm)
 
-        admin = security_manager.find_user('admin')
+        admin = security_manager.find_user("admin")
         if not admin:
             security_manager.add_user(
-                'admin', 'admin', ' user', 'admin@fab.org',
-                security_manager.find_role('Admin'),
-                password='general')
-
-        gamma = security_manager.find_user('gamma')
+                "admin",
+                "admin",
+                " user",
+                "admin@fab.org",
+                security_manager.find_role("Admin"),
+                password="general",
+            )
+
+        gamma = security_manager.find_user("gamma")
         if not gamma:
             security_manager.add_user(
-                'gamma', 'gamma', 'user', 'gamma@fab.org',
-                security_manager.find_role('Gamma'),
-                password='general')
-
-        gamma2 = security_manager.find_user('gamma2')
+                "gamma",
+                "gamma",
+                "user",
+                "gamma@fab.org",
+                security_manager.find_role("Gamma"),
+                password="general",
+            )
+
+        gamma2 = security_manager.find_user("gamma2")
         if not gamma2:
             security_manager.add_user(
-                'gamma2', 'gamma2', 'user', 'gamma2@fab.org',
-                security_manager.find_role('Gamma'),
-                password='general')
-
-        gamma_sqllab_user = security_manager.find_user('gamma_sqllab')
+                "gamma2",
+                "gamma2",
+                "user",
+                "gamma2@fab.org",
+                security_manager.find_role("Gamma"),
+                password="general",
+            )
+
+        gamma_sqllab_user = security_manager.find_user("gamma_sqllab")
         if not gamma_sqllab_user:
             security_manager.add_user(
-                'gamma_sqllab', 'gamma_sqllab', 'user', 'gamma_sqllab@fab.org',
-                gamma_sqllab_role, password='general')
-
-        alpha = security_manager.find_user('alpha')
+                "gamma_sqllab",
+                "gamma_sqllab",
+                "user",
+                "gamma_sqllab@fab.org",
+                gamma_sqllab_role,
+                password="general",
+            )
+
+        alpha = security_manager.find_user("alpha")
         if not alpha:
             security_manager.add_user(
-                'alpha', 'alpha', 'user', 'alpha@fab.org',
-                security_manager.find_role('Alpha'),
-                password='general')
+                "alpha",
+                "alpha",
+                "user",
+                "alpha@fab.org",
+                security_manager.find_role("Alpha"),
+                password="general",
+            )
         security_manager.get_session.commit()
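
The reformatted load_test_users_run block above follows a simple find-or-create pattern against Flask-AppBuilder's security manager: look the user up, and only call add_user (username, first name, last name, e-mail, role, password) when it is missing. A minimal sketch of that pattern, assuming an application context and a configured security_manager; the username, e-mail domain and role below are made-up example values, not part of the commit:

    # a minimal sketch of the find-or-create pattern used above; example values only
    def ensure_user(security_manager, username, role_name):
        user = security_manager.find_user(username)
        if not user:
            user = security_manager.add_user(
                username,                       # username
                username,                       # first name
                "user",                         # last name
                f"{username}@example.com",      # e-mail (placeholder domain)
                security_manager.find_role(role_name),
                password="general",
            )
        return user

    # ensure_user(security_manager, "gamma_sqllab", "Gamma")
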
diff --git a/superset/common/query_context.py b/superset/common/query_context.py
index fd6298d..40e3291 100644
--- a/superset/common/query_context.py
+++ b/superset/common/query_context.py
@@ -32,7 +32,7 @@ from superset.utils.core import DTTM_ALIAS
 from .query_object import QueryObject
 
 config = app.config
-stats_logger = config.get('STATS_LOGGER')
+stats_logger = config.get("STATS_LOGGER")
 
 
 class QueryContext:
@@ -41,21 +41,21 @@ class QueryContext:
     to retrieve the data payload for a given viz.
     """
 
-    cache_type = 'df'
+    cache_type = "df"
     enforce_numerical_metrics = True
 
     # TODO: Type datasource and query_object dictionary with TypedDict when it becomes
     # a vanilla python type https://github.com/python/mypy/issues/5288
     def __init__(
-            self,
-            datasource: Dict,
-            queries: List[Dict],
-            force: bool = False,
-            custom_cache_timeout: int = None,
+        self,
+        datasource: Dict,
+        queries: List[Dict],
+        force: bool = False,
+        custom_cache_timeout: int = None,
     ):
-        self.datasource = ConnectorRegistry.get_datasource(datasource.get('type'),
-                                                           int(datasource.get('id')),  # noqa: E501, T400
-                                                           db.session)
+        self.datasource = ConnectorRegistry.get_datasource(
+            datasource.get("type"), int(datasource.get("id")), db.session  # noqa: T400
+        )
         self.queries = list(map(lambda query_obj: QueryObject(**query_obj), queries))
 
         self.force = force
@@ -72,7 +72,7 @@ class QueryContext:
         # support multiple queries from different data source.
 
         timestamp_format = None
-        if self.datasource.type == 'table':
+        if self.datasource.type == "table":
             dttm_col = self.datasource.get_col(query_object.granularity)
             if dttm_col:
                 timestamp_format = dttm_col.python_date_format
@@ -88,12 +88,13 @@ class QueryContext:
         # parsing logic
         if df is not None and not df.empty:
             if DTTM_ALIAS in df.columns:
-                if timestamp_format in ('epoch_s', 'epoch_ms'):
+                if timestamp_format in ("epoch_s", "epoch_ms"):
                     # Column has already been formatted as a timestamp.
                     df[DTTM_ALIAS] = df[DTTM_ALIAS].apply(pd.Timestamp)
                 else:
                     df[DTTM_ALIAS] = pd.to_datetime(
-                        df[DTTM_ALIAS], utc=False, format=timestamp_format)
+                        df[DTTM_ALIAS], utc=False, format=timestamp_format
+                    )
                 if self.datasource.offset:
                     df[DTTM_ALIAS] += timedelta(hours=self.datasource.offset)
                 df[DTTM_ALIAS] += query_object.time_shift
@@ -103,10 +104,10 @@ class QueryContext:
 
             df.replace([np.inf, -np.inf], np.nan)
         return {
-            'query': result.query,
-            'status': result.status,
-            'error_message': result.error_message,
-            'df': df,
+            "query": result.query,
+            "status": result.status,
+            "error_message": result.error_message,
+            "df": df,
         }
 
     def df_metrics_to_num(self, df, query_object):
@@ -114,23 +115,23 @@ class QueryContext:
         metrics = [metric for metric in query_object.metrics]
         for col, dtype in df.dtypes.items():
             if dtype.type == np.object_ and col in metrics:
-                df[col] = pd.to_numeric(df[col], errors='coerce')
+                df[col] = pd.to_numeric(df[col], errors="coerce")
 
     def get_data(self, df):
-        return df.to_dict(orient='records')
+        return df.to_dict(orient="records")
 
     def get_single_payload(self, query_obj):
         """Returns a payload of metadata and data"""
         payload = self.get_df_payload(query_obj)
-        df = payload.get('df')
-        status = payload.get('status')
+        df = payload.get("df")
+        status = payload.get("status")
         if status != utils.QueryStatus.FAILED:
             if df is not None and df.empty:
-                payload['error'] = 'No data'
+                payload["error"] = "No data"
             else:
-                payload['data'] = self.get_data(df)
-        if 'df' in payload:
-            del payload['df']
+                payload["data"] = self.get_data(df)
+        if "df" in payload:
+            del payload["df"]
         return payload
 
     def get_payload(self):
@@ -144,94 +145,94 @@ class QueryContext:
         if self.datasource.cache_timeout is not None:
             return self.datasource.cache_timeout
         if (
-                hasattr(self.datasource, 'database') and
-                self.datasource.database.cache_timeout) is not None:
+            hasattr(self.datasource, "database")
+            and self.datasource.database.cache_timeout
+        ) is not None:
             return self.datasource.database.cache_timeout
-        return config.get('CACHE_DEFAULT_TIMEOUT')
+        return config.get("CACHE_DEFAULT_TIMEOUT")
 
     def get_df_payload(self, query_obj, **kwargs):
         """Handles caching around the df paylod retrieval"""
-        cache_key = query_obj.cache_key(
-            datasource=self.datasource.uid, **kwargs) if query_obj else None
-        logging.info('Cache key: {}'.format(cache_key))
+        cache_key = (
+            query_obj.cache_key(datasource=self.datasource.uid, **kwargs)
+            if query_obj
+            else None
+        )
+        logging.info("Cache key: {}".format(cache_key))
         is_loaded = False
         stacktrace = None
         df = None
-        cached_dttm = datetime.utcnow().isoformat().split('.')[0]
+        cached_dttm = datetime.utcnow().isoformat().split(".")[0]
         cache_value = None
         status = None
-        query = ''
+        query = ""
         error_message = None
         if cache_key and cache and not self.force:
             cache_value = cache.get(cache_key)
             if cache_value:
-                stats_logger.incr('loaded_from_cache')
+                stats_logger.incr("loaded_from_cache")
                 try:
                     cache_value = pkl.loads(cache_value)
-                    df = cache_value['df']
-                    query = cache_value['query']
+                    df = cache_value["df"]
+                    query = cache_value["query"]
                     status = utils.QueryStatus.SUCCESS
                     is_loaded = True
                 except Exception as e:
                     logging.exception(e)
-                    logging.error('Error reading cache: ' +
-                                  utils.error_msg_from_exception(e))
-                logging.info('Serving from cache')
+                    logging.error(
+                        "Error reading cache: " + utils.error_msg_from_exception(e)
+                    )
+                logging.info("Serving from cache")
 
         if query_obj and not is_loaded:
             try:
                 query_result = self.get_query_result(query_obj)
-                status = query_result['status']
-                query = query_result['query']
-                error_message = query_result['error_message']
-                df = query_result['df']
+                status = query_result["status"]
+                query = query_result["query"]
+                error_message = query_result["error_message"]
+                df = query_result["df"]
                 if status != utils.QueryStatus.FAILED:
-                    stats_logger.incr('loaded_from_source')
+                    stats_logger.incr("loaded_from_source")
                     is_loaded = True
             except Exception as e:
                 logging.exception(e)
                 if not error_message:
-                    error_message = '{}'.format(e)
+                    error_message = "{}".format(e)
                 status = utils.QueryStatus.FAILED
                 stacktrace = traceback.format_exc()
 
-            if (
-                    is_loaded and
-                    cache_key and
-                    cache and
-                    status != utils.QueryStatus.FAILED):
+            if is_loaded and cache_key and cache and status != utils.QueryStatus.FAILED:
                 try:
                     cache_value = dict(
-                        dttm=cached_dttm,
-                        df=df if df is not None else None,
-                        query=query,
+                        dttm=cached_dttm, df=df if df is not None else None, query=query
                     )
-                    cache_binary = pkl.dumps(
-                        cache_value, protocol=pkl.HIGHEST_PROTOCOL)
+                    cache_binary = pkl.dumps(cache_value, protocol=pkl.HIGHEST_PROTOCOL)
 
-                    logging.info('Caching {} chars at key {}'.format(
-                        len(cache_binary), cache_key))
+                    logging.info(
+                        "Caching {} chars at key {}".format(
+                            len(cache_binary), cache_key
+                        )
+                    )
 
-                    stats_logger.incr('set_cache_key')
+                    stats_logger.incr("set_cache_key")
                     cache.set(
-                        cache_key,
-                        cache_value=cache_binary,
-                        timeout=self.cache_timeout)
+                        cache_key, cache_value=cache_binary, timeout=self.cache_timeout
+                    )
                 except Exception as e:
                     # cache.set call can fail if the backend is down or if
                     # the key is too large or whatever other reasons
-                    logging.warning('Could not cache key {}'.format(cache_key))
+                    logging.warning("Could not cache key {}".format(cache_key))
                     logging.exception(e)
                     cache.delete(cache_key)
         return {
-            'cache_key': cache_key,
-            'cached_dttm': cache_value['dttm'] if cache_value is not None else None,
-            'cache_timeout': self.cache_timeout,
-            'df': df,
-            'error': error_message,
-            'is_cached': cache_key is not None,
-            'query': query,
-            'status': status,
-            'stacktrace': stacktrace,
-            'rowcount': len(df.index) if df is not None else 0,
+            "cache_key": cache_key,
+            "cached_dttm": cache_value["dttm"] if cache_value is not None else None,
+            "cache_timeout": self.cache_timeout,
+            "df": df,
+            "error": error_message,
+            "is_cached": cache_key is not None,
+            "query": query,
+            "status": status,
+            "stacktrace": stacktrace,
+            "rowcount": len(df.index) if df is not None else 0,
         }
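
The get_df_payload changes above are purely cosmetic, but the cache round-trip they reformat is easy to miss in the diff noise: the DataFrame, the generated query and a timestamp are bundled into a dict, pickled with the highest protocol, and stored under the query object's cache key; on a hit the same dict is unpickled and the DataFrame and query are pulled back out. A minimal sketch of that round-trip, using a plain dict in place of the configured cache backend:

    import pickle as pkl
    from datetime import datetime

    import pandas as pd

    fake_cache = {}                      # stand-in for the configured cache backend
    cache_key = "example-cache-key"      # normally query_obj.cache_key(datasource=...)

    # write path: bundle the result and pickle it
    cache_value = dict(
        dttm=datetime.utcnow().isoformat().split(".")[0],
        df=pd.DataFrame({"metric": [1, 2, 3]}),
        query="SELECT ...",
    )
    fake_cache[cache_key] = pkl.dumps(cache_value, protocol=pkl.HIGHEST_PROTOCOL)

    # read path: unpickle and pull the DataFrame and query back out
    restored = pkl.loads(fake_cache[cache_key])
    df, query = restored["df"], restored["query"]
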
diff --git a/superset/common/query_object.py b/superset/common/query_object.py
index 553c0b9..f413031 100644
--- a/superset/common/query_object.py
+++ b/superset/common/query_object.py
@@ -27,6 +27,7 @@ from superset.utils import core as utils
 # TODO: Type Metrics dictionary with TypedDict when it becomes a vanilla python type
 # https://github.com/python/mypy/issues/5288
 
+
 class QueryObject:
     """
     The query object's schema matches the interfaces of DB connectors like sqla
@@ -34,25 +35,25 @@ class QueryObject:
     """
 
     def __init__(
-            self,
-            granularity: str,
-            metrics: List[Union[Dict, str]],
-            groupby: List[str] = None,
-            filters: List[str] = None,
-            time_range: Optional[str] = None,
-            time_shift: Optional[str] = None,
-            is_timeseries: bool = False,
-            timeseries_limit: int = 0,
-            row_limit: int = app.config.get('ROW_LIMIT'),
-            timeseries_limit_metric: Optional[Dict] = None,
-            order_desc: bool = True,
-            extras: Optional[Dict] = None,
-            prequeries: Optional[List[Dict]] = None,
-            is_prequery: bool = False,
-            columns: List[str] = None,
-            orderby: List[List] = None,
-            relative_start: str = app.config.get('DEFAULT_RELATIVE_START_TIME', 'today'),
-            relative_end: str = app.config.get('DEFAULT_RELATIVE_END_TIME', 'today'),
+        self,
+        granularity: str,
+        metrics: List[Union[Dict, str]],
+        groupby: List[str] = None,
+        filters: List[str] = None,
+        time_range: Optional[str] = None,
+        time_shift: Optional[str] = None,
+        is_timeseries: bool = False,
+        timeseries_limit: int = 0,
+        row_limit: int = app.config.get("ROW_LIMIT"),
+        timeseries_limit_metric: Optional[Dict] = None,
+        order_desc: bool = True,
+        extras: Optional[Dict] = None,
+        prequeries: Optional[List[Dict]] = None,
+        is_prequery: bool = False,
+        columns: List[str] = None,
+        orderby: List[List] = None,
+        relative_start: str = app.config.get("DEFAULT_RELATIVE_START_TIME", "today"),
+        relative_end: str = app.config.get("DEFAULT_RELATIVE_END_TIME", "today"),
     ):
         self.granularity = granularity
         self.from_dttm, self.to_dttm = utils.get_since_until(
@@ -69,7 +70,7 @@ class QueryObject:
         # Temporary solution for a backward compatibility issue
         # due to the new format of non-ad-hoc metrics.
         self.metrics = [
-            metric if 'expressionType' in metric else metric['label']   # noqa: T484
+            metric if "expressionType" in metric else metric["label"]  # noqa: T484
             for metric in metrics
         ]
         self.row_limit = row_limit
@@ -85,22 +86,22 @@ class QueryObject:
 
     def to_dict(self):
         query_object_dict = {
-            'granularity': self.granularity,
-            'from_dttm': self.from_dttm,
-            'to_dttm': self.to_dttm,
-            'is_timeseries': self.is_timeseries,
-            'groupby': self.groupby,
-            'metrics': self.metrics,
-            'row_limit': self.row_limit,
-            'filter': self.filter,
-            'timeseries_limit': self.timeseries_limit,
-            'timeseries_limit_metric': self.timeseries_limit_metric,
-            'order_desc': self.order_desc,
-            'prequeries': self.prequeries,
-            'is_prequery': self.is_prequery,
-            'extras': self.extras,
-            'columns': self.columns,
-            'orderby': self.orderby,
+            "granularity": self.granularity,
+            "from_dttm": self.from_dttm,
+            "to_dttm": self.to_dttm,
+            "is_timeseries": self.is_timeseries,
+            "groupby": self.groupby,
+            "metrics": self.metrics,
+            "row_limit": self.row_limit,
+            "filter": self.filter,
+            "timeseries_limit": self.timeseries_limit,
+            "timeseries_limit_metric": self.timeseries_limit_metric,
+            "order_desc": self.order_desc,
+            "prequeries": self.prequeries,
+            "is_prequery": self.is_prequery,
+            "extras": self.extras,
+            "columns": self.columns,
+            "orderby": self.orderby,
         }
         return query_object_dict
 
@@ -115,17 +116,14 @@ class QueryObject:
         cache_dict = self.to_dict()
         cache_dict.update(extra)
 
-        for k in ['from_dttm', 'to_dttm']:
+        for k in ["from_dttm", "to_dttm"]:
             del cache_dict[k]
         if self.time_range:
-            cache_dict['time_range'] = self.time_range
+            cache_dict["time_range"] = self.time_range
         json_data = self.json_dumps(cache_dict, sort_keys=True)
-        return hashlib.md5(json_data.encode('utf-8')).hexdigest()
+        return hashlib.md5(json_data.encode("utf-8")).hexdigest()
 
     def json_dumps(self, obj, sort_keys=False):
         return json.dumps(
-            obj,
-            default=utils.json_int_dttm_ser,
-            ignore_nan=True,
-            sort_keys=sort_keys,
+            obj, default=utils.json_int_dttm_ser, ignore_nan=True, sort_keys=sort_keys
         )
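
The cache_key hunk above only changes quoting, but the hashing scheme is worth spelling out: the query object is serialized to a dict, the resolved from_dttm/to_dttm values are dropped so that a relative time_range such as "Last week" hashes to the same key across requests, the dict is JSON-encoded with sorted keys, and the MD5 hex digest of that JSON becomes the key. A standalone sketch of the idea (it substitutes default=str for Superset's json_int_dttm_ser serializer):

    import hashlib
    import json

    def sketch_cache_key(query_object_dict, time_range=None, **extra):
        # copy, fold in any extra key/value pairs, then drop the resolved datetimes
        # so that relative ranges hash to the same key across requests
        cache_dict = dict(query_object_dict, **extra)
        for k in ("from_dttm", "to_dttm"):
            cache_dict.pop(k, None)
        if time_range:
            cache_dict["time_range"] = time_range
        json_data = json.dumps(cache_dict, sort_keys=True, default=str)
        return hashlib.md5(json_data.encode("utf-8")).hexdigest()

    # sketch_cache_key({"granularity": "ds", "metrics": ["count"]}, time_range="Last week")
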
diff --git a/superset/config.py b/superset/config.py
index 68ce8b5..4261b54 100644
--- a/superset/config.py
+++ b/superset/config.py
@@ -37,18 +37,18 @@ from superset.stats_logger import DummyStatsLogger
 STATS_LOGGER = DummyStatsLogger()
 
 BASE_DIR = os.path.abspath(os.path.dirname(__file__))
-if 'SUPERSET_HOME' in os.environ:
-    DATA_DIR = os.environ['SUPERSET_HOME']
+if "SUPERSET_HOME" in os.environ:
+    DATA_DIR = os.environ["SUPERSET_HOME"]
 else:
-    DATA_DIR = os.path.join(os.path.expanduser('~'), '.superset')
+    DATA_DIR = os.path.join(os.path.expanduser("~"), ".superset")
 
 # ---------------------------------------------------------
 # Superset specific config
 # ---------------------------------------------------------
-PACKAGE_DIR = os.path.join(BASE_DIR, 'static', 'assets')
-PACKAGE_FILE = os.path.join(PACKAGE_DIR, 'package.json')
+PACKAGE_DIR = os.path.join(BASE_DIR, "static", "assets")
+PACKAGE_FILE = os.path.join(PACKAGE_DIR, "package.json")
 with open(PACKAGE_FILE) as package_file:
-    VERSION_STRING = json.load(package_file)['version']
+    VERSION_STRING = json.load(package_file)["version"]
 
 ROW_LIMIT = 50000
 VIZ_ROW_LIMIT = 10000
@@ -57,7 +57,7 @@ FILTER_SELECT_ROW_LIMIT = 10000
 SUPERSET_WORKERS = 2  # deprecated
 SUPERSET_CELERY_WORKERS = 32  # deprecated
 
-SUPERSET_WEBSERVER_ADDRESS = '0.0.0.0'
+SUPERSET_WEBSERVER_ADDRESS = "0.0.0.0"
 SUPERSET_WEBSERVER_PORT = 8088
 
 # This is an important setting, and should be lower than your
@@ -73,10 +73,10 @@ SQLALCHEMY_TRACK_MODIFICATIONS = False
 # ---------------------------------------------------------
 
 # Your App secret key
-SECRET_KEY = '\2\1thisismyscretkey\1\2\e\y\y\h'  # noqa
+SECRET_KEY = "\2\1thisismyscretkey\1\2\e\y\y\h"  # noqa
 
 # The SQLAlchemy connection string.
-SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(DATA_DIR, 'superset.db')
+SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join(DATA_DIR, "superset.db")
 # SQLALCHEMY_DATABASE_URI = 'mysql://myapp@localhost/myapp'
 # SQLALCHEMY_DATABASE_URI = 'postgresql://root:password@localhost/myapp'
 
@@ -96,10 +96,10 @@ QUERY_SEARCH_LIMIT = 1000
 WTF_CSRF_ENABLED = True
 
 # Add endpoints that need to be exempt from CSRF protection
-WTF_CSRF_EXEMPT_LIST = ['superset.views.core.log']
+WTF_CSRF_EXEMPT_LIST = ["superset.views.core.log"]
 
 # Whether to run the web server in debug mode or not
-DEBUG = os.environ.get('FLASK_ENV') == 'development'
+DEBUG = os.environ.get("FLASK_ENV") == "development"
 FLASK_USE_RELOAD = True
 
 # Whether to show the stacktrace on 500 error
@@ -112,10 +112,10 @@ ENABLE_PROXY_FIX = False
 # GLOBALS FOR APP Builder
 # ------------------------------
 # Uncomment to setup Your App name
-APP_NAME = 'Superset'
+APP_NAME = "Superset"
 
 # Uncomment to setup an App icon
-APP_ICON = '/static/assets/images/superset-logo@2x.png'
+APP_ICON = "/static/assets/images/superset-logo@2x.png"
 APP_ICON_WIDTH = 126
 
 # Uncomment to specify where clicking the logo would take the user
@@ -131,7 +131,7 @@ LOGO_TARGET_PATH = None
 # other tz can be overridden by providing a local_config
 DRUID_IS_ACTIVE = True
 DRUID_TZ = tz.tzutc()
-DRUID_ANALYSIS_TYPES = ['cardinality']
+DRUID_ANALYSIS_TYPES = ["cardinality"]
 
 # ----------------------------------------------------
 # AUTHENTICATION CONFIG
@@ -175,21 +175,21 @@ PUBLIC_ROLE_LIKE_GAMMA = False
 # Babel config for translations
 # ---------------------------------------------------
 # Setup default language
-BABEL_DEFAULT_LOCALE = 'en'
+BABEL_DEFAULT_LOCALE = "en"
 # Your application default translation path
-BABEL_DEFAULT_FOLDER = 'superset/translations'
+BABEL_DEFAULT_FOLDER = "superset/translations"
 # The allowed translation for you app
 LANGUAGES = {
-    'en': {'flag': 'us', 'name': 'English'},
-    'it': {'flag': 'it', 'name': 'Italian'},
-    'fr': {'flag': 'fr', 'name': 'French'},
-    'zh': {'flag': 'cn', 'name': 'Chinese'},
-    'ja': {'flag': 'jp', 'name': 'Japanese'},
-    'de': {'flag': 'de', 'name': 'German'},
-    'pt': {'flag': 'pt', 'name': 'Portuguese'},
-    'pt_BR': {'flag': 'br', 'name': 'Brazilian Portuguese'},
-    'ru': {'flag': 'ru', 'name': 'Russian'},
-    'ko': {'flag': 'kr', 'name': 'Korean'},
+    "en": {"flag": "us", "name": "English"},
+    "it": {"flag": "it", "name": "Italian"},
+    "fr": {"flag": "fr", "name": "French"},
+    "zh": {"flag": "cn", "name": "Chinese"},
+    "ja": {"flag": "jp", "name": "Japanese"},
+    "de": {"flag": "de", "name": "German"},
+    "pt": {"flag": "pt", "name": "Portuguese"},
+    "pt_BR": {"flag": "br", "name": "Brazilian Portuguese"},
+    "ru": {"flag": "ru", "name": "Russian"},
+    "ko": {"flag": "kr", "name": "Korean"},
 }
 
 # ---------------------------------------------------
@@ -202,7 +202,7 @@ LANGUAGES = {
 # will result in combined feature flags of { 'FOO': True, 'BAR': True, 'BAZ': True }
 DEFAULT_FEATURE_FLAGS = {
     # Experimental feature introducing a client (browser) cache
-    'CLIENT_CACHE': False,
+    "CLIENT_CACHE": False
 }
 
 # A function that receives a dict of all feature flags
@@ -225,19 +225,19 @@ GET_FEATURE_FLAGS_FUNC = None
 # Image and file configuration
 # ---------------------------------------------------
 # The file upload folder, when using models with files
-UPLOAD_FOLDER = BASE_DIR + '/app/static/uploads/'
+UPLOAD_FOLDER = BASE_DIR + "/app/static/uploads/"
 
 # The image upload folder, when using models with images
-IMG_UPLOAD_FOLDER = BASE_DIR + '/app/static/uploads/'
+IMG_UPLOAD_FOLDER = BASE_DIR + "/app/static/uploads/"
 
 # The image upload url, when using models with images
-IMG_UPLOAD_URL = '/static/uploads/'
+IMG_UPLOAD_URL = "/static/uploads/"
 # Setup image size default is (300, 200, True)
 # IMG_SIZE = (300, 200, True)
 
 CACHE_DEFAULT_TIMEOUT = 60 * 60 * 24
-CACHE_CONFIG = {'CACHE_TYPE': 'null'}
-TABLE_NAMES_CACHE_CONFIG = {'CACHE_TYPE': 'null'}
+CACHE_CONFIG = {"CACHE_TYPE": "null"}
+TABLE_NAMES_CACHE_CONFIG = {"CACHE_TYPE": "null"}
 
 # CORS Options
 ENABLE_CORS = False
@@ -252,13 +252,12 @@ SUPERSET_WEBSERVER_DOMAINS = None
 
 # Allowed format types for upload on Database view
 # TODO: Add processing of other spreadsheet formats (xls, xlsx etc)
-ALLOWED_EXTENSIONS = set(['csv'])
+ALLOWED_EXTENSIONS = set(["csv"])
 
-# CSV Options: key/value pairs that will be passed as argument to DataFrame.to_csv method
+# CSV Options: key/value pairs that will be passed as argument to DataFrame.to_csv
+# method.
 # note: index option should not be overridden
-CSV_EXPORT = {
-    'encoding': 'utf-8',
-}
+CSV_EXPORT = {"encoding": "utf-8"}
 
 # ---------------------------------------------------
 # Time grain configurations
@@ -301,10 +300,12 @@ DRUID_DATA_SOURCE_BLACKLIST = []
 # --------------------------------------------------
 # Modules, datasources and middleware to be registered
 # --------------------------------------------------
-DEFAULT_MODULE_DS_MAP = OrderedDict([
-    ('superset.connectors.sqla.models', ['SqlaTable']),
-    ('superset.connectors.druid.models', ['DruidDatasource']),
-])
+DEFAULT_MODULE_DS_MAP = OrderedDict(
+    [
+        ("superset.connectors.sqla.models", ["SqlaTable"]),
+        ("superset.connectors.druid.models", ["DruidDatasource"]),
+    ]
+)
 ADDITIONAL_MODULE_DS_MAP = {}
 ADDITIONAL_MIDDLEWARE = []
 
@@ -315,8 +316,8 @@ ADDITIONAL_MIDDLEWARE = []
 
 # Console Log Settings
 
-LOG_FORMAT = '%(asctime)s:%(levelname)s:%(name)s:%(message)s'
-LOG_LEVEL = 'DEBUG'
+LOG_FORMAT = "%(asctime)s:%(levelname)s:%(name)s:%(message)s"
+LOG_LEVEL = "DEBUG"
 
 # ---------------------------------------------------
 # Enable Time Rotate Log Handler
@@ -324,9 +325,9 @@ LOG_LEVEL = 'DEBUG'
 # LOG_LEVEL = DEBUG, INFO, WARNING, ERROR, CRITICAL
 
 ENABLE_TIME_ROTATE = False
-TIME_ROTATE_LOG_LEVEL = 'DEBUG'
-FILENAME = os.path.join(DATA_DIR, 'superset.log')
-ROLLOVER = 'midnight'
+TIME_ROTATE_LOG_LEVEL = "DEBUG"
+FILENAME = os.path.join(DATA_DIR, "superset.log")
+ROLLOVER = "midnight"
 INTERVAL = 1
 BACKUP_COUNT = 30
 
@@ -344,7 +345,7 @@ BACKUP_COUNT = 30
 #     pass
 
 # Set this API key to enable Mapbox visualizations
-MAPBOX_API_KEY = os.environ.get('MAPBOX_API_KEY', '')
+MAPBOX_API_KEY = os.environ.get("MAPBOX_API_KEY", "")
 
 # Maximum number of rows returned from a database
 # in async mode, no more than SQL_MAX_ROW will be returned and stored
@@ -378,31 +379,26 @@ WARNING_MSG = None
 
 
 class CeleryConfig(object):
-    BROKER_URL = 'sqla+sqlite:///celerydb.sqlite'
-    CELERY_IMPORTS = (
-        'superset.sql_lab',
-        'superset.tasks',
-    )
-    CELERY_RESULT_BACKEND = 'db+sqlite:///celery_results.sqlite'
-    CELERYD_LOG_LEVEL = 'DEBUG'
+    BROKER_URL = "sqla+sqlite:///celerydb.sqlite"
+    CELERY_IMPORTS = ("superset.sql_lab", "superset.tasks")
+    CELERY_RESULT_BACKEND = "db+sqlite:///celery_results.sqlite"
+    CELERYD_LOG_LEVEL = "DEBUG"
     CELERYD_PREFETCH_MULTIPLIER = 1
     CELERY_ACKS_LATE = True
     CELERY_ANNOTATIONS = {
-        'sql_lab.get_sql_results': {
-            'rate_limit': '100/s',
-        },
-        'email_reports.send': {
-            'rate_limit': '1/s',
-            'time_limit': 120,
-            'soft_time_limit': 150,
-            'ignore_result': True,
+        "sql_lab.get_sql_results": {"rate_limit": "100/s"},
+        "email_reports.send": {
+            "rate_limit": "1/s",
+            "time_limit": 120,
+            "soft_time_limit": 150,
+            "ignore_result": True,
         },
     }
     CELERYBEAT_SCHEDULE = {
-        'email_reports.schedule_hourly': {
-            'task': 'email_reports.schedule_hourly',
-            'schedule': crontab(minute=1, hour='*'),
-        },
+        "email_reports.schedule_hourly": {
+            "task": "email_reports.schedule_hourly",
+            "schedule": crontab(minute=1, hour="*"),
+        }
     }
 
 
@@ -444,7 +440,7 @@ CSV_TO_HIVE_UPLOAD_S3_BUCKET = None
 
 # The directory within the bucket specified above that will
 # contain all the external tables
-CSV_TO_HIVE_UPLOAD_DIRECTORY = 'EXTERNAL_HIVE_TABLES/'
+CSV_TO_HIVE_UPLOAD_DIRECTORY = "EXTERNAL_HIVE_TABLES/"
 
 # The namespace within hive where the tables created from
 # uploading CSVs will be stored.
@@ -458,9 +454,9 @@ JINJA_CONTEXT_ADDONS = {}
 
 # Roles that are controlled by the API / Superset and should not be changes
 # by humans.
-ROBOT_PERMISSION_ROLES = ['Public', 'Gamma', 'Alpha', 'Admin', 'sql_lab']
+ROBOT_PERMISSION_ROLES = ["Public", "Gamma", "Alpha", "Admin", "sql_lab"]
 
-CONFIG_PATH_ENV_VAR = 'SUPERSET_CONFIG_PATH'
+CONFIG_PATH_ENV_VAR = "SUPERSET_CONFIG_PATH"
 
 # If a callable is specified, it will be called at app startup while passing
 # a reference to the Flask app. This can be used to alter the Flask app
@@ -474,16 +470,16 @@ ENABLE_ACCESS_REQUEST = False
 
 # smtp server configuration
 EMAIL_NOTIFICATIONS = False  # all the emails are sent using dryrun
-SMTP_HOST = 'localhost'
+SMTP_HOST = "localhost"
 SMTP_STARTTLS = True
 SMTP_SSL = False
-SMTP_USER = 'superset'
+SMTP_USER = "superset"
 SMTP_PORT = 25
-SMTP_PASSWORD = 'superset'
-SMTP_MAIL_FROM = 'superset@superset.com'
+SMTP_PASSWORD = "superset"
+SMTP_MAIL_FROM = "superset@superset.com"
 
 if not CACHE_DEFAULT_TIMEOUT:
-    CACHE_DEFAULT_TIMEOUT = CACHE_CONFIG.get('CACHE_DEFAULT_TIMEOUT')
+    CACHE_DEFAULT_TIMEOUT = CACHE_CONFIG.get("CACHE_DEFAULT_TIMEOUT")
 
 # Whether to bump the logging level to ERROR on the flask_appbuilder package
 # Set to False if/when debugging FAB related issues like
@@ -492,14 +488,14 @@ SILENCE_FAB = True
 
 # The link to a page containing common errors and their resolutions
 # It will be appended at the bottom of sql_lab errors.
-TROUBLESHOOTING_LINK = ''
+TROUBLESHOOTING_LINK = ""
 
 # CSRF token timeout, set to None for a token that never expires
 WTF_CSRF_TIME_LIMIT = 60 * 60 * 24 * 7
 
 # This link should lead to a page with instructions on how to gain access to a
 # Datasource. It will be placed at the bottom of permissions errors.
-PERMISSION_INSTRUCTIONS_LINK = ''
+PERMISSION_INSTRUCTIONS_LINK = ""
 
 # Integrate external Blueprints to the app by passing them to your
 # configuration. These blueprints will get integrated in the app
@@ -565,7 +561,7 @@ EMAIL_REPORTS_CRON_RESOLUTION = 15
 
 # Email report configuration
 # From address in emails
-EMAIL_REPORT_FROM_ADDRESS = 'reports@superset.org'
+EMAIL_REPORT_FROM_ADDRESS = "reports@superset.org"
 
 # Send bcc of all reports to this address. Set to None to disable.
 # This is useful for maintaining an audit trail of all email deliveries.
@@ -575,8 +571,8 @@ EMAIL_REPORT_BCC_ADDRESS = None
 # This user should have permissions to browse all the dashboards and
 # slices.
 # TODO: In the future, login as the owner of the item to generate reports
-EMAIL_REPORTS_USER = 'admin'
-EMAIL_REPORTS_SUBJECT_PREFIX = '[Report] '
+EMAIL_REPORTS_USER = "admin"
+EMAIL_REPORTS_SUBJECT_PREFIX = "[Report] "
 
 # The webdriver to use for generating reports. Use one of the following
 # firefox
@@ -585,19 +581,16 @@ EMAIL_REPORTS_SUBJECT_PREFIX = '[Report] '
 # chrome:
 #   Requires: headless chrome
 #   Limitations: unable to generate screenshots of elements
-EMAIL_REPORTS_WEBDRIVER = 'firefox'
+EMAIL_REPORTS_WEBDRIVER = "firefox"
 
 # Window size - this will impact the rendering of the data
-WEBDRIVER_WINDOW = {
-    'dashboard': (1600, 2000),
-    'slice': (3000, 1200),
-}
+WEBDRIVER_WINDOW = {"dashboard": (1600, 2000), "slice": (3000, 1200)}
 
 # Any config options to be passed as-is to the webdriver
 WEBDRIVER_CONFIGURATION = {}
 
 # The base URL to query for accessing the user interface
-WEBDRIVER_BASEURL = 'http://0.0.0.0:8080/'
+WEBDRIVER_BASEURL = "http://0.0.0.0:8080/"
 
 # Send user to a link where they can report bugs
 BUG_REPORT_URL = None
@@ -611,33 +604,34 @@ DOCUMENTATION_URL = None
 # filter a moving window. By only setting the end time to now,
 # start time will be set to midnight, while end will be relative to
 # the query issue time.
-DEFAULT_RELATIVE_START_TIME = 'today'
-DEFAULT_RELATIVE_END_TIME = 'today'
+DEFAULT_RELATIVE_START_TIME = "today"
+DEFAULT_RELATIVE_END_TIME = "today"
 
 # Configure which SQL validator to use for each engine
-SQL_VALIDATORS_BY_ENGINE = {
-    'presto': 'PrestoDBSQLValidator',
-}
+SQL_VALIDATORS_BY_ENGINE = {"presto": "PrestoDBSQLValidator"}
 
 # Do you want Talisman enabled?
 TALISMAN_ENABLED = False
 # If you want Talisman, how do you want it configured??
 TALISMAN_CONFIG = {
-    'content_security_policy': None,
-    'force_https': True,
-    'force_https_permanent': False,
+    "content_security_policy": None,
+    "force_https": True,
+    "force_https_permanent": False,
 }
 
 try:
     if CONFIG_PATH_ENV_VAR in os.environ:
         # Explicitly import config module that is not in pythonpath; useful
         # for case where app is being executed via pex.
-        print('Loaded your LOCAL configuration at [{}]'.format(
-            os.environ[CONFIG_PATH_ENV_VAR]))
+        print(
+            "Loaded your LOCAL configuration at [{}]".format(
+                os.environ[CONFIG_PATH_ENV_VAR]
+            )
+        )
         module = sys.modules[__name__]
         override_conf = imp.load_source(
-            'superset_config',
-            os.environ[CONFIG_PATH_ENV_VAR])
+            "superset_config", os.environ[CONFIG_PATH_ENV_VAR]
+        )
         for key in dir(override_conf):
             if key.isupper():
                 setattr(module, key, getattr(override_conf, key))
@@ -645,7 +639,9 @@ try:
     else:
         from superset_config import *  # noqa
         import superset_config
-        print('Loaded your LOCAL configuration at [{}]'.format(
-            superset_config.__file__))
+
+        print(
+            "Loaded your LOCAL configuration at [{}]".format(superset_config.__file__)
+        )
 except ImportError:
     pass
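
The tail of config.py above shows how local overrides are applied: either a superset_config module importable from the PYTHONPATH, or a file pointed to by the SUPERSET_CONFIG_PATH environment variable, is loaded and every upper-case attribute is copied onto the config module. A minimal example of such an override file; the database URI and Redis settings are placeholders, not values taken from the commit:

    # superset_config.py -- picked up from the PYTHONPATH or via SUPERSET_CONFIG_PATH
    ROW_LIMIT = 100000
    SQLALCHEMY_DATABASE_URI = "postgresql://user:password@localhost/superset"  # placeholder

    CACHE_CONFIG = {
        "CACHE_TYPE": "redis",                 # assumes a Redis-backed flask cache
        "CACHE_DEFAULT_TIMEOUT": 60 * 60 * 24,
        "CACHE_KEY_PREFIX": "superset_",
        "CACHE_REDIS_URL": "redis://localhost:6379/0",
    }

    # lower-case names are skipped; the loader only copies attributes where key.isupper()
    this_is_ignored = True
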
diff --git a/superset/connectors/base/models.py b/superset/connectors/base/models.py
index 2e8bc25..2a5fbc7 100644
--- a/superset/connectors/base/models.py
+++ b/superset/connectors/base/models.py
@@ -17,9 +17,7 @@
 # pylint: disable=C,R,W
 import json
 
-from sqlalchemy import (
-    and_, Boolean, Column, Integer, String, Text,
-)
+from sqlalchemy import and_, Boolean, Column, Integer, String, Text
 from sqlalchemy.ext.declarative import declared_attr
 from sqlalchemy.orm import foreign, relationship
 
@@ -67,7 +65,7 @@ class BaseDatasource(AuditMixinNullable, ImportMixin):
     @declared_attr
     def slices(self):
         return relationship(
-            'Slice',
+            "Slice",
             primaryjoin=lambda: and_(
                 foreign(Slice.datasource_id) == self.id,
                 foreign(Slice.datasource_type) == self.type,
@@ -82,11 +80,11 @@ class BaseDatasource(AuditMixinNullable, ImportMixin):
     @property
     def uid(self):
         """Unique id across datasource types"""
-        return f'{self.id}__{self.type}'
+        return f"{self.id}__{self.type}"
 
     @property
     def column_names(self):
-        return sorted([c.column_name for c in self.columns], key=lambda x: x or '')
+        return sorted([c.column_name for c in self.columns], key=lambda x: x or "")
 
     @property
     def columns_types(self):
@@ -94,7 +92,7 @@ class BaseDatasource(AuditMixinNullable, ImportMixin):
 
     @property
     def main_dttm_col(self):
-        return 'timestamp'
+        return "timestamp"
 
     @property
     def datasource_name(self):
@@ -120,22 +118,18 @@ class BaseDatasource(AuditMixinNullable, ImportMixin):
 
     @property
     def url(self):
-        return '/{}/edit/{}'.format(self.baselink, self.id)
+        return "/{}/edit/{}".format(self.baselink, self.id)
 
     @property
     def explore_url(self):
         if self.default_endpoint:
             return self.default_endpoint
         else:
-            return '/superset/explore/{obj.type}/{obj.id}/'.format(obj=self)
+            return "/superset/explore/{obj.type}/{obj.id}/".format(obj=self)
 
     @property
     def column_formats(self):
-        return {
-            m.metric_name: m.d3format
-            for m in self.metrics
-            if m.d3format
-        }
+        return {m.metric_name: m.d3format for m in self.metrics if m.d3format}
 
     def add_missing_metrics(self, metrics):
         exisiting_metrics = {m.metric_name for m in self.metrics}
@@ -148,14 +142,14 @@ class BaseDatasource(AuditMixinNullable, ImportMixin):
     def short_data(self):
         """Data representation of the datasource sent to the frontend"""
         return {
-            'edit_url': self.url,
-            'id': self.id,
-            'uid': self.uid,
-            'schema': self.schema,
-            'name': self.name,
-            'type': self.type,
-            'connection': self.connection,
-            'creator': str(self.created_by),
+            "edit_url": self.url,
+            "id": self.id,
+            "uid": self.uid,
+            "schema": self.schema,
+            "name": self.name,
+            "type": self.type,
+            "connection": self.connection,
+            "creator": str(self.created_by),
         }
 
     @property
@@ -168,68 +162,65 @@ class BaseDatasource(AuditMixinNullable, ImportMixin):
         order_by_choices = []
         # self.column_names return sorted column_names
         for s in self.column_names:
-            s = str(s or '')
-            order_by_choices.append((json.dumps([s, True]), s + ' [asc]'))
-            order_by_choices.append((json.dumps([s, False]), s + ' [desc]'))
-
-        verbose_map = {'__timestamp': 'Time'}
-        verbose_map.update({
-            o.metric_name: o.verbose_name or o.metric_name
-            for o in self.metrics
-        })
-        verbose_map.update({
-            o.column_name: o.verbose_name or o.column_name
-            for o in self.columns
-        })
+            s = str(s or "")
+            order_by_choices.append((json.dumps([s, True]), s + " [asc]"))
+            order_by_choices.append((json.dumps([s, False]), s + " [desc]"))
+
+        verbose_map = {"__timestamp": "Time"}
+        verbose_map.update(
+            {o.metric_name: o.verbose_name or o.metric_name for o in self.metrics}
+        )
+        verbose_map.update(
+            {o.column_name: o.verbose_name or o.column_name for o in self.columns}
+        )
         return {
             # simple fields
-            'id': self.id,
-            'column_formats': self.column_formats,
-            'description': self.description,
-            'database': self.database.data,  # pylint: disable=no-member
-            'default_endpoint': self.default_endpoint,
-            'filter_select': self.filter_select_enabled,  # TODO deprecate
-            'filter_select_enabled': self.filter_select_enabled,
-            'name': self.name,
-            'datasource_name': self.datasource_name,
-            'type': self.type,
-            'schema': self.schema,
-            'offset': self.offset,
-            'cache_timeout': self.cache_timeout,
-            'params': self.params,
-            'perm': self.perm,
-            'edit_url': self.url,
-
+            "id": self.id,
+            "column_formats": self.column_formats,
+            "description": self.description,
+            "database": self.database.data,  # pylint: disable=no-member
+            "default_endpoint": self.default_endpoint,
+            "filter_select": self.filter_select_enabled,  # TODO deprecate
+            "filter_select_enabled": self.filter_select_enabled,
+            "name": self.name,
+            "datasource_name": self.datasource_name,
+            "type": self.type,
+            "schema": self.schema,
+            "offset": self.offset,
+            "cache_timeout": self.cache_timeout,
+            "params": self.params,
+            "perm": self.perm,
+            "edit_url": self.url,
             # sqla-specific
-            'sql': self.sql,
-
+            "sql": self.sql,
             # one to many
-            'columns': [o.data for o in self.columns],
-            'metrics': [o.data for o in self.metrics],
-
+            "columns": [o.data for o in self.columns],
+            "metrics": [o.data for o in self.metrics],
             # TODO deprecate, move logic to JS
-            'order_by_choices': order_by_choices,
-            'owners': [owner.id for owner in self.owners],
-            'verbose_map': verbose_map,
-            'select_star': self.select_star,
+            "order_by_choices": order_by_choices,
+            "owners": [owner.id for owner in self.owners],
+            "verbose_map": verbose_map,
+            "select_star": self.select_star,
         }
 
     @staticmethod
     def filter_values_handler(
-            values, target_column_is_numeric=False, is_list_target=False):
+        values, target_column_is_numeric=False, is_list_target=False
+    ):
         def handle_single_value(v):
             # backward compatibility with previous <select> components
             if isinstance(v, str):
-                v = v.strip('\t\n\'"')
+                v = v.strip("\t\n'\"")
                 if target_column_is_numeric:
                     # For backwards compatibility and edge cases
                     # where a column data type might have changed
                     v = utils.string_to_num(v)
-                if v == '<NULL>':
+                if v == "<NULL>":
                     return None
-                elif v == '<empty string>':
-                    return ''
+                elif v == "<empty string>":
+                    return ""
             return v
+
         if isinstance(values, (list, tuple)):
             values = [handle_single_value(v) for v in values]
         else:
@@ -278,8 +269,7 @@ class BaseDatasource(AuditMixinNullable, ImportMixin):
             if col.column_name == column_name:
                 return col
 
-    def get_fk_many_from_list(
-            self, object_list, fkmany, fkmany_class, key_attr):
+    def get_fk_many_from_list(self, object_list, fkmany, fkmany_class, key_attr):
         """Update ORM one-to-many list from object list
 
         Used for syncing metrics and columns using the same code"""
@@ -302,13 +292,10 @@ class BaseDatasource(AuditMixinNullable, ImportMixin):
         for obj in object_list:
             key = obj.get(key_attr)
             if key not in orm_keys:
-                del obj['id']
+                del obj["id"]
                 orm_kwargs = {}
                 for k in obj:
-                    if (
-                        k in fkmany_class.update_from_object_fields and
-                        k in obj
-                    ):
+                    if k in fkmany_class.update_from_object_fields and k in obj:
                         orm_kwargs[k] = obj[k]
                 new_obj = fkmany_class(**orm_kwargs)
                 new_fks.append(new_obj)
@@ -329,16 +316,18 @@ class BaseDatasource(AuditMixinNullable, ImportMixin):
         for attr in self.update_from_object_fields:
             setattr(self, attr, obj.get(attr))
 
-        self.owners = obj.get('owners', [])
+        self.owners = obj.get("owners", [])
 
         # Syncing metrics
         metrics = self.get_fk_many_from_list(
-            obj.get('metrics'), self.metrics, self.metric_class, 'metric_name')
+            obj.get("metrics"), self.metrics, self.metric_class, "metric_name"
+        )
         self.metrics = metrics
 
         # Syncing columns
         self.columns = self.get_fk_many_from_list(
-            obj.get('columns'), self.columns, self.column_class, 'column_name')
+            obj.get("columns"), self.columns, self.column_class, "column_name"
+        )
 
 
 class BaseColumn(AuditMixinNullable, ImportMixin):
@@ -363,32 +352,31 @@ class BaseColumn(AuditMixinNullable, ImportMixin):
         return self.column_name
 
     num_types = (
-        'DOUBLE', 'FLOAT', 'INT', 'BIGINT', 'NUMBER',
-        'LONG', 'REAL', 'NUMERIC', 'DECIMAL', 'MONEY',
+        "DOUBLE",
+        "FLOAT",
+        "INT",
+        "BIGINT",
+        "NUMBER",
+        "LONG",
+        "REAL",
+        "NUMERIC",
+        "DECIMAL",
+        "MONEY",
     )
-    date_types = ('DATE', 'TIME', 'DATETIME')
-    str_types = ('VARCHAR', 'STRING', 'CHAR')
+    date_types = ("DATE", "TIME", "DATETIME")
+    str_types = ("VARCHAR", "STRING", "CHAR")
 
     @property
     def is_num(self):
-        return (
-            self.type and
-            any([t in self.type.upper() for t in self.num_types])
-        )
+        return self.type and any([t in self.type.upper() for t in self.num_types])
 
     @property
     def is_time(self):
-        return (
-            self.type and
-            any([t in self.type.upper() for t in self.date_types])
-        )
+        return self.type and any([t in self.type.upper() for t in self.date_types])
 
     @property
     def is_string(self):
-        return (
-            self.type and
-            any([t in self.type.upper() for t in self.str_types])
-        )
+        return self.type and any([t in self.type.upper() for t in self.str_types])
 
     @property
     def expression(self):
@@ -397,9 +385,17 @@ class BaseColumn(AuditMixinNullable, ImportMixin):
     @property
     def data(self):
         attrs = (
-            'id', 'column_name', 'verbose_name', 'description', 'expression',
-            'filterable', 'groupby', 'is_dttm', 'type',
-            'database_expression', 'python_date_format',
+            "id",
+            "column_name",
+            "verbose_name",
+            "description",
+            "expression",
+            "filterable",
+            "groupby",
+            "is_dttm",
+            "type",
+            "database_expression",
+            "python_date_format",
         )
         return {s: getattr(self, s) for s in attrs if hasattr(self, s)}
 
@@ -432,6 +428,7 @@ class BaseMetric(AuditMixinNullable, ImportMixin):
         backref=backref('metrics', cascade='all, delete-orphan'),
         enable_typechecks=False)
     """
+
     @property
     def perm(self):
         raise NotImplementedError()
@@ -443,6 +440,12 @@ class BaseMetric(AuditMixinNullable, ImportMixin):
     @property
     def data(self):
         attrs = (
-            'id', 'metric_name', 'verbose_name', 'description', 'expression',
-            'warning_text', 'd3format')
+            "id",
+            "metric_name",
+            "verbose_name",
+            "description",
+            "expression",
+            "warning_text",
+            "d3format",
+        )
         return {s: getattr(self, s) for s in attrs}
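
filter_values_handler above normalizes filter values coming from the legacy select components: surrounding quotes and whitespace are stripped, the sentinel strings <NULL> and <empty string> map to None and the empty string, values are coerced to numbers when the target column is numeric, and scalars are wrapped in a list when a list is expected. A simplified, standalone restatement of the single-value handling (string_to_num is reduced here to a plain int/float conversion):

    # a simplified restatement of handle_single_value, for illustration only
    def handle_single_value(v, target_column_is_numeric=False):
        if isinstance(v, str):
            v = v.strip("\t\n'\"")                  # drop quoting and whitespace
            if target_column_is_numeric:
                return int(v) if v.isdigit() else float(v)
            if v == "<NULL>":
                return None
            if v == "<empty string>":
                return ""
        return v

    assert handle_single_value("'CA'") == "CA"
    assert handle_single_value("<NULL>") is None
    assert handle_single_value("<empty string>") == ""
    assert handle_single_value("42", target_column_is_numeric=True) == 42
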
diff --git a/superset/connectors/base/views.py b/superset/connectors/base/views.py
index d73dc19..6b7cdf8 100644
--- a/superset/connectors/base/views.py
+++ b/superset/connectors/base/views.py
@@ -24,7 +24,10 @@ from superset.views.base import SupersetModelView
 class DatasourceModelView(SupersetModelView):
     def pre_delete(self, obj):
         if obj.slices:
-            raise SupersetException(Markup(
-                'Cannot delete a datasource that has slices attached to it.'
-                "Here's the list of associated charts: " +
-                ''.join([o.slice_name for o in obj.slices])))
+            raise SupersetException(
+                Markup(
+                    "Cannot delete a datasource that has slices attached to it."
+                    "Here's the list of associated charts: "
+                    + "".join([o.slice_name for o in obj.slices])
+                )
+            )
diff --git a/superset/connectors/connector_registry.py b/superset/connectors/connector_registry.py
index f080553..be31a37 100644
--- a/superset/connectors/connector_registry.py
+++ b/superset/connectors/connector_registry.py
@@ -51,15 +51,21 @@ class ConnectorRegistry(object):
         return datasources
 
     @classmethod
-    def get_datasource_by_name(cls, session, datasource_type, datasource_name,
-                               schema, database_name):
+    def get_datasource_by_name(
+        cls, session, datasource_type, datasource_name, schema, database_name
+    ):
         datasource_class = ConnectorRegistry.sources[datasource_type]
         datasources = session.query(datasource_class).all()
 
         # Filter datasources that don't have a database.
-        db_ds = [d for d in datasources if d.database and
-                 d.database.name == database_name and
-                 d.name == datasource_name and schema == schema]
+        db_ds = [
+            d
+            for d in datasources
+            if d.database
+            and d.database.name == database_name
+            and d.name == datasource_name
+            and schema == schema
+        ]
         return db_ds[0]
 
     @classmethod
@@ -87,8 +93,8 @@ class ConnectorRegistry(object):
         )
 
     @classmethod
-    def query_datasources_by_name(
-            cls, session, database, datasource_name, schema=None):
+    def query_datasources_by_name(cls, session, database, datasource_name, schema=None):
         datasource_class = ConnectorRegistry.sources[database.type]
         return datasource_class.query_datasources_by_name(
-            session, database, datasource_name, schema=None)
+            session, database, datasource_name, schema=None
+        )
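
ConnectorRegistry above is a thin dispatch layer: sources maps a datasource type string ("table", "druid") to its ORM class, and the query_datasources_* class methods resolve concrete datasources through a SQLAlchemy session. The get_datasource call seen earlier in this diff (in query_context.py) boils down to roughly the following sketch, not the registry's exact implementation:

    # rough shape of the lookup performed by ConnectorRegistry.get_datasource;
    # sources is populated at app start, e.g. {"table": SqlaTable, "druid": DruidDatasource}
    def get_datasource_sketch(sources, datasource_type, datasource_id, session):
        datasource_class = sources[datasource_type]
        return (
            session.query(datasource_class)
            .filter_by(id=datasource_id)
            .one_or_none()
        )
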
diff --git a/superset/connectors/druid/models.py b/superset/connectors/druid/models.py
index c71bc80..3f81ca7 100644
--- a/superset/connectors/druid/models.py
+++ b/superset/connectors/druid/models.py
@@ -37,12 +37,25 @@ from pydruid.utils.dimensions import MapLookupExtraction, RegexExtraction
 from pydruid.utils.filters import Dimension, Filter
 from pydruid.utils.having import Aggregation
 from pydruid.utils.postaggregator import (
-    Const, Field, HyperUniqueCardinality, Postaggregator, Quantile, Quantiles,
+    Const,
+    Field,
+    HyperUniqueCardinality,
+    Postaggregator,
+    Quantile,
+    Quantiles,
 )
 import requests
 import sqlalchemy as sa
 from sqlalchemy import (
-    Boolean, Column, DateTime, ForeignKey, Integer, String, Table, Text, UniqueConstraint,
+    Boolean,
+    Column,
+    DateTime,
+    ForeignKey,
+    Integer,
+    String,
+    Table,
+    Text,
+    UniqueConstraint,
 )
 from sqlalchemy.orm import backref, relationship
 from sqlalchemy_utils import EncryptedType
@@ -50,16 +63,12 @@ from sqlalchemy_utils import EncryptedType
 from superset import conf, db, security_manager
 from superset.connectors.base.models import BaseColumn, BaseDatasource, BaseMetric
 from superset.exceptions import MetricPermException, SupersetException
-from superset.models.helpers import (
-    AuditMixinNullable, ImportMixin, QueryResult,
-)
+from superset.models.helpers import AuditMixinNullable, ImportMixin, QueryResult
 from superset.utils import core as utils, import_datasource
-from superset.utils.core import (
-    DimSelector, DTTM_ALIAS, flasher,
-)
+from superset.utils.core import DimSelector, DTTM_ALIAS, flasher
 
-DRUID_TZ = conf.get('DRUID_TZ')
-POST_AGG_TYPE = 'postagg'
+DRUID_TZ = conf.get("DRUID_TZ")
+POST_AGG_TYPE = "postagg"
 metadata = Model.metadata  # pylint: disable=no-member
 
 
@@ -72,16 +81,17 @@ def _fetch_metadata_for(datasource):
 class JavascriptPostAggregator(Postaggregator):
     def __init__(self, name, field_names, function):
         self.post_aggregator = {
-            'type': 'javascript',
-            'fieldNames': field_names,
-            'name': name,
-            'function': function,
+            "type": "javascript",
+            "fieldNames": field_names,
+            "name": name,
+            "function": function,
         }
         self.name = name
 
 
 class CustomPostAggregator(Postaggregator):
     """A way to allow users to specify completely custom PostAggregators"""
+
     def __init__(self, name, post_aggregator):
         self.name = name
         self.post_aggregator = post_aggregator
@@ -91,8 +101,8 @@ class DruidCluster(Model, AuditMixinNullable, ImportMixin):
 
     """ORM object referencing the Druid clusters"""
 
-    __tablename__ = 'clusters'
-    type = 'druid'
+    __tablename__ = "clusters"
+    type = "druid"
 
     id = Column(Integer, primary_key=True)
     verbose_name = Column(String(250), unique=True)
@@ -100,16 +110,22 @@ class DruidCluster(Model, AuditMixinNullable, ImportMixin):
     cluster_name = Column(String(250), unique=True)
     broker_host = Column(String(255))
     broker_port = Column(Integer, default=8082)
-    broker_endpoint = Column(String(255), default='druid/v2')
+    broker_endpoint = Column(String(255), default="druid/v2")
     metadata_last_refreshed = Column(DateTime)
     cache_timeout = Column(Integer)
     broker_user = Column(String(255))
-    broker_pass = Column(EncryptedType(String(255), conf.get('SECRET_KEY')))
+    broker_pass = Column(EncryptedType(String(255), conf.get("SECRET_KEY")))
 
-    export_fields = ('cluster_name', 'broker_host', 'broker_port',
-                     'broker_endpoint', 'cache_timeout', 'broker_user')
+    export_fields = (
+        "cluster_name",
+        "broker_host",
+        "broker_port",
+        "broker_endpoint",
+        "cache_timeout",
+        "broker_user",
+    )
     update_from_object_fields = export_fields
-    export_children = ['datasources']
+    export_children = ["datasources"]
 
     def __repr__(self):
         return self.verbose_name if self.verbose_name else self.cluster_name
@@ -119,43 +135,37 @@ class DruidCluster(Model, AuditMixinNullable, ImportMixin):
 
     @property
     def data(self):
-        return {
-            'id': self.id,
-            'name': self.cluster_name,
-            'backend': 'druid',
-        }
+        return {"id": self.id, "name": self.cluster_name, "backend": "druid"}
 
     @staticmethod
     def get_base_url(host, port):
-        if not re.match('http(s)?://', host):
-            host = 'http://' + host
+        if not re.match("http(s)?://", host):
+            host = "http://" + host
 
-        url = '{0}:{1}'.format(host, port) if port else host
+        url = "{0}:{1}".format(host, port) if port else host
         return url
 
     def get_base_broker_url(self):
-        base_url = self.get_base_url(
-            self.broker_host, self.broker_port)
-        return f'{base_url}/{self.broker_endpoint}'
+        base_url = self.get_base_url(self.broker_host, self.broker_port)
+        return f"{base_url}/{self.broker_endpoint}"
 
     def get_pydruid_client(self):
         cli = PyDruid(
-            self.get_base_url(self.broker_host, self.broker_port),
-            self.broker_endpoint)
+            self.get_base_url(self.broker_host, self.broker_port), self.broker_endpoint
+        )
         if self.broker_user and self.broker_pass:
             cli.set_basic_auth_credentials(self.broker_user, self.broker_pass)
         return cli
 
     def get_datasources(self):
-        endpoint = self.get_base_broker_url() + '/datasources'
+        endpoint = self.get_base_broker_url() + "/datasources"
         auth = requests.auth.HTTPBasicAuth(self.broker_user, self.broker_pass)
         return json.loads(requests.get(endpoint, auth=auth).text)
 
     def get_druid_version(self):
-        endpoint = self.get_base_url(
-            self.broker_host, self.broker_port) + '/status'
+        endpoint = self.get_base_url(self.broker_host, self.broker_port) + "/status"
         auth = requests.auth.HTTPBasicAuth(self.broker_user, self.broker_pass)
-        return json.loads(requests.get(endpoint, auth=auth).text)['version']
+        return json.loads(requests.get(endpoint, auth=auth).text)["version"]
 
     @property
     @utils.memoized
@@ -163,15 +173,13 @@ class DruidCluster(Model, AuditMixinNullable, ImportMixin):
         return self.get_druid_version()
 
     def refresh_datasources(
-            self,
-            datasource_name=None,
-            merge_flag=True,
-            refreshAll=True):
+        self, datasource_name=None, merge_flag=True, refreshAll=True
+    ):
         """Refresh metadata of all datasources in the cluster
         If ``datasource_name`` is specified, only that datasource is updated
         """
         ds_list = self.get_datasources()
-        blacklist = conf.get('DRUID_DATA_SOURCE_BLACKLIST', [])
+        blacklist = conf.get("DRUID_DATA_SOURCE_BLACKLIST", [])
         ds_refresh = []
         if not datasource_name:
             ds_refresh = list(filter(lambda ds: ds not in blacklist, ds_list))
@@ -199,12 +207,10 @@ class DruidCluster(Model, AuditMixinNullable, ImportMixin):
                 datasource = DruidDatasource(datasource_name=ds_name)
                 with session.no_autoflush:
                     session.add(datasource)
-                flasher(
-                    _('Adding new datasource [{}]').format(ds_name), 'success')
+                flasher(_("Adding new datasource [{}]").format(ds_name), "success")
                 ds_map[ds_name] = datasource
             elif refreshAll:
-                flasher(
-                    _('Refreshing datasource [{}]').format(ds_name), 'info')
+                flasher(_("Refreshing datasource [{}]").format(ds_name), "info")
             else:
                 del ds_map[ds_name]
                 continue
@@ -230,18 +236,18 @@ class DruidCluster(Model, AuditMixinNullable, ImportMixin):
                 )
                 col_objs = {col.column_name: col for col in col_objs_list}
                 for col in cols:
-                    if col == '__time':  # skip the time column
+                    if col == "__time":  # skip the time column
                         continue
                     col_obj = col_objs.get(col)
                     if not col_obj:
                         col_obj = DruidColumn(
-                            datasource_id=datasource.id,
-                            column_name=col)
+                            datasource_id=datasource.id, column_name=col
+                        )
                         with session.no_autoflush:
                             session.add(col_obj)
-                    col_obj.type = cols[col]['type']
+                    col_obj.type = cols[col]["type"]
                     col_obj.datasource = datasource
-                    if col_obj.type == 'STRING':
+                    if col_obj.type == "STRING":
                         col_obj.groupby = True
                         col_obj.filterable = True
                 datasource.refresh_metrics()
@@ -249,7 +255,7 @@ class DruidCluster(Model, AuditMixinNullable, ImportMixin):
 
     @property
     def perm(self):
-        return '[{obj.cluster_name}].(id:{obj.id})'.format(obj=self)
+        return "[{obj.cluster_name}].(id:{obj.id})".format(obj=self)
 
     def get_perm(self):
         return self.perm
@@ -266,23 +272,31 @@ class DruidCluster(Model, AuditMixinNullable, ImportMixin):
 class DruidColumn(Model, BaseColumn):
     """ORM model for storing Druid datasource column metadata"""
 
-    __tablename__ = 'columns'
-    __table_args__ = (UniqueConstraint('column_name', 'datasource_id'),)
+    __tablename__ = "columns"
+    __table_args__ = (UniqueConstraint("column_name", "datasource_id"),)
 
-    datasource_id = Column(Integer, ForeignKey('datasources.id'))
+    datasource_id = Column(Integer, ForeignKey("datasources.id"))
     # Setting enable_typechecks=False disables polymorphic inheritance.
     datasource = relationship(
-        'DruidDatasource',
-        backref=backref('columns', cascade='all, delete-orphan'),
-        enable_typechecks=False)
+        "DruidDatasource",
+        backref=backref("columns", cascade="all, delete-orphan"),
+        enable_typechecks=False,
+    )
     dimension_spec_json = Column(Text)
 
     export_fields = (
-        'datasource_id', 'column_name', 'is_active', 'type', 'groupby',
-        'filterable', 'description', 'dimension_spec_json', 'verbose_name',
+        "datasource_id",
+        "column_name",
+        "is_active",
+        "type",
+        "groupby",
+        "filterable",
+        "description",
+        "dimension_spec_json",
+        "verbose_name",
     )
     update_from_object_fields = export_fields
-    export_parent = 'datasource'
+    export_parent = "datasource"
 
     def __repr__(self):
         return self.column_name or str(self.id)
@@ -298,11 +312,11 @@ class DruidColumn(Model, BaseColumn):
 
     def get_metrics(self):
         metrics = {}
-        metrics['count'] = DruidMetric(
-            metric_name='count',
-            verbose_name='COUNT(*)',
-            metric_type='count',
-            json=json.dumps({'type': 'count', 'name': 'count'}),
+        metrics["count"] = DruidMetric(
+            metric_name="count",
+            verbose_name="COUNT(*)",
+            metric_type="count",
+            json=json.dumps({"type": "count", "name": "count"}),
         )
         return metrics
 
@@ -318,7 +332,7 @@ class DruidColumn(Model, BaseColumn):
         for metric in metrics.values():
             dbmetric = dbmetrics.get(metric.metric_name)
             if dbmetric:
-                for attr in ['json', 'metric_type']:
+                for attr in ["json", "metric_type"]:
                     setattr(dbmetric, attr, getattr(metric, attr))
             else:
                 with db.session.no_autoflush:
@@ -328,9 +342,14 @@ class DruidColumn(Model, BaseColumn):
     @classmethod
     def import_obj(cls, i_column):
         def lookup_obj(lookup_column):
-            return db.session.query(DruidColumn).filter(
-                DruidColumn.datasource_id == lookup_column.datasource_id,
-                DruidColumn.column_name == lookup_column.column_name).first()
+            return (
+                db.session.query(DruidColumn)
+                .filter(
+                    DruidColumn.datasource_id == lookup_column.datasource_id,
+                    DruidColumn.column_name == lookup_column.column_name,
+                )
+                .first()
+            )
 
         return import_datasource.import_simple_obj(db.session, i_column, lookup_obj)
 
@@ -339,23 +358,31 @@ class DruidMetric(Model, BaseMetric):
 
     """ORM object referencing Druid metrics for a datasource"""
 
-    __tablename__ = 'metrics'
-    __table_args__ = (UniqueConstraint('metric_name', 'datasource_id'),)
-    datasource_id = Column(Integer, ForeignKey('datasources.id'))
+    __tablename__ = "metrics"
+    __table_args__ = (UniqueConstraint("metric_name", "datasource_id"),)
+    datasource_id = Column(Integer, ForeignKey("datasources.id"))
 
     # Setting enable_typechecks=False disables polymorphic inheritance.
     datasource = relationship(
-        'DruidDatasource',
-        backref=backref('metrics', cascade='all, delete-orphan'),
-        enable_typechecks=False)
+        "DruidDatasource",
+        backref=backref("metrics", cascade="all, delete-orphan"),
+        enable_typechecks=False,
+    )
     json = Column(Text, nullable=False)
 
     export_fields = (
-        'metric_name', 'verbose_name', 'metric_type', 'datasource_id',
-        'json', 'description', 'is_restricted', 'd3format', 'warning_text',
+        "metric_name",
+        "verbose_name",
+        "metric_type",
+        "datasource_id",
+        "json",
+        "description",
+        "is_restricted",
+        "d3format",
+        "warning_text",
     )
     update_from_object_fields = export_fields
-    export_parent = 'datasource'
+    export_parent = "datasource"
 
     @property
     def expression(self):
@@ -372,10 +399,12 @@ class DruidMetric(Model, BaseMetric):
     @property
     def perm(self):
         return (
-            '{parent_name}.[{obj.metric_name}](id:{obj.id})'
-        ).format(obj=self,
-                 parent_name=self.datasource.full_name,
-                 ) if self.datasource else None
+            ("{parent_name}.[{obj.metric_name}](id:{obj.id})").format(
+                obj=self, parent_name=self.datasource.full_name
+            )
+            if self.datasource
+            else None
+        )
 
     def get_perm(self):
         return self.perm
@@ -383,17 +412,24 @@ class DruidMetric(Model, BaseMetric):
     @classmethod
     def import_obj(cls, i_metric):
         def lookup_obj(lookup_metric):
-            return db.session.query(DruidMetric).filter(
-                DruidMetric.datasource_id == lookup_metric.datasource_id,
-                DruidMetric.metric_name == lookup_metric.metric_name).first()
+            return (
+                db.session.query(DruidMetric)
+                .filter(
+                    DruidMetric.datasource_id == lookup_metric.datasource_id,
+                    DruidMetric.metric_name == lookup_metric.metric_name,
+                )
+                .first()
+            )
+
         return import_datasource.import_simple_obj(db.session, i_metric, lookup_obj)
 
 
 druiddatasource_user = Table(
-    'druiddatasource_user', metadata,
-    Column('id', Integer, primary_key=True),
-    Column('user_id', Integer, ForeignKey('ab_user.id')),
-    Column('datasource_id', Integer, ForeignKey('datasources.id')),
+    "druiddatasource_user",
+    metadata,
+    Column("id", Integer, primary_key=True),
+    Column("user_id", Integer, ForeignKey("ab_user.id")),
+    Column("datasource_id", Integer, ForeignKey("datasources.id")),
 )
 
 
@@ -401,39 +437,46 @@ class DruidDatasource(Model, BaseDatasource):
 
     """ORM object referencing Druid datasources (tables)"""
 
-    __tablename__ = 'datasources'
-    __table_args__ = (UniqueConstraint('datasource_name', 'cluster_name'),)
+    __tablename__ = "datasources"
+    __table_args__ = (UniqueConstraint("datasource_name", "cluster_name"),)
 
-    type = 'druid'
-    query_language = 'json'
+    type = "druid"
+    query_language = "json"
     cluster_class = DruidCluster
     metric_class = DruidMetric
     column_class = DruidColumn
     owner_class = security_manager.user_model
 
-    baselink = 'druiddatasourcemodelview'
+    baselink = "druiddatasourcemodelview"
 
     # Columns
     datasource_name = Column(String(255), nullable=False)
     is_hidden = Column(Boolean, default=False)
     filter_select_enabled = Column(Boolean, default=True)  # override default
     fetch_values_from = Column(String(100))
-    cluster_name = Column(
-        String(250), ForeignKey('clusters.cluster_name'))
+    cluster_name = Column(String(250), ForeignKey("clusters.cluster_name"))
     cluster = relationship(
-        'DruidCluster', backref='datasources', foreign_keys=[cluster_name])
-    owners = relationship(owner_class, secondary=druiddatasource_user,
-                          backref='druiddatasources')
+        "DruidCluster", backref="datasources", foreign_keys=[cluster_name]
+    )
+    owners = relationship(
+        owner_class, secondary=druiddatasource_user, backref="druiddatasources"
+    )
 
     export_fields = (
-        'datasource_name', 'is_hidden', 'description', 'default_endpoint',
-        'cluster_name', 'offset', 'cache_timeout', 'params',
-        'filter_select_enabled',
+        "datasource_name",
+        "is_hidden",
+        "description",
+        "default_endpoint",
+        "cluster_name",
+        "offset",
+        "cache_timeout",
+        "params",
+        "filter_select_enabled",
     )
     update_from_object_fields = export_fields
 
-    export_parent = 'cluster'
-    export_children = ['columns', 'metrics']
+    export_parent = "cluster"
+    export_children = ["columns", "metrics"]
 
     @property
     def database(self):
@@ -453,8 +496,8 @@ class DruidDatasource(Model, BaseDatasource):
 
     @property
     def schema(self):
-        ds_name = self.datasource_name or ''
-        name_pieces = ds_name.split('.')
+        ds_name = self.datasource_name or ""
+        name_pieces = ds_name.split(".")
         if len(name_pieces) > 1:
             return name_pieces[0]
         else:
@@ -466,9 +509,9 @@ class DruidDatasource(Model, BaseDatasource):
         return security_manager.get_schema_perm(self.cluster, self.schema)
 
     def get_perm(self):
-        return (
-            '[{obj.cluster_name}].[{obj.datasource_name}]'
-            '(id:{obj.id})').format(obj=self)
+        return ("[{obj.cluster_name}].[{obj.datasource_name}]" "(id:{obj.id})").format(
+            obj=self
+        )
 
     def update_from_object(self, obj):
         return NotImplementedError()
@@ -480,35 +523,43 @@ class DruidDatasource(Model, BaseDatasource):
 
     @property
     def full_name(self):
-        return utils.get_datasource_full_name(
-            self.cluster_name, self.datasource_name)
+        return utils.get_datasource_full_name(self.cluster_name, self.datasource_name)
 
     @property
     def time_column_grains(self):
         return {
-            'time_columns': [
-                'all', '5 seconds', '30 seconds', '1 minute', '5 minutes',
-                '30 minutes', '1 hour', '6 hour', '1 day', '7 days',
-                'week', 'week_starting_sunday', 'week_ending_saturday',
-                'month', 'quarter', 'year',
+            "time_columns": [
+                "all",
+                "5 seconds",
+                "30 seconds",
+                "1 minute",
+                "5 minutes",
+                "30 minutes",
+                "1 hour",
+                "6 hour",
+                "1 day",
+                "7 days",
+                "week",
+                "week_starting_sunday",
+                "week_ending_saturday",
+                "month",
+                "quarter",
+                "year",
             ],
-            'time_grains': ['now'],
+            "time_grains": ["now"],
         }
 
     def __repr__(self):
         return self.datasource_name
 
-    @renders('datasource_name')
+    @renders("datasource_name")
     def datasource_link(self):
-        url = f'/superset/explore/{self.type}/{self.id}/'
+        url = f"/superset/explore/{self.type}/{self.id}/"
         name = escape(self.datasource_name)
         return Markup(f'<a href="{url}">{name}</a>')
 
     def get_metric_obj(self, metric_name):
-        return [
-            m.json_obj for m in self.metrics
-            if m.metric_name == metric_name
-        ][0]
+        return [m.json_obj for m in self.metrics if m.metric_name == metric_name][0]
 
     @classmethod
     def import_obj(cls, i_datasource, import_time=None):
@@ -518,29 +569,38 @@ class DruidDatasource(Model, BaseDatasource):
          This function can be used to import/export dashboards between multiple
          superset instances. Audit metadata isn't copied over.
         """
+
         def lookup_datasource(d):
-            return db.session.query(DruidDatasource).filter(
-                DruidDatasource.datasource_name == d.datasource_name,
-                DruidCluster.cluster_name == d.cluster_name,
-            ).first()
+            return (
+                db.session.query(DruidDatasource)
+                .filter(
+                    DruidDatasource.datasource_name == d.datasource_name,
+                    DruidCluster.cluster_name == d.cluster_name,
+                )
+                .first()
+            )
 
         def lookup_cluster(d):
-            return db.session.query(DruidCluster).filter_by(
-                cluster_name=d.cluster_name).one()
+            return (
+                db.session.query(DruidCluster)
+                .filter_by(cluster_name=d.cluster_name)
+                .one()
+            )
+
         return import_datasource.import_datasource(
-            db.session, i_datasource, lookup_cluster, lookup_datasource,
-            import_time)
+            db.session, i_datasource, lookup_cluster, lookup_datasource, import_time
+        )
 
     def latest_metadata(self):
         """Returns segment metadata from the latest segment"""
-        logging.info('Syncing datasource [{}]'.format(self.datasource_name))
+        logging.info("Syncing datasource [{}]".format(self.datasource_name))
         client = self.cluster.get_pydruid_client()
         try:
             results = client.time_boundary(datasource=self.datasource_name)
         except IOError:
             results = None
         if results:
-            max_time = results[0]['result']['maxTime']
+            max_time = results[0]["result"]["maxTime"]
             max_time = dparse(max_time)
         else:
             max_time = datetime.now()
@@ -549,7 +609,7 @@ class DruidDatasource(Model, BaseDatasource):
         # realtime segments, which triggered a bug (fixed in druid 0.8.2).
         # https://groups.google.com/forum/#!topic/druid-user/gVCqqspHqOQ
         lbound = (max_time - timedelta(days=7)).isoformat()
-        if LooseVersion(self.cluster.druid_version) < LooseVersion('0.8.2'):
+        if LooseVersion(self.cluster.druid_version) < LooseVersion("0.8.2"):
             rbound = (max_time - timedelta(1)).isoformat()
         else:
             rbound = max_time.isoformat()
@@ -557,53 +617,48 @@ class DruidDatasource(Model, BaseDatasource):
         try:
             segment_metadata = client.segment_metadata(
                 datasource=self.datasource_name,
-                intervals=lbound + '/' + rbound,
+                intervals=lbound + "/" + rbound,
                 merge=self.merge_flag,
-                analysisTypes=[])
+                analysisTypes=[],
+            )
         except Exception as e:
-            logging.warning('Failed first attempt to get latest segment')
+            logging.warning("Failed first attempt to get latest segment")
             logging.exception(e)
         if not segment_metadata:
             # if no segments in the past 7 days, look at all segments
             lbound = datetime(1901, 1, 1).isoformat()[:10]
-            if LooseVersion(self.cluster.druid_version) < LooseVersion('0.8.2'):
+            if LooseVersion(self.cluster.druid_version) < LooseVersion("0.8.2"):
                 rbound = datetime.now().isoformat()
             else:
                 rbound = datetime(2050, 1, 1).isoformat()[:10]
             try:
                 segment_metadata = client.segment_metadata(
                     datasource=self.datasource_name,
-                    intervals=lbound + '/' + rbound,
+                    intervals=lbound + "/" + rbound,
                     merge=self.merge_flag,
-                    analysisTypes=[])
+                    analysisTypes=[],
+                )
             except Exception as e:
-                logging.warning('Failed 2nd attempt to get latest segment')
+                logging.warning("Failed 2nd attempt to get latest segment")
                 logging.exception(e)
         if segment_metadata:
-            return segment_metadata[-1]['columns']
+            return segment_metadata[-1]["columns"]
 
     def refresh_metrics(self):
         for col in self.columns:
             col.refresh_metrics()
 
     @classmethod
-    def sync_to_db_from_config(
-            cls,
-            druid_config,
-            user,
-            cluster,
-            refresh=True):
+    def sync_to_db_from_config(cls, druid_config, user, cluster, refresh=True):
         """Merges the ds config from druid_config into one stored in the db."""
         session = db.session
         datasource = (
-            session.query(cls)
-            .filter_by(datasource_name=druid_config['name'])
-            .first()
+            session.query(cls).filter_by(datasource_name=druid_config["name"]).first()
         )
         # Create a new datasource.
         if not datasource:
             datasource = cls(
-                datasource_name=druid_config['name'],
+                datasource_name=druid_config["name"],
                 cluster=cluster,
                 owners=[user],
                 changed_by_fk=user.id,
@@ -613,7 +668,7 @@ class DruidDatasource(Model, BaseDatasource):
         elif not refresh:
             return
 
-        dimensions = druid_config['dimensions']
+        dimensions = druid_config["dimensions"]
         col_objs = (
             session.query(DruidColumn)
             .filter(DruidColumn.datasource_id == datasource.id)
@@ -629,7 +684,7 @@ class DruidDatasource(Model, BaseDatasource):
                     groupby=True,
                     filterable=True,
                     # TODO: fetch type from Hive.
-                    type='STRING',
+                    type="STRING",
                     datasource=datasource,
                 )
                 session.add(col_obj)
@@ -637,42 +692,43 @@ class DruidDatasource(Model, BaseDatasource):
         metric_objs = (
             session.query(DruidMetric)
             .filter(DruidMetric.datasource_id == datasource.id)
-            .filter(DruidMetric.metric_name.in_(
-                spec['name'] for spec in druid_config['metrics_spec']
-            ))
+            .filter(
+                DruidMetric.metric_name.in_(
+                    spec["name"] for spec in druid_config["metrics_spec"]
+                )
+            )
         )
         metric_objs = {metric.metric_name: metric for metric in metric_objs}
-        for metric_spec in druid_config['metrics_spec']:
-            metric_name = metric_spec['name']
-            metric_type = metric_spec['type']
+        for metric_spec in druid_config["metrics_spec"]:
+            metric_name = metric_spec["name"]
+            metric_type = metric_spec["type"]
             metric_json = json.dumps(metric_spec)
 
-            if metric_type == 'count':
-                metric_type = 'longSum'
-                metric_json = json.dumps({
-                    'type': 'longSum',
-                    'name': metric_name,
-                    'fieldName': metric_name,
-                })
+            if metric_type == "count":
+                metric_type = "longSum"
+                metric_json = json.dumps(
+                    {"type": "longSum", "name": metric_name, "fieldName": metric_name}
+                )
 
             metric_obj = metric_objs.get(metric_name, None)
             if not metric_obj:
                 metric_obj = DruidMetric(
                     metric_name=metric_name,
                     metric_type=metric_type,
-                    verbose_name='%s(%s)' % (metric_type, metric_name),
+                    verbose_name="%s(%s)" % (metric_type, metric_name),
                     datasource=datasource,
                     json=metric_json,
                     description=(
-                        'Imported from the airolap config dir for %s' %
-                        druid_config['name']),
+                        "Imported from the airolap config dir for %s"
+                        % druid_config["name"]
+                    ),
                 )
                 session.add(metric_obj)
         session.commit()
 
     @staticmethod
     def time_offset(granularity):
-        if granularity == 'week_ending_saturday':
+        if granularity == "week_ending_saturday":
             return 6 * 24 * 3600 * 1000  # 6 days
         return 0
 
@@ -681,50 +737,51 @@ class DruidDatasource(Model, BaseDatasource):
     # TODO: pass origin from the UI
     @staticmethod
     def granularity(period_name, timezone=None, origin=None):
-        if not period_name or period_name == 'all':
-            return 'all'
+        if not period_name or period_name == "all":
+            return "all"
         iso_8601_dict = {
-            '5 seconds': 'PT5S',
-            '30 seconds': 'PT30S',
-            '1 minute': 'PT1M',
-            '5 minutes': 'PT5M',
-            '30 minutes': 'PT30M',
-            '1 hour': 'PT1H',
-            '6 hour': 'PT6H',
-            'one day': 'P1D',
-            '1 day': 'P1D',
-            '7 days': 'P7D',
-            'week': 'P1W',
-            'week_starting_sunday': 'P1W',
-            'week_ending_saturday': 'P1W',
-            'month': 'P1M',
-            'quarter': 'P3M',
-            'year': 'P1Y',
+            "5 seconds": "PT5S",
+            "30 seconds": "PT30S",
+            "1 minute": "PT1M",
+            "5 minutes": "PT5M",
+            "30 minutes": "PT30M",
+            "1 hour": "PT1H",
+            "6 hour": "PT6H",
+            "one day": "P1D",
+            "1 day": "P1D",
+            "7 days": "P7D",
+            "week": "P1W",
+            "week_starting_sunday": "P1W",
+            "week_ending_saturday": "P1W",
+            "month": "P1M",
+            "quarter": "P3M",
+            "year": "P1Y",
         }
 
-        granularity = {'type': 'period'}
+        granularity = {"type": "period"}
         if timezone:
-            granularity['timeZone'] = timezone
+            granularity["timeZone"] = timezone
 
         if origin:
             dttm = utils.parse_human_datetime(origin)
-            granularity['origin'] = dttm.isoformat()
+            granularity["origin"] = dttm.isoformat()
 
         if period_name in iso_8601_dict:
-            granularity['period'] = iso_8601_dict[period_name]
-            if period_name in ('week_ending_saturday', 'week_starting_sunday'):
+            granularity["period"] = iso_8601_dict[period_name]
+            if period_name in ("week_ending_saturday", "week_starting_sunday"):
                 # use Sunday as start of the week
-                granularity['origin'] = '2016-01-03T00:00:00'
+                granularity["origin"] = "2016-01-03T00:00:00"
         elif not isinstance(period_name, str):
-            granularity['type'] = 'duration'
-            granularity['duration'] = period_name
-        elif period_name.startswith('P'):
+            granularity["type"] = "duration"
+            granularity["duration"] = period_name
+        elif period_name.startswith("P"):
             # identify if the string is the iso_8601 period
-            granularity['period'] = period_name
+            granularity["period"] = period_name
         else:
-            granularity['type'] = 'duration'
-            granularity['duration'] = utils.parse_human_timedelta(
-                period_name).total_seconds() * 1000
+            granularity["type"] = "duration"
+            granularity["duration"] = (
+                utils.parse_human_timedelta(period_name).total_seconds() * 1000
+            )
         return granularity
 
     @staticmethod
@@ -733,47 +790,35 @@ class DruidDatasource(Model, BaseDatasource):
         For a metric specified as `postagg` returns the
         kind of post aggregation for pydruid.
         """
-        if mconf.get('type') == 'javascript':
+        if mconf.get("type") == "javascript":
             return JavascriptPostAggregator(
-                name=mconf.get('name', ''),
-                field_names=mconf.get('fieldNames', []),
-                function=mconf.get('function', ''))
-        elif mconf.get('type') == 'quantile':
-            return Quantile(
-                mconf.get('name', ''),
-                mconf.get('probability', ''),
-            )
-        elif mconf.get('type') == 'quantiles':
-            return Quantiles(
-                mconf.get('name', ''),
-                mconf.get('probabilities', ''),
-            )
-        elif mconf.get('type') == 'fieldAccess':
-            return Field(mconf.get('name'))
-        elif mconf.get('type') == 'constant':
-            return Const(
-                mconf.get('value'),
-                output_name=mconf.get('name', ''),
+                name=mconf.get("name", ""),
+                field_names=mconf.get("fieldNames", []),
+                function=mconf.get("function", ""),
             )
-        elif mconf.get('type') == 'hyperUniqueCardinality':
-            return HyperUniqueCardinality(
-                mconf.get('name'),
-            )
-        elif mconf.get('type') == 'arithmetic':
+        elif mconf.get("type") == "quantile":
+            return Quantile(mconf.get("name", ""), mconf.get("probability", ""))
+        elif mconf.get("type") == "quantiles":
+            return Quantiles(mconf.get("name", ""), mconf.get("probabilities", ""))
+        elif mconf.get("type") == "fieldAccess":
+            return Field(mconf.get("name"))
+        elif mconf.get("type") == "constant":
+            return Const(mconf.get("value"), output_name=mconf.get("name", ""))
+        elif mconf.get("type") == "hyperUniqueCardinality":
+            return HyperUniqueCardinality(mconf.get("name"))
+        elif mconf.get("type") == "arithmetic":
             return Postaggregator(
-                mconf.get('fn', '/'),
-                mconf.get('fields', []),
-                mconf.get('name', ''))
+                mconf.get("fn", "/"), mconf.get("fields", []), mconf.get("name", "")
+            )
         else:
-            return CustomPostAggregator(
-                mconf.get('name', ''),
-                mconf)
+            return CustomPostAggregator(mconf.get("name", ""), mconf)
 
     @staticmethod
     def find_postaggs_for(postagg_names, metrics_dict):
         """Return a list of metrics that are post aggregations"""
         postagg_metrics = [
-            metrics_dict[name] for name in postagg_names
+            metrics_dict[name]
+            for name in postagg_names
             if metrics_dict[name].metric_type == POST_AGG_TYPE
         ]
         # Remove post aggregations that were found
@@ -783,13 +828,12 @@ class DruidDatasource(Model, BaseDatasource):
 
     @staticmethod
     def recursive_get_fields(_conf):
-        _type = _conf.get('type')
-        _field = _conf.get('field')
-        _fields = _conf.get('fields')
+        _type = _conf.get("type")
+        _field = _conf.get("field")
+        _fields = _conf.get("fields")
         field_names = []
-        if _type in ['fieldAccess', 'hyperUniqueCardinality',
-                     'quantile', 'quantiles']:
-            field_names.append(_conf.get('fieldName', ''))
+        if _type in ["fieldAccess", "hyperUniqueCardinality", "quantile", "quantiles"]:
+            field_names.append(_conf.get("fieldName", ""))
         if _field:
             field_names += DruidDatasource.recursive_get_fields(_field)
         if _fields:
@@ -801,18 +845,22 @@ class DruidDatasource(Model, BaseDatasource):
     def resolve_postagg(postagg, post_aggs, agg_names, visited_postaggs, metrics_dict):
         mconf = postagg.json_obj
         required_fields = set(
-            DruidDatasource.recursive_get_fields(mconf) +
-            mconf.get('fieldNames', []))
+            DruidDatasource.recursive_get_fields(mconf) + mconf.get("fieldNames", [])
+        )
         # Check if the fields are already in aggs
         # or is a previous postagg
-        required_fields = set([
-            field for field in required_fields
-            if field not in visited_postaggs and field not in agg_names
-        ])
+        required_fields = set(
+            [
+                field
+                for field in required_fields
+                if field not in visited_postaggs and field not in agg_names
+            ]
+        )
         # First try to find postaggs that match
         if len(required_fields) > 0:
             missing_postaggs = DruidDatasource.find_postaggs_for(
-                required_fields, metrics_dict)
+                required_fields, metrics_dict
+            )
             for missing_metric in required_fields:
                 agg_names.add(missing_metric)
             for missing_postagg in missing_postaggs:
@@ -821,7 +869,12 @@ class DruidDatasource(Model, BaseDatasource):
                 visited_postaggs.add(missing_postagg.metric_name)
             for missing_postagg in missing_postaggs:
                 DruidDatasource.resolve_postagg(
-                    missing_postagg, post_aggs, agg_names, visited_postaggs, metrics_dict)
+                    missing_postagg,
+                    post_aggs,
+                    agg_names,
+                    visited_postaggs,
+                    metrics_dict,
+                )
         post_aggs[postagg.metric_name] = DruidDatasource.get_post_agg(postagg.json_obj)
 
     @staticmethod
@@ -846,21 +899,18 @@ class DruidDatasource(Model, BaseDatasource):
             postagg = metrics_dict[postagg_name]
             visited_postaggs.add(postagg_name)
             DruidDatasource.resolve_postagg(
-                postagg, post_aggs, saved_agg_names, visited_postaggs, metrics_dict)
+                postagg, post_aggs, saved_agg_names, visited_postaggs, metrics_dict
+            )
         aggs = DruidDatasource.get_aggregations(
-            metrics_dict,
-            saved_agg_names,
-            adhoc_agg_configs,
+            metrics_dict, saved_agg_names, adhoc_agg_configs
         )
         return aggs, post_aggs
 
-    def values_for_column(self,
-                          column_name,
-                          limit=10000):
+    def values_for_column(self, column_name, limit=10000):
         """Retrieve some values for the given column"""
         logging.info(
-            'Getting values for columns [{}] limited to [{}]'
-            .format(column_name, limit))
+            "Getting values for columns [{}] limited to [{}]".format(column_name, limit)
+        )
         # TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid
         if self.fetch_values_from:
             from_dttm = utils.parse_human_datetime(self.fetch_values_from)
@@ -869,11 +919,11 @@ class DruidDatasource(Model, BaseDatasource):
 
         qry = dict(
             datasource=self.datasource_name,
-            granularity='all',
-            intervals=from_dttm.isoformat() + '/' + datetime.now().isoformat(),
-            aggregations=dict(count=count('count')),
+            granularity="all",
+            intervals=from_dttm.isoformat() + "/" + datetime.now().isoformat(),
+            aggregations=dict(count=count("count")),
             dimension=column_name,
-            metric='count',
+            metric="count",
             threshold=limit,
         )
 
@@ -895,16 +945,18 @@ class DruidDatasource(Model, BaseDatasource):
                     f = None
                     # Check if this dimension uses an extraction function
                     # If so, create the appropriate pydruid extraction object
-                    if isinstance(dim, dict) and 'extractionFn' in dim:
-                        (col, extraction_fn) = DruidDatasource._create_extraction_fn(dim)
-                        dim_val = dim['outputName']
+                    if isinstance(dim, dict) and "extractionFn" in dim:
+                        (col, extraction_fn) = DruidDatasource._create_extraction_fn(
+                            dim
+                        )
+                        dim_val = dim["outputName"]
                         f = Filter(
                             dimension=col,
                             value=row[dim_val],
                             extraction_function=extraction_fn,
                         )
                     elif isinstance(dim, dict):
-                        dim_val = dim['outputName']
+                        dim_val = dim["outputName"]
                         if dim_val:
                             f = Dimension(dim_val) == row[dim_val]
                     else:
@@ -912,27 +964,27 @@ class DruidDatasource(Model, BaseDatasource):
                     if f:
                         fields.append(f)
                 if len(fields) > 1:
-                    term = Filter(type='and', fields=fields)
+                    term = Filter(type="and", fields=fields)
                     new_filters.append(term)
                 elif fields:
                     new_filters.append(fields[0])
             if new_filters:
-                ff = Filter(type='or', fields=new_filters)
+                ff = Filter(type="or", fields=new_filters)
                 if not dim_filter:
                     ret = ff
                 else:
-                    ret = Filter(type='and', fields=[ff, dim_filter])
+                    ret = Filter(type="and", fields=[ff, dim_filter])
         return ret
 
     @staticmethod
     def druid_type_from_adhoc_metric(adhoc_metric):
-        column_type = adhoc_metric['column']['type'].lower()
-        aggregate = adhoc_metric['aggregate'].lower()
+        column_type = adhoc_metric["column"]["type"].lower()
+        aggregate = adhoc_metric["aggregate"].lower()
 
-        if aggregate == 'count':
-            return 'count'
-        if aggregate == 'count_distinct':
-            return 'hyperUnique' if column_type == 'hyperunique' else 'cardinality'
+        if aggregate == "count":
+            return "count"
+        if aggregate == "count_distinct":
+            return "hyperUnique" if column_type == "hyperunique" else "cardinality"
         else:
             return column_type + aggregate.capitalize()
 
@@ -959,26 +1011,28 @@ class DruidDatasource(Model, BaseDatasource):
                 invalid_metric_names.append(metric_name)
         if len(invalid_metric_names) > 0:
             raise SupersetException(
-                _('Metric(s) {} must be aggregations.').format(invalid_metric_names))
+                _("Metric(s) {} must be aggregations.").format(invalid_metric_names)
+            )
         for adhoc_metric in adhoc_metrics:
-            aggregations[adhoc_metric['label']] = {
-                'fieldName': adhoc_metric['column']['column_name'],
-                'fieldNames': [adhoc_metric['column']['column_name']],
-                'type': DruidDatasource.druid_type_from_adhoc_metric(adhoc_metric),
-                'name': adhoc_metric['label'],
+            aggregations[adhoc_metric["label"]] = {
+                "fieldName": adhoc_metric["column"]["column_name"],
+                "fieldNames": [adhoc_metric["column"]["column_name"]],
+                "type": DruidDatasource.druid_type_from_adhoc_metric(adhoc_metric),
+                "name": adhoc_metric["label"],
             }
         return aggregations
 
     def check_restricted_metrics(self, aggregations):
         rejected_metrics = [
-            m.metric_name for m in self.metrics
-            if m.is_restricted and
-            m.metric_name in aggregations.keys() and
-            not security_manager.has_access('metric_access', m.perm)
+            m.metric_name
+            for m in self.metrics
+            if m.is_restricted
+            and m.metric_name in aggregations.keys()
+            and not security_manager.has_access("metric_access", m.perm)
         ]
         if rejected_metrics:
             raise MetricPermException(
-                'Access to the metrics denied: ' + ', '.join(rejected_metrics),
+                "Access to the metrics denied: " + ", ".join(rejected_metrics)
             )
 
     def get_dimensions(self, groupby, columns_dict):
@@ -1001,9 +1055,9 @@ class DruidDatasource(Model, BaseDatasource):
         # add tzinfo to native datetime with config
         from_dttm = from_dttm.replace(tzinfo=DRUID_TZ)
         to_dttm = to_dttm.replace(tzinfo=DRUID_TZ)
-        return '{}/{}'.format(
-            from_dttm.isoformat() if from_dttm else '',
-            to_dttm.isoformat() if to_dttm else '',
+        return "{}/{}".format(
+            from_dttm.isoformat() if from_dttm else "",
+            to_dttm.isoformat() if to_dttm else "",
         )
 
     @staticmethod
@@ -1015,10 +1069,10 @@ class DruidDatasource(Model, BaseDatasource):
         values = []
         for dimension in dimensions:
             if isinstance(dimension, dict):
-                if 'extractionFn' in dimension:
+                if "extractionFn" in dimension:
                     values.append(dimension)
-                elif 'dimension' in dimension:
-                    values.append(dimension['dimension'])
+                elif "dimension" in dimension:
+                    values.append(dimension["dimension"])
             else:
                 values.append(dimension)
 
@@ -1031,60 +1085,64 @@ class DruidDatasource(Model, BaseDatasource):
         :param dict metric: The metric to sanitize
         """
         if (
-            utils.is_adhoc_metric(metric) and
-            metric['column']['type'].upper() == 'FLOAT'
+            utils.is_adhoc_metric(metric)
+            and metric["column"]["type"].upper() == "FLOAT"
         ):
-            metric['column']['type'] = 'DOUBLE'
+            metric["column"]["type"] = "DOUBLE"
 
     def run_query(  # noqa / druid
-            self,
-            groupby, metrics,
-            granularity,
-            from_dttm, to_dttm,
-            filter=None,  # noqa
-            is_timeseries=True,
-            timeseries_limit=None,
-            timeseries_limit_metric=None,
-            row_limit=None,
-            inner_from_dttm=None, inner_to_dttm=None,
-            orderby=None,
-            extras=None,  # noqa
-            columns=None, phase=2, client=None,
-            order_desc=True,
-            prequeries=None,
-            is_prequery=False,
-        ):
+        self,
+        groupby,
+        metrics,
+        granularity,
+        from_dttm,
+        to_dttm,
+        filter=None,  # noqa
+        is_timeseries=True,
+        timeseries_limit=None,
+        timeseries_limit_metric=None,
+        row_limit=None,
+        inner_from_dttm=None,
+        inner_to_dttm=None,
+        orderby=None,
+        extras=None,  # noqa
+        columns=None,
+        phase=2,
+        client=None,
+        order_desc=True,
+        prequeries=None,
+        is_prequery=False,
+    ):
         """Runs a query against Druid and returns a dataframe.
         """
         # TODO refactor into using a TBD Query object
         client = client or self.cluster.get_pydruid_client()
-        row_limit = row_limit or conf.get('ROW_LIMIT')
+        row_limit = row_limit or conf.get("ROW_LIMIT")
 
         if not is_timeseries:
-            granularity = 'all'
+            granularity = "all"
 
-        if granularity == 'all':
+        if granularity == "all":
             phase = 1
         inner_from_dttm = inner_from_dttm or from_dttm
         inner_to_dttm = inner_to_dttm or to_dttm
 
         timezone = from_dttm.replace(tzinfo=DRUID_TZ).tzname() if from_dttm else None
 
-        query_str = ''
+        query_str = ""
         metrics_dict = {m.metric_name: m for m in self.metrics}
         columns_dict = {c.column_name: c for c in self.columns}
 
-        if (
-            self.cluster and
-            LooseVersion(self.cluster.get_druid_version()) < LooseVersion('0.11.0')
-        ):
+        if self.cluster and LooseVersion(
+            self.cluster.get_druid_version()
+        ) < LooseVersion("0.11.0"):
             for metric in metrics:
                 self.sanitize_metric_object(metric)
             self.sanitize_metric_object(timeseries_limit_metric)
 
         aggregations, post_aggs = DruidDatasource.metrics_and_post_aggs(
-            metrics,
-            metrics_dict)
+            metrics, metrics_dict
+        )
 
         self.check_restricted_metrics(aggregations)
 
@@ -1096,9 +1154,7 @@ class DruidDatasource(Model, BaseDatasource):
             dimensions=dimensions,
             aggregations=aggregations,
             granularity=DruidDatasource.granularity(
-                granularity,
-                timezone=timezone,
-                origin=extras.get('druid_time_origin'),
+                granularity, timezone=timezone, origin=extras.get("druid_time_origin")
             ),
             post_aggregations=post_aggs,
             intervals=self.intervals_from_dttms(from_dttm, to_dttm),
@@ -1106,100 +1162,94 @@ class DruidDatasource(Model, BaseDatasource):
 
         filters = DruidDatasource.get_filters(filter, self.num_cols, columns_dict)
         if filters:
-            qry['filter'] = filters
+            qry["filter"] = filters
 
-        having_filters = self.get_having_filters(extras.get('having_druid'))
+        having_filters = self.get_having_filters(extras.get("having_druid"))
         if having_filters:
-            qry['having'] = having_filters
+            qry["having"] = having_filters
 
-        order_direction = 'descending' if order_desc else 'ascending'
+        order_direction = "descending" if order_desc else "ascending"
 
         if columns:
-            columns.append('__time')
-            del qry['post_aggregations']
-            del qry['aggregations']
-            del qry['dimensions']
-            qry['columns'] = columns
-            qry['metrics'] = []
-            qry['granularity'] = 'all'
-            qry['limit'] = row_limit
+            columns.append("__time")
+            del qry["post_aggregations"]
+            del qry["aggregations"]
+            del qry["dimensions"]
+            qry["columns"] = columns
+            qry["metrics"] = []
+            qry["granularity"] = "all"
+            qry["limit"] = row_limit
             client.scan(**qry)
         elif len(groupby) == 0 and not having_filters:
-            logging.info('Running timeseries query for no groupby values')
-            del qry['dimensions']
+            logging.info("Running timeseries query for no groupby values")
+            del qry["dimensions"]
             client.timeseries(**qry)
-        elif (
-                not having_filters and
-                len(groupby) == 1 and
-                order_desc
-        ):
-            dim = list(qry.get('dimensions'))[0]
-            logging.info('Running two-phase topn query for dimension [{}]'.format(dim))
+        elif not having_filters and len(groupby) == 1 and order_desc:
+            dim = list(qry.get("dimensions"))[0]
+            logging.info("Running two-phase topn query for dimension [{}]".format(dim))
             pre_qry = deepcopy(qry)
             if timeseries_limit_metric:
                 order_by = utils.get_metric_name(timeseries_limit_metric)
                 aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs(
-                    [timeseries_limit_metric],
-                    metrics_dict)
+                    [timeseries_limit_metric], metrics_dict
+                )
                 if phase == 1:
-                    pre_qry['aggregations'].update(aggs_dict)
-                    pre_qry['post_aggregations'].update(post_aggs_dict)
+                    pre_qry["aggregations"].update(aggs_dict)
+                    pre_qry["post_aggregations"].update(post_aggs_dict)
                 else:
-                    pre_qry['aggregations'] = aggs_dict
-                    pre_qry['post_aggregations'] = post_aggs_dict
+                    pre_qry["aggregations"] = aggs_dict
+                    pre_qry["post_aggregations"] = post_aggs_dict
             else:
-                agg_keys = qry['aggregations'].keys()
+                agg_keys = qry["aggregations"].keys()
                 order_by = list(agg_keys)[0] if agg_keys else None
 
             # Limit on the number of timeseries, doing a two-phase query
-            pre_qry['granularity'] = 'all'
-            pre_qry['threshold'] = min(row_limit,
-                                       timeseries_limit or row_limit)
-            pre_qry['metric'] = order_by
-            pre_qry['dimension'] = self._dimensions_to_values(qry.get('dimensions'))[0]
-            del pre_qry['dimensions']
+            pre_qry["granularity"] = "all"
+            pre_qry["threshold"] = min(row_limit, timeseries_limit or row_limit)
+            pre_qry["metric"] = order_by
+            pre_qry["dimension"] = self._dimensions_to_values(qry.get("dimensions"))[0]
+            del pre_qry["dimensions"]
 
             client.topn(**pre_qry)
-            logging.info('Phase 1 Complete')
+            logging.info("Phase 1 Complete")
             if phase == 2:
-                query_str += '// Two phase query\n// Phase 1\n'
+                query_str += "// Two phase query\n// Phase 1\n"
             query_str += json.dumps(
-                client.query_builder.last_query.query_dict, indent=2)
-            query_str += '\n'
+                client.query_builder.last_query.query_dict, indent=2
+            )
+            query_str += "\n"
             if phase == 1:
                 return query_str
-            query_str += (
-                "// Phase 2 (built based on phase one's results)\n")
+            query_str += "// Phase 2 (built based on phase one's results)\n"
             df = client.export_pandas()
-            qry['filter'] = self._add_filter_from_pre_query_data(
-                df,
-                [pre_qry['dimension']],
-                filters)
-            qry['threshold'] = timeseries_limit or 1000
-            if row_limit and granularity == 'all':
-                qry['threshold'] = row_limit
-            qry['dimension'] = dim
-            del qry['dimensions']
-            qry['metric'] = list(qry['aggregations'].keys())[0]
+            qry["filter"] = self._add_filter_from_pre_query_data(
+                df, [pre_qry["dimension"]], filters
+            )
+            qry["threshold"] = timeseries_limit or 1000
+            if row_limit and granularity == "all":
+                qry["threshold"] = row_limit
+            qry["dimension"] = dim
+            del qry["dimensions"]
+            qry["metric"] = list(qry["aggregations"].keys())[0]
             client.topn(**qry)
-            logging.info('Phase 2 Complete')
+            logging.info("Phase 2 Complete")
         elif len(groupby) > 0 or having_filters:
             # If grouping on multiple fields or using a having filter
             # we have to force a groupby query
-            logging.info('Running groupby query for dimensions [{}]'.format(dimensions))
+            logging.info("Running groupby query for dimensions [{}]".format(dimensions))
             if timeseries_limit and is_timeseries:
-                logging.info('Running two-phase query for timeseries')
+                logging.info("Running two-phase query for timeseries")
 
                 pre_qry = deepcopy(qry)
-                pre_qry_dims = self._dimensions_to_values(qry['dimensions'])
+                pre_qry_dims = self._dimensions_to_values(qry["dimensions"])
 
                 # Can't use set on an array with dicts
                 # Use set with non-dict items only
                 non_dict_dims = list(
-                    set([x for x in pre_qry_dims if not isinstance(x, dict)]),
+                    set([x for x in pre_qry_dims if not isinstance(x, dict)])
                 )
                 dict_dims = [x for x in pre_qry_dims if isinstance(x, dict)]
-                pre_qry['dimensions'] = non_dict_dims + dict_dims
+                pre_qry["dimensions"] = non_dict_dims + dict_dims
 
                 order_by = None
                 if metrics:
@@ -1210,62 +1260,59 @@ class DruidDatasource(Model, BaseDatasource):
                 if timeseries_limit_metric:
                     order_by = utils.get_metric_name(timeseries_limit_metric)
                     aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs(
-                        [timeseries_limit_metric],
-                        metrics_dict)
+                        [timeseries_limit_metric], metrics_dict
+                    )
                     if phase == 1:
-                        pre_qry['aggregations'].update(aggs_dict)
-                        pre_qry['post_aggregations'].update(post_aggs_dict)
+                        pre_qry["aggregations"].update(aggs_dict)
+                        pre_qry["post_aggregations"].update(post_aggs_dict)
                     else:
-                        pre_qry['aggregations'] = aggs_dict
-                        pre_qry['post_aggregations'] = post_aggs_dict
+                        pre_qry["aggregations"] = aggs_dict
+                        pre_qry["post_aggregations"] = post_aggs_dict
 
                 # Limit on the number of timeseries, doing a two-phase query
-                pre_qry['granularity'] = 'all'
-                pre_qry['limit_spec'] = {
-                    'type': 'default',
-                    'limit': min(timeseries_limit, row_limit),
-                    'intervals': self.intervals_from_dttms(
-                        inner_from_dttm, inner_to_dttm),
-                    'columns': [{
-                        'dimension': order_by,
-                        'direction': order_direction,
-                    }],
+                pre_qry["granularity"] = "all"
+                pre_qry["limit_spec"] = {
+                    "type": "default",
+                    "limit": min(timeseries_limit, row_limit),
+                    "intervals": self.intervals_from_dttms(
+                        inner_from_dttm, inner_to_dttm
+                    ),
+                    "columns": [{"dimension": order_by, "direction": order_direction}],
                 }
                 client.groupby(**pre_qry)
-                logging.info('Phase 1 Complete')
-                query_str += '// Two phase query\n// Phase 1\n'
+                logging.info("Phase 1 Complete")
+                query_str += "// Two phase query\n// Phase 1\n"
                 query_str += json.dumps(
-                    client.query_builder.last_query.query_dict, indent=2)
-                query_str += '\n'
+                    client.query_builder.last_query.query_dict, indent=2
+                )
+                query_str += "\n"
                 if phase == 1:
                     return query_str
-                query_str += (
-                    "// Phase 2 (built based on phase one's results)\n")
+                query_str += "// Phase 2 (built based on phase one's results)\n"
                 df = client.export_pandas()
-                qry['filter'] = self._add_filter_from_pre_query_data(
-                    df,
-                    pre_qry['dimensions'],
-                    filters,
+                qry["filter"] = self._add_filter_from_pre_query_data(
+                    df, pre_qry["dimensions"], filters
                 )
-                qry['limit_spec'] = None
+                qry["limit_spec"] = None
             if row_limit:
                 dimension_values = self._dimensions_to_values(dimensions)
-                qry['limit_spec'] = {
-                    'type': 'default',
-                    'limit': row_limit,
-                    'columns': [{
-                        'dimension': (
-                            utils.get_metric_name(
-                                metrics[0],
-                            ) if metrics else dimension_values[0]
-                        ),
-                        'direction': order_direction,
-                    }],
+                qry["limit_spec"] = {
+                    "type": "default",
+                    "limit": row_limit,
+                    "columns": [
+                        {
+                            "dimension": (
+                                utils.get_metric_name(metrics[0])
+                                if metrics
+                                else dimension_values[0]
+                            ),
+                            "direction": order_direction,
+                        }
+                    ],
                 }
             client.groupby(**qry)
-            logging.info('Query Complete')
-        query_str += json.dumps(
-            client.query_builder.last_query.query_dict, indent=2)
+            logging.info("Query Complete")
+        query_str += json.dumps(client.query_builder.last_query.query_dict, indent=2)
         return query_str
 
     @staticmethod
@@ -1280,82 +1327,79 @@ class DruidDatasource(Model, BaseDatasource):
         str instead of an object.
         """
         for col in groupby_cols:
-            df[col] = df[col].fillna('<NULL>').astype('unicode')
+            df[col] = df[col].fillna("<NULL>").astype("unicode")
         return df
 
     def query(self, query_obj):
         qry_start_dttm = datetime.now()
         client = self.cluster.get_pydruid_client()
-        query_str = self.get_query_str(
-            client=client, query_obj=query_obj, phase=2)
+        query_str = self.get_query_str(client=client, query_obj=query_obj, phase=2)
         df = client.export_pandas()
 
         if df is None or df.size == 0:
             return QueryResult(
                 df=pandas.DataFrame([]),
                 query=query_str,
-                duration=datetime.now() - qry_start_dttm)
+                duration=datetime.now() - qry_start_dttm,
+            )
 
-        df = self.homogenize_types(df, query_obj.get('groupby', []))
+        df = self.homogenize_types(df, query_obj.get("groupby", []))
         df.columns = [
-            DTTM_ALIAS if c in ('timestamp', '__time') else c
-            for c in df.columns
+            DTTM_ALIAS if c in ("timestamp", "__time") else c for c in df.columns
         ]
 
-        is_timeseries = query_obj['is_timeseries'] \
-            if 'is_timeseries' in query_obj else True
-        if (
-                not is_timeseries and
-                DTTM_ALIAS in df.columns):
+        is_timeseries = (
+            query_obj["is_timeseries"] if "is_timeseries" in query_obj else True
+        )
+        if not is_timeseries and DTTM_ALIAS in df.columns:
             del df[DTTM_ALIAS]
 
         # Reordering columns
         cols = []
         if DTTM_ALIAS in df.columns:
             cols += [DTTM_ALIAS]
-        cols += query_obj.get('groupby') or []
-        cols += query_obj.get('columns') or []
-        cols += query_obj.get('metrics') or []
+        cols += query_obj.get("groupby") or []
+        cols += query_obj.get("columns") or []
+        cols += query_obj.get("metrics") or []
 
         cols = utils.get_metric_names(cols)
         cols = [col for col in cols if col in df.columns]
         df = df[cols]
 
-        time_offset = DruidDatasource.time_offset(query_obj['granularity'])
+        time_offset = DruidDatasource.time_offset(query_obj["granularity"])
 
         def increment_timestamp(ts):
-            dt = utils.parse_human_datetime(ts).replace(
-                tzinfo=DRUID_TZ)
+            dt = utils.parse_human_datetime(ts).replace(tzinfo=DRUID_TZ)
             return dt + timedelta(milliseconds=time_offset)
+
         if DTTM_ALIAS in df.columns and time_offset:
             df[DTTM_ALIAS] = df[DTTM_ALIAS].apply(increment_timestamp)
 
         return QueryResult(
-            df=df,
-            query=query_str,
-            duration=datetime.now() - qry_start_dttm)
+            df=df, query=query_str, duration=datetime.now() - qry_start_dttm
+        )
 
     @staticmethod
     def _create_extraction_fn(dim_spec):
         extraction_fn = None
-        if dim_spec and 'extractionFn' in dim_spec:
-            col = dim_spec['dimension']
-            fn = dim_spec['extractionFn']
-            ext_type = fn.get('type')
-            if ext_type == 'lookup' and fn['lookup'].get('type') == 'map':
-                replace_missing_values = fn.get('replaceMissingValueWith')
-                retain_missing_values = fn.get('retainMissingValue', False)
-                injective = fn.get('isOneToOne', False)
+        if dim_spec and "extractionFn" in dim_spec:
+            col = dim_spec["dimension"]
+            fn = dim_spec["extractionFn"]
+            ext_type = fn.get("type")
+            if ext_type == "lookup" and fn["lookup"].get("type") == "map":
+                replace_missing_values = fn.get("replaceMissingValueWith")
+                retain_missing_values = fn.get("retainMissingValue", False)
+                injective = fn.get("isOneToOne", False)
                 extraction_fn = MapLookupExtraction(
-                    fn['lookup']['map'],
+                    fn["lookup"]["map"],
                     replace_missing_values=replace_missing_values,
                     retain_missing_values=retain_missing_values,
                     injective=injective,
                 )
-            elif ext_type == 'regex':
-                extraction_fn = RegexExtraction(fn['expr'])
+            elif ext_type == "regex":
+                extraction_fn = RegexExtraction(fn["expr"])
             else:
-                raise Exception(_('Unsupported extraction function: ' + ext_type))
+                raise Exception(_("Unsupported extraction function: " + ext_type))
         return (col, extraction_fn)
 
     @classmethod
@@ -1363,13 +1407,14 @@ class DruidDatasource(Model, BaseDatasource):
         """Given Superset filter data structure, returns pydruid Filter(s)"""
         filters = None
         for flt in raw_filters:
-            col = flt.get('col')
-            op = flt.get('op')
-            eq = flt.get('val')
+            col = flt.get("col")
+            op = flt.get("op")
+            eq = flt.get("val")
             if (
-                    not col or
-                    not op or
-                    (eq is None and op not in ('IS NULL', 'IS NOT NULL'))):
+                not col
+                or not op
+                or (eq is None and op not in ("IS NULL", "IS NOT NULL"))
+            ):
                 continue
 
             # Check if this dimension uses an extraction function
@@ -1377,23 +1422,29 @@ class DruidDatasource(Model, BaseDatasource):
             column_def = columns_dict.get(col)
             dim_spec = column_def.dimension_spec if column_def else None
             extraction_fn = None
-            if dim_spec and 'extractionFn' in dim_spec:
+            if dim_spec and "extractionFn" in dim_spec:
                 (col, extraction_fn) = DruidDatasource._create_extraction_fn(dim_spec)
 
             cond = None
             is_numeric_col = col in num_cols
-            is_list_target = op in ('in', 'not in')
+            is_list_target = op in ("in", "not in")
             eq = cls.filter_values_handler(
-                eq, is_list_target=is_list_target,
-                target_column_is_numeric=is_numeric_col)
+                eq,
+                is_list_target=is_list_target,
+                target_column_is_numeric=is_numeric_col,
+            )
 
             # For these two ops, could have used Dimension,
             # but it doesn't support extraction functions
-            if op == '==':
-                cond = Filter(dimension=col, value=eq, extraction_function=extraction_fn)
-            elif op == '!=':
-                cond = ~Filter(dimension=col, value=eq, extraction_function=extraction_fn)
-            elif op in ('in', 'not in'):
+            if op == "==":
+                cond = Filter(
+                    dimension=col, value=eq, extraction_function=extraction_fn
+                )
+            elif op == "!=":
+                cond = ~Filter(
+                    dimension=col, value=eq, extraction_function=extraction_fn
+                )
+            elif op in ("in", "not in"):
                 fields = []
                 # ignore the filter if it has no value
                 if not len(eq):
@@ -1404,7 +1455,7 @@ class DruidDatasource(Model, BaseDatasource):
                     cond = Filter(
                         dimension=col,
                         values=eq,
-                        type='in',
+                        type="in",
                         extraction_function=extraction_fn,
                     )
                 elif len(eq) == 1:
@@ -1412,22 +1463,22 @@ class DruidDatasource(Model, BaseDatasource):
                 else:
                     for s in eq:
                         fields.append(Dimension(col) == s)
-                    cond = Filter(type='or', fields=fields)
-                if op == 'not in':
+                    cond = Filter(type="or", fields=fields)
+                if op == "not in":
                     cond = ~cond
-            elif op == 'regex':
+            elif op == "regex":
                 cond = Filter(
                     extraction_function=extraction_fn,
-                    type='regex',
+                    type="regex",
                     pattern=eq,
                     dimension=col,
                 )
 
             # For the ops below, could have used pydruid's Bound,
             # but it doesn't support extraction functions
-            elif op == '>=':
+            elif op == ">=":
                 cond = Filter(
-                    type='bound',
+                    type="bound",
                     extraction_function=extraction_fn,
                     dimension=col,
                     lowerStrict=False,
@@ -1436,9 +1487,9 @@ class DruidDatasource(Model, BaseDatasource):
                     upper=None,
                     alphaNumeric=is_numeric_col,
                 )
-            elif op == '<=':
+            elif op == "<=":
                 cond = Filter(
-                    type='bound',
+                    type="bound",
                     extraction_function=extraction_fn,
                     dimension=col,
                     lowerStrict=False,
@@ -1447,9 +1498,9 @@ class DruidDatasource(Model, BaseDatasource):
                     upper=eq,
                     alphaNumeric=is_numeric_col,
                 )
-            elif op == '>':
+            elif op == ">":
                 cond = Filter(
-                    type='bound',
+                    type="bound",
                     extraction_function=extraction_fn,
                     lowerStrict=True,
                     upperStrict=False,
@@ -1458,9 +1509,9 @@ class DruidDatasource(Model, BaseDatasource):
                     upper=None,
                     alphaNumeric=is_numeric_col,
                 )
-            elif op == '<':
+            elif op == "<":
                 cond = Filter(
-                    type='bound',
+                    type="bound",
                     extraction_function=extraction_fn,
                     upperStrict=True,
                     lowerStrict=False,
@@ -1469,16 +1520,13 @@ class DruidDatasource(Model, BaseDatasource):
                     upper=eq,
                     alphaNumeric=is_numeric_col,
                 )
-            elif op == 'IS NULL':
+            elif op == "IS NULL":
                 cond = Dimension(col) == None  # NOQA
-            elif op == 'IS NOT NULL':
+            elif op == "IS NOT NULL":
                 cond = Dimension(col) != None  # NOQA
 
             if filters:
-                filters = Filter(type='and', fields=[
-                    cond,
-                    filters,
-                ])
+                filters = Filter(type="and", fields=[cond, filters])
             else:
                 filters = cond
 
@@ -1486,34 +1534,30 @@ class DruidDatasource(Model, BaseDatasource):
 
     def _get_having_obj(self, col, op, eq):
         cond = None
-        if op == '==':
+        if op == "==":
             if col in self.column_names:
                 cond = DimSelector(dimension=col, value=eq)
             else:
                 cond = Aggregation(col) == eq
-        elif op == '>':
+        elif op == ">":
             cond = Aggregation(col) > eq
-        elif op == '<':
+        elif op == "<":
             cond = Aggregation(col) < eq
 
         return cond
 
     def get_having_filters(self, raw_filters):
         filters = None
-        reversed_op_map = {
-            '!=': '==',
-            '>=': '<',
-            '<=': '>',
-        }
+        reversed_op_map = {"!=": "==", ">=": "<", "<=": ">"}
 
         for flt in raw_filters:
-            if not all(f in flt for f in ['col', 'op', 'val']):
+            if not all(f in flt for f in ["col", "op", "val"]):
                 continue
-            col = flt['col']
-            op = flt['op']
-            eq = flt['val']
+            col = flt["col"]
+            op = flt["op"]
+            eq = flt["val"]
             cond = None
-            if op in ['==', '>', '<']:
+            if op in ["==", ">", "<"]:
                 cond = self._get_having_obj(col, op, eq)
             elif op in reversed_op_map:
                 cond = ~self._get_having_obj(col, reversed_op_map[op], eq)
@@ -1525,8 +1569,7 @@ class DruidDatasource(Model, BaseDatasource):
         return filters
 
     @classmethod
-    def query_datasources_by_name(
-            cls, session, database, datasource_name, schema=None):
+    def query_datasources_by_name(cls, session, database, datasource_name, schema=None):
         return (
             session.query(cls)
             .filter_by(cluster_name=database.id)
@@ -1537,13 +1580,10 @@ class DruidDatasource(Model, BaseDatasource):
     def external_metadata(self):
         self.merge_flag = True
         return [
-            {
-                'name': k,
-                'type': v.get('type'),
-            }
+            {"name": k, "type": v.get("type")}
             for k, v in self.latest_metadata().items()
         ]
 
 
-sa.event.listen(DruidDatasource, 'after_insert', security_manager.set_perm)
-sa.event.listen(DruidDatasource, 'after_update', security_manager.set_perm)
+sa.event.listen(DruidDatasource, "after_insert", security_manager.set_perm)
+sa.event.listen(DruidDatasource, "after_update", security_manager.set_perm)
diff --git a/superset/connectors/druid/views.py b/superset/connectors/druid/views.py
index 103dd25..8de7e95 100644
--- a/superset/connectors/druid/views.py
+++ b/superset/connectors/druid/views.py
@@ -33,9 +33,14 @@ from superset.connectors.base.views import DatasourceModelView
 from superset.connectors.connector_registry import ConnectorRegistry
 from superset.utils import core as utils
 from superset.views.base import (
-    BaseSupersetView, DatasourceFilter, DeleteMixin,
-    get_datasource_exist_error_msg, ListWidgetWithCheckboxes, SupersetModelView,
-    validate_json, YamlExportMixin,
+    BaseSupersetView,
+    DatasourceFilter,
+    DeleteMixin,
+    get_datasource_exist_error_msg,
+    ListWidgetWithCheckboxes,
+    SupersetModelView,
+    validate_json,
+    YamlExportMixin,
 )
 from . import models
 
@@ -43,48 +48,56 @@ from . import models
 class DruidColumnInlineView(CompactCRUDMixin, SupersetModelView):  # noqa
     datamodel = SQLAInterface(models.DruidColumn)
 
-    list_title = _('Columns')
-    show_title = _('Show Druid Column')
-    add_title = _('Add Druid Column')
-    edit_title = _('Edit Druid Column')
+    list_title = _("Columns")
+    show_title = _("Show Druid Column")
+    add_title = _("Add Druid Column")
+    edit_title = _("Edit Druid Column")
 
     list_widget = ListWidgetWithCheckboxes
 
     edit_columns = [
-        'column_name', 'verbose_name', 'description', 'dimension_spec_json', 'datasource',
-        'groupby', 'filterable']
+        "column_name",
+        "verbose_name",
+        "description",
+        "dimension_spec_json",
+        "datasource",
+        "groupby",
+        "filterable",
+    ]
     add_columns = edit_columns
-    list_columns = ['column_name', 'verbose_name', 'type', 'groupby', 'filterable']
+    list_columns = ["column_name", "verbose_name", "type", "groupby", "filterable"]
     can_delete = False
     page_size = 500
     label_columns = {
-        'column_name': _('Column'),
-        'type': _('Type'),
-        'datasource': _('Datasource'),
-        'groupby': _('Groupable'),
-        'filterable': _('Filterable'),
+        "column_name": _("Column"),
+        "type": _("Type"),
+        "datasource": _("Datasource"),
+        "groupby": _("Groupable"),
+        "filterable": _("Filterable"),
     }
     description_columns = {
-        'filterable': _(
-            'Whether this column is exposed in the `Filters` section '
-            'of the explore view.'),
-        'dimension_spec_json': utils.markdown(
-            'this field can be used to specify  '
-            'a `dimensionSpec` as documented [here]'
-            '(http://druid.io/docs/latest/querying/dimensionspecs.html). '
-            'Make sure to input valid JSON and that the '
-            '`outputName` matches the `column_name` defined '
-            'above.',
-            True),
+        "filterable": _(
+            "Whether this column is exposed in the `Filters` section "
+            "of the explore view."
+        ),
+        "dimension_spec_json": utils.markdown(
+            "this field can be used to specify  "
+            "a `dimensionSpec` as documented [here]"
+            "(http://druid.io/docs/latest/querying/dimensionspecs.html). "
+            "Make sure to input valid JSON and that the "
+            "`outputName` matches the `column_name` defined "
+            "above.",
+            True,
+        ),
     }
 
     add_form_extra_fields = {
-        'datasource': QuerySelectField(
-            'Datasource',
+        "datasource": QuerySelectField(
+            "Datasource",
             query_factory=lambda: db.session().query(models.DruidDatasource),
             allow_blank=True,
-            widget=Select2Widget(extra_classes='readonly'),
-        ),
+            widget=Select2Widget(extra_classes="readonly"),
+        )
     }
 
     edit_form_extra_fields = add_form_extra_fields
@@ -96,18 +109,20 @@ class DruidColumnInlineView(CompactCRUDMixin, SupersetModelView):  # noqa
             try:
                 dimension_spec = json.loads(col.dimension_spec_json)
             except ValueError as e:
-                raise ValueError('Invalid Dimension Spec JSON: ' + str(e))
+                raise ValueError("Invalid Dimension Spec JSON: " + str(e))
             if not isinstance(dimension_spec, dict):
-                raise ValueError('Dimension Spec must be a JSON object')
-            if 'outputName' not in dimension_spec:
-                raise ValueError('Dimension Spec does not contain `outputName`')
-            if 'dimension' not in dimension_spec:
-                raise ValueError('Dimension Spec is missing `dimension`')
+                raise ValueError("Dimension Spec must be a JSON object")
+            if "outputName" not in dimension_spec:
+                raise ValueError("Dimension Spec does not contain `outputName`")
+            if "dimension" not in dimension_spec:
+                raise ValueError("Dimension Spec is missing `dimension`")
             # `outputName` should be the same as the `column_name`
-            if dimension_spec['outputName'] != col.column_name:
+            if dimension_spec["outputName"] != col.column_name:
                 raise ValueError(
-                    '`outputName` [{}] unequal to `column_name` [{}]'
-                    .format(dimension_spec['outputName'], col.column_name))
+                    "`outputName` [{}] unequal to `column_name` [{}]".format(
+                        dimension_spec["outputName"], col.column_name
+                    )
+                )
 
     def post_update(self, col):
         col.refresh_metrics()
@@ -122,60 +137,73 @@ appbuilder.add_view_no_menu(DruidColumnInlineView)
 class DruidMetricInlineView(CompactCRUDMixin, SupersetModelView):  # noqa
     datamodel = SQLAInterface(models.DruidMetric)
 
-    list_title = _('Metrics')
-    show_title = _('Show Druid Metric')
-    add_title = _('Add Druid Metric')
-    edit_title = _('Edit Druid Metric')
+    list_title = _("Metrics")
+    show_title = _("Show Druid Metric")
+    add_title = _("Add Druid Metric")
+    edit_title = _("Edit Druid Metric")
 
-    list_columns = ['metric_name', 'verbose_name', 'metric_type']
+    list_columns = ["metric_name", "verbose_name", "metric_type"]
     edit_columns = [
-        'metric_name', 'description', 'verbose_name', 'metric_type', 'json',
-        'datasource', 'd3format', 'is_restricted', 'warning_text']
+        "metric_name",
+        "description",
+        "verbose_name",
+        "metric_type",
+        "json",
+        "datasource",
+        "d3format",
+        "is_restricted",
+        "warning_text",
+    ]
     add_columns = edit_columns
     page_size = 500
-    validators_columns = {
-        'json': [validate_json],
-    }
+    validators_columns = {"json": [validate_json]}
     description_columns = {
-        'metric_type': utils.markdown(
-            'use `postagg` as the metric type if you are defining a '
-            '[Druid Post Aggregation]'
-            '(http://druid.io/docs/latest/querying/post-aggregations.html)',
-            True),
-        'is_restricted': _('Whether access to this metric is restricted '
-                           'to certain roles. Only roles with the permission '
-                           "'metric access on XXX (the name of this metric)' "
-                           'are allowed to access this metric'),
+        "metric_type": utils.markdown(
+            "use `postagg` as the metric type if you are defining a "
+            "[Druid Post Aggregation]"
+            "(http://druid.io/docs/latest/querying/post-aggregations.html)",
+            True,
+        ),
+        "is_restricted": _(
+            "Whether access to this metric is restricted "
+            "to certain roles. Only roles with the permission "
+            "'metric access on XXX (the name of this metric)' "
+            "are allowed to access this metric"
+        ),
     }
     label_columns = {
-        'metric_name': _('Metric'),
-        'description': _('Description'),
-        'verbose_name': _('Verbose Name'),
-        'metric_type': _('Type'),
-        'json': _('JSON'),
-        'datasource': _('Druid Datasource'),
-        'warning_text': _('Warning Message'),
-        'is_restricted': _('Is Restricted'),
+        "metric_name": _("Metric"),
+        "description": _("Description"),
+        "verbose_name": _("Verbose Name"),
+        "metric_type": _("Type"),
+        "json": _("JSON"),
+        "datasource": _("Druid Datasource"),
+        "warning_text": _("Warning Message"),
+        "is_restricted": _("Is Restricted"),
     }
 
     add_form_extra_fields = {
-        'datasource': QuerySelectField(
-            'Datasource',
+        "datasource": QuerySelectField(
+            "Datasource",
             query_factory=lambda: db.session().query(models.DruidDatasource),
             allow_blank=True,
-            widget=Select2Widget(extra_classes='readonly'),
-        ),
+            widget=Select2Widget(extra_classes="readonly"),
+        )
     }
 
     edit_form_extra_fields = add_form_extra_fields
 
     def post_add(self, metric):
         if metric.is_restricted:
-            security_manager.add_permission_view_menu('metric_access', metric.get_perm())
+            security_manager.add_permission_view_menu(
+                "metric_access", metric.get_perm()
+            )
 
     def post_update(self, metric):
         if metric.is_restricted:
-            security_manager.add_permission_view_menu('metric_access', metric.get_perm())
+            security_manager.add_permission_view_menu(
+                "metric_access", metric.get_perm()
+            )
 
 
 appbuilder.add_view_no_menu(DruidMetricInlineView)
@@ -184,57 +212,63 @@ appbuilder.add_view_no_menu(DruidMetricInlineView)
 class DruidClusterModelView(SupersetModelView, DeleteMixin, YamlExportMixin):  # noqa
     datamodel = SQLAInterface(models.DruidCluster)
 
-    list_title = _('Druid Clusters')
-    show_title = _('Show Druid Cluster')
-    add_title = _('Add Druid Cluster')
-    edit_title = _('Edit Druid Cluster')
+    list_title = _("Druid Clusters")
+    show_title = _("Show Druid Cluster")
+    add_title = _("Add Druid Cluster")
+    edit_title = _("Edit Druid Cluster")
 
     add_columns = [
-        'verbose_name', 'broker_host', 'broker_port',
-        'broker_user', 'broker_pass', 'broker_endpoint',
-        'cache_timeout', 'cluster_name',
+        "verbose_name",
+        "broker_host",
+        "broker_port",
+        "broker_user",
+        "broker_pass",
+        "broker_endpoint",
+        "cache_timeout",
+        "cluster_name",
     ]
     edit_columns = add_columns
-    list_columns = ['cluster_name', 'metadata_last_refreshed']
-    search_columns = ('cluster_name',)
+    list_columns = ["cluster_name", "metadata_last_refreshed"]
+    search_columns = ("cluster_name",)
     label_columns = {
-        'cluster_name': _('Cluster'),
-        'broker_host': _('Broker Host'),
-        'broker_port': _('Broker Port'),
-        'broker_user': _('Broker Username'),
-        'broker_pass': _('Broker Password'),
-        'broker_endpoint': _('Broker Endpoint'),
-        'verbose_name': _('Verbose Name'),
-        'cache_timeout': _('Cache Timeout'),
-        'metadata_last_refreshed': _('Metadata Last Refreshed'),
+        "cluster_name": _("Cluster"),
+        "broker_host": _("Broker Host"),
+        "broker_port": _("Broker Port"),
+        "broker_user": _("Broker Username"),
+        "broker_pass": _("Broker Password"),
+        "broker_endpoint": _("Broker Endpoint"),
+        "verbose_name": _("Verbose Name"),
+        "cache_timeout": _("Cache Timeout"),
+        "metadata_last_refreshed": _("Metadata Last Refreshed"),
     }
     description_columns = {
-        'cache_timeout': _(
-            'Duration (in seconds) of the caching timeout for this cluster. '
-            'A timeout of 0 indicates that the cache never expires. '
-            'Note this defaults to the global timeout if undefined.'),
-        'broker_user': _(
-            'Druid supports basic authentication. See '
-            '[auth](http://druid.io/docs/latest/design/auth.html) and '
-            'druid-basic-security extension',
+        "cache_timeout": _(
+            "Duration (in seconds) of the caching timeout for this cluster. "
+            "A timeout of 0 indicates that the cache never expires. "
+            "Note this defaults to the global timeout if undefined."
+        ),
+        "broker_user": _(
+            "Druid supports basic authentication. See "
+            "[auth](http://druid.io/docs/latest/design/auth.html) and "
+            "druid-basic-security extension"
         ),
-        'broker_pass': _(
-            'Druid supports basic authentication. See '
-            '[auth](http://druid.io/docs/latest/design/auth.html) and '
-            'druid-basic-security extension',
+        "broker_pass": _(
+            "Druid supports basic authentication. See "
+            "[auth](http://druid.io/docs/latest/design/auth.html) and "
+            "druid-basic-security extension"
         ),
     }
 
     edit_form_extra_fields = {
-        'cluster_name': QuerySelectField(
-            'Cluster',
+        "cluster_name": QuerySelectField(
+            "Cluster",
             query_factory=lambda: db.session().query(models.DruidCluster),
-            widget=Select2Widget(extra_classes='readonly'),
-        ),
+            widget=Select2Widget(extra_classes="readonly"),
+        )
     }
 
     def pre_add(self, cluster):
-        security_manager.add_permission_view_menu('database_access', cluster.perm)
+        security_manager.add_permission_view_menu("database_access", cluster.perm)
 
     def pre_update(self, cluster):
         self.pre_add(cluster)
@@ -245,112 +279,118 @@ class DruidClusterModelView(SupersetModelView, DeleteMixin, YamlExportMixin):  #
 
 appbuilder.add_view(
     DruidClusterModelView,
-    name='Druid Clusters',
-    label=__('Druid Clusters'),
-    icon='fa-cubes',
-    category='Sources',
-    category_label=__('Sources'),
-    category_icon='fa-database',
+    name="Druid Clusters",
+    label=__("Druid Clusters"),
+    icon="fa-cubes",
+    category="Sources",
+    category_label=__("Sources"),
+    category_icon="fa-database",
 )
 
 
-class DruidDatasourceModelView(DatasourceModelView, DeleteMixin, YamlExportMixin):  # noqa
+class DruidDatasourceModelView(
+    DatasourceModelView, DeleteMixin, YamlExportMixin
+):  # noqa
     datamodel = SQLAInterface(models.DruidDatasource)
 
-    list_title = _('Druid Datasources')
-    show_title = _('Show Druid Datasource')
-    add_title = _('Add Druid Datasource')
-    edit_title = _('Edit Druid Datasource')
+    list_title = _("Druid Datasources")
+    show_title = _("Show Druid Datasource")
+    add_title = _("Add Druid Datasource")
+    edit_title = _("Edit Druid Datasource")
 
-    list_columns = [
-        'datasource_link', 'cluster', 'changed_by_', 'modified']
-    order_columns = ['datasource_link', 'modified']
+    list_columns = ["datasource_link", "cluster", "changed_by_", "modified"]
+    order_columns = ["datasource_link", "modified"]
     related_views = [DruidColumnInlineView, DruidMetricInlineView]
     edit_columns = [
-        'datasource_name', 'cluster', 'description', 'owners',
-        'is_hidden',
-        'filter_select_enabled', 'fetch_values_from',
-        'default_endpoint', 'offset', 'cache_timeout']
-    search_columns = (
-        'datasource_name', 'cluster', 'description', 'owners',
-    )
+        "datasource_name",
+        "cluster",
+        "description",
+        "owners",
+        "is_hidden",
+        "filter_select_enabled",
+        "fetch_values_from",
+        "default_endpoint",
+        "offset",
+        "cache_timeout",
+    ]
+    search_columns = ("datasource_name", "cluster", "description", "owners")
     add_columns = edit_columns
-    show_columns = add_columns + ['perm', 'slices']
+    show_columns = add_columns + ["perm", "slices"]
     page_size = 500
-    base_order = ('datasource_name', 'asc')
+    base_order = ("datasource_name", "asc")
     description_columns = {
-        'slices': _(
-            'The list of charts associated with this table. By '
-            'altering this datasource, you may change how these associated '
-            'charts behave. '
-            'Also note that charts need to point to a datasource, so '
-            'this form will fail at saving if removing charts from a '
-            'datasource. If you want to change the datasource for a chart, '
-            "overwrite the chart from the 'explore view'"),
-        'offset': _('Timezone offset (in hours) for this datasource'),
-        'description': Markup(
+        "slices": _(
+            "The list of charts associated with this table. By "
+            "altering this datasource, you may change how these associated "
+            "charts behave. "
+            "Also note that charts need to point to a datasource, so "
+            "this form will fail at saving if removing charts from a "
+            "datasource. If you want to change the datasource for a chart, "
+            "overwrite the chart from the 'explore view'"
+        ),
+        "offset": _("Timezone offset (in hours) for this datasource"),
+        "description": Markup(
             'Supports <a href="'
-            'https://daringfireball.net/projects/markdown/">markdown</a>'),
-        'fetch_values_from': _(
-            'Time expression to use as a predicate when retrieving '
-            'distinct values to populate the filter component. '
-            'Only applies when `Enable Filter Select` is on. If '
-            'you enter `7 days ago`, the distinct list of values in '
-            'the filter will be populated based on the distinct value over '
-            'the past week'),
-        'filter_select_enabled': _(
+            'https://daringfireball.net/projects/markdown/">markdown</a>'
+        ),
+        "fetch_values_from": _(
+            "Time expression to use as a predicate when retrieving "
+            "distinct values to populate the filter component. "
+            "Only applies when `Enable Filter Select` is on. If "
+            "you enter `7 days ago`, the distinct list of values in "
+            "the filter will be populated based on the distinct value over "
+            "the past week"
+        ),
+        "filter_select_enabled": _(
             "Whether to populate the filter's dropdown in the explore "
             "view's filter section with a list of distinct values fetched "
-            'from the backend on the fly'),
-        'default_endpoint': _(
-            'Redirects to this endpoint when clicking on the datasource '
-            'from the datasource list'),
-        'cache_timeout': _(
-            'Duration (in seconds) of the caching timeout for this datasource. '
-            'A timeout of 0 indicates that the cache never expires. '
-            'Note this defaults to the cluster timeout if undefined.'),
+            "from the backend on the fly"
+        ),
+        "default_endpoint": _(
+            "Redirects to this endpoint when clicking on the datasource "
+            "from the datasource list"
+        ),
+        "cache_timeout": _(
+            "Duration (in seconds) of the caching timeout for this datasource. "
+            "A timeout of 0 indicates that the cache never expires. "
+            "Note this defaults to the cluster timeout if undefined."
+        ),
     }
-    base_filters = [['id', DatasourceFilter, lambda: []]]
+    base_filters = [["id", DatasourceFilter, lambda: []]]
     label_columns = {
-        'slices': _('Associated Charts'),
-        'datasource_link': _('Data Source'),
-        'cluster': _('Cluster'),
-        'description': _('Description'),
-        'owners': _('Owners'),
-        'is_hidden': _('Is Hidden'),
-        'filter_select_enabled': _('Enable Filter Select'),
-        'default_endpoint': _('Default Endpoint'),
-        'offset': _('Time Offset'),
-        'cache_timeout': _('Cache Timeout'),
-        'datasource_name': _('Datasource Name'),
-        'fetch_values_from': _('Fetch Values From'),
-        'changed_by_': _('Changed By'),
-        'modified': _('Modified'),
+        "slices": _("Associated Charts"),
+        "datasource_link": _("Data Source"),
+        "cluster": _("Cluster"),
+        "description": _("Description"),
+        "owners": _("Owners"),
+        "is_hidden": _("Is Hidden"),
+        "filter_select_enabled": _("Enable Filter Select"),
+        "default_endpoint": _("Default Endpoint"),
+        "offset": _("Time Offset"),
+        "cache_timeout": _("Cache Timeout"),
+        "datasource_name": _("Datasource Name"),
+        "fetch_values_from": _("Fetch Values From"),
+        "changed_by_": _("Changed By"),
+        "modified": _("Modified"),
     }
 
     def pre_add(self, datasource):
         with db.session.no_autoflush:
-            query = (
-                db.session.query(models.DruidDatasource)
-                .filter(models.DruidDatasource.datasource_name ==
-                        datasource.datasource_name,
-                        models.DruidDatasource.cluster_name ==
-                        datasource.cluster.id)
+            query = db.session.query(models.DruidDatasource).filter(
+                models.DruidDatasource.datasource_name == datasource.datasource_name,
+                models.DruidDatasource.cluster_name == datasource.cluster.id,
             )
             if db.session.query(query.exists()).scalar():
-                raise Exception(get_datasource_exist_error_msg(
-                    datasource.full_name))
+                raise Exception(get_datasource_exist_error_msg(datasource.full_name))
 
     def post_add(self, datasource):
         datasource.refresh_metrics()
         security_manager.add_permission_view_menu(
-            'datasource_access',
-            datasource.get_perm(),
+            "datasource_access", datasource.get_perm()
         )
         if datasource.schema:
             security_manager.add_permission_view_menu(
-                'schema_access',
-                datasource.schema_perm,
+                "schema_access", datasource.schema_perm
             )
 
     def post_update(self, datasource):
@@ -362,22 +402,23 @@ class DruidDatasourceModelView(DatasourceModelView, DeleteMixin, YamlExportMixin
 
 appbuilder.add_view(
     DruidDatasourceModelView,
-    'Druid Datasources',
-    label=__('Druid Datasources'),
-    category='Sources',
-    category_label=__('Sources'),
-    icon='fa-cube')
+    "Druid Datasources",
+    label=__("Druid Datasources"),
+    category="Sources",
+    category_label=__("Sources"),
+    icon="fa-cube",
+)
 
 
 class Druid(BaseSupersetView):
     """The base views for Superset!"""
 
     @has_access
-    @expose('/refresh_datasources/')
+    @expose("/refresh_datasources/")
     def refresh_datasources(self, refreshAll=True):
         """endpoint that refreshes druid datasources metadata"""
         session = db.session()
-        DruidCluster = ConnectorRegistry.sources['druid'].cluster_class
+        DruidCluster = ConnectorRegistry.sources["druid"].cluster_class
         for cluster in session.query(DruidCluster).all():
             cluster_name = cluster.cluster_name
             valid_cluster = True
@@ -387,21 +428,25 @@ class Druid(BaseSupersetView):
                 valid_cluster = False
                 flash(
                     "Error while processing cluster '{}'\n{}".format(
-                        cluster_name, utils.error_msg_from_exception(e)),
-                    'danger')
+                        cluster_name, utils.error_msg_from_exception(e)
+                    ),
+                    "danger",
+                )
                 logging.exception(e)
                 pass
             if valid_cluster:
                 cluster.metadata_last_refreshed = datetime.now()
                 flash(
-                    _('Refreshed metadata from cluster [{}]').format(
-                        cluster.cluster_name),
-                    'info')
+                    _("Refreshed metadata from cluster [{}]").format(
+                        cluster.cluster_name
+                    ),
+                    "info",
+                )
         session.commit()
-        return redirect('/druiddatasourcemodelview/list/')
+        return redirect("/druiddatasourcemodelview/list/")
 
     @has_access
-    @expose('/scan_new_datasources/')
+    @expose("/scan_new_datasources/")
     def scan_new_datasources(self):
         """
         Calling this endpoint will cause a scan for new
@@ -413,21 +458,23 @@ class Druid(BaseSupersetView):
 appbuilder.add_view_no_menu(Druid)
 
 appbuilder.add_link(
-    'Scan New Datasources',
-    label=__('Scan New Datasources'),
-    href='/druid/scan_new_datasources/',
-    category='Sources',
-    category_label=__('Sources'),
-    category_icon='fa-database',
-    icon='fa-refresh')
+    "Scan New Datasources",
+    label=__("Scan New Datasources"),
+    href="/druid/scan_new_datasources/",
+    category="Sources",
+    category_label=__("Sources"),
+    category_icon="fa-database",
+    icon="fa-refresh",
+)
 appbuilder.add_link(
-    'Refresh Druid Metadata',
-    label=__('Refresh Druid Metadata'),
-    href='/druid/refresh_datasources/',
-    category='Sources',
-    category_label=__('Sources'),
-    category_icon='fa-database',
-    icon='fa-cog')
+    "Refresh Druid Metadata",
+    label=__("Refresh Druid Metadata"),
+    href="/druid/refresh_datasources/",
+    category="Sources",
+    category_label=__("Sources"),
+    category_icon="fa-database",
+    icon="fa-cog",
+)
 
 
-appbuilder.add_separator('Sources')
+appbuilder.add_separator("Sources")
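Taken together, the view changes above show the two transformations Black applies throughout this commit: string literals switch from single to double quotes, and long calls or collections are exploded to one element per line with a trailing ("magic") comma. A minimal before/after sketch on a hypothetical snippet, not taken from the Superset codebase:

    # Before Black: single quotes, manually wrapped
    list_columns = [
        'column_name', 'verbose_name', 'type']

    # After Black (default line length 88): double quotes, one element per
    # line, and a trailing comma so future additions only touch one line
    list_columns = [
        "column_name",
        "verbose_name",
        "type",
    ]
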
diff --git a/superset/connectors/sqla/models.py b/superset/connectors/sqla/models.py
index a2e392b..f465b3e 100644
--- a/superset/connectors/sqla/models.py
+++ b/superset/connectors/sqla/models.py
@@ -26,8 +26,19 @@ from flask_babel import lazy_gettext as _
 import pandas as pd
 import sqlalchemy as sa
 from sqlalchemy import (
-    and_, asc, Boolean, Column, DateTime, desc, ForeignKey, Integer, or_,
-    select, String, Table, Text,
+    and_,
+    asc,
+    Boolean,
+    Column,
+    DateTime,
+    desc,
+    ForeignKey,
+    Integer,
+    or_,
+    select,
+    String,
+    Table,
+    Text,
 )
 from sqlalchemy.exc import CompileError
 from sqlalchemy.orm import backref, relationship
@@ -48,8 +59,8 @@ from superset.utils import core as utils, import_datasource
 config = app.config
 metadata = Model.metadata  # pylint: disable=no-member
 
-SqlaQuery = namedtuple('SqlaQuery', ['sqla_query', 'labels_expected'])
-QueryStringExtended = namedtuple('QueryStringExtended', ['sql', 'labels_expected'])
+SqlaQuery = namedtuple("SqlaQuery", ["sqla_query", "labels_expected"])
+QueryStringExtended = namedtuple("QueryStringExtended", ["sql", "labels_expected"])
 
 
 class AnnotationDatasource(BaseDatasource):
@@ -63,25 +74,21 @@ class AnnotationDatasource(BaseDatasource):
         df = None
         error_message = None
         qry = db.session.query(Annotation)
-        qry = qry.filter(Annotation.layer_id == query_obj['filter'][0]['val'])
-        if query_obj['from_dttm']:
-            qry = qry.filter(Annotation.start_dttm >= query_obj['from_dttm'])
-        if query_obj['to_dttm']:
-            qry = qry.filter(Annotation.end_dttm <= query_obj['to_dttm'])
+        qry = qry.filter(Annotation.layer_id == query_obj["filter"][0]["val"])
+        if query_obj["from_dttm"]:
+            qry = qry.filter(Annotation.start_dttm >= query_obj["from_dttm"])
+        if query_obj["to_dttm"]:
+            qry = qry.filter(Annotation.end_dttm <= query_obj["to_dttm"])
         status = utils.QueryStatus.SUCCESS
         try:
             df = pd.read_sql_query(qry.statement, db.engine)
         except Exception as e:
             status = utils.QueryStatus.FAILED
             logging.exception(e)
-            error_message = (
-                utils.error_msg_from_exception(e))
+            error_message = utils.error_msg_from_exception(e)
         return QueryResult(
-            status=status,
-            df=df,
-            duration=0,
-            query='',
-            error_message=error_message)
+            status=status, df=df, duration=0, query="", error_message=error_message
+        )
 
     def get_query_str(self, query_obj):
         raise NotImplementedError()
@@ -94,28 +101,36 @@ class TableColumn(Model, BaseColumn):
 
     """ORM object for table columns, each table can have multiple columns"""
 
-    __tablename__ = 'table_columns'
-    __table_args__ = (UniqueConstraint('table_id', 'column_name'),)
-    table_id = Column(Integer, ForeignKey('tables.id'))
+    __tablename__ = "table_columns"
+    __table_args__ = (UniqueConstraint("table_id", "column_name"),)
+    table_id = Column(Integer, ForeignKey("tables.id"))
     table = relationship(
-        'SqlaTable',
-        backref=backref('columns', cascade='all, delete-orphan'),
-        foreign_keys=[table_id])
+        "SqlaTable",
+        backref=backref("columns", cascade="all, delete-orphan"),
+        foreign_keys=[table_id],
+    )
     is_dttm = Column(Boolean, default=False)
     expression = Column(Text)
     python_date_format = Column(String(255))
     database_expression = Column(String(255))
 
     export_fields = (
-        'table_id', 'column_name', 'verbose_name', 'is_dttm', 'is_active',
-        'type', 'groupby',
-        'filterable', 'expression', 'description', 'python_date_format',
-        'database_expression',
+        "table_id",
+        "column_name",
+        "verbose_name",
+        "is_dttm",
+        "is_active",
+        "type",
+        "groupby",
+        "filterable",
+        "expression",
+        "description",
+        "python_date_format",
+        "database_expression",
     )
 
-    update_from_object_fields = [
-        s for s in export_fields if s not in ('table_id',)]
-    export_parent = 'table'
+    update_from_object_fields = [s for s in export_fields if s not in ("table_id",)]
+    export_parent = "table"
 
     def get_sqla_col(self, label=None):
         label = label or self.column_name
@@ -133,7 +148,7 @@ class TableColumn(Model, BaseColumn):
         return self.table
 
     def get_time_filter(self, start_dttm, end_dttm):
-        col = self.get_sqla_col(label='__time')
+        col = self.get_sqla_col(label="__time")
         l = []  # noqa: E741
         if start_dttm:
             l.append(col >= text(self.dttm_sql_literal(start_dttm)))
@@ -141,8 +156,9 @@ class TableColumn(Model, BaseColumn):
             l.append(col <= text(self.dttm_sql_literal(end_dttm)))
         return and_(*l)
 
-    def get_timestamp_expression(self, time_grain: Optional[str]) \
-            -> Union[TimestampExpression, Label]:
+    def get_timestamp_expression(
+        self, time_grain: Optional[str]
+    ) -> Union[TimestampExpression, Label]:
         """
         Return a SQLAlchemy Core element representation of self to be used in a query.
 
@@ -153,7 +169,7 @@ class TableColumn(Model, BaseColumn):
 
         db = self.table.database
         pdf = self.python_date_format
-        is_epoch = pdf in ('epoch_s', 'epoch_ms')
+        is_epoch = pdf in ("epoch_s", "epoch_ms")
         if not self.expression and not time_grain and not is_epoch:
             sqla_col = column(self.column_name, type_=DateTime)
             return self.table.make_sqla_column_compatible(sqla_col, label)
@@ -167,9 +183,15 @@ class TableColumn(Model, BaseColumn):
     @classmethod
     def import_obj(cls, i_column):
         def lookup_obj(lookup_column):
-            return db.session.query(TableColumn).filter(
-                TableColumn.table_id == lookup_column.table_id,
-                TableColumn.column_name == lookup_column.column_name).first()
+            return (
+                db.session.query(TableColumn)
+                .filter(
+                    TableColumn.table_id == lookup_column.table_id,
+                    TableColumn.column_name == lookup_column.column_name,
+                )
+                .first()
+            )
+
         return import_datasource.import_simple_obj(db.session, i_column, lookup_obj)
 
     def dttm_sql_literal(self, dttm):
@@ -183,39 +205,48 @@ class TableColumn(Model, BaseColumn):
         """
         tf = self.python_date_format
         if self.database_expression:
-            return self.database_expression.format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
+            return self.database_expression.format(dttm.strftime("%Y-%m-%d %H:%M:%S"))
         elif tf:
             seconds_since_epoch = int(dttm.timestamp())
-            if tf == 'epoch_s':
+            if tf == "epoch_s":
                 return str(seconds_since_epoch)
-            elif tf == 'epoch_ms':
+            elif tf == "epoch_ms":
                 return str(seconds_since_epoch * 1000)
             return "'{}'".format(dttm.strftime(tf))
         else:
-            s = self.table.database.db_engine_spec.convert_dttm(
-                self.type or '', dttm)
-            return s or "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S.%f'))
+            s = self.table.database.db_engine_spec.convert_dttm(self.type or "", dttm)
+            return s or "'{}'".format(dttm.strftime("%Y-%m-%d %H:%M:%S.%f"))
 
 
 class SqlMetric(Model, BaseMetric):
 
     """ORM object for metrics, each table can have multiple metrics"""
 
-    __tablename__ = 'sql_metrics'
-    __table_args__ = (UniqueConstraint('table_id', 'metric_name'),)
-    table_id = Column(Integer, ForeignKey('tables.id'))
+    __tablename__ = "sql_metrics"
+    __table_args__ = (UniqueConstraint("table_id", "metric_name"),)
+    table_id = Column(Integer, ForeignKey("tables.id"))
     table = relationship(
-        'SqlaTable',
-        backref=backref('metrics', cascade='all, delete-orphan'),
-        foreign_keys=[table_id])
+        "SqlaTable",
+        backref=backref("metrics", cascade="all, delete-orphan"),
+        foreign_keys=[table_id],
+    )
     expression = Column(Text, nullable=False)
 
     export_fields = (
-        'metric_name', 'verbose_name', 'metric_type', 'table_id', 'expression',
-        'description', 'is_restricted', 'd3format', 'warning_text')
-    update_from_object_fields = list([
-        s for s in export_fields if s not in ('table_id', )])
-    export_parent = 'table'
+        "metric_name",
+        "verbose_name",
+        "metric_type",
+        "table_id",
+        "expression",
+        "description",
+        "is_restricted",
+        "d3format",
+        "warning_text",
+    )
+    update_from_object_fields = list(
+        [s for s in export_fields if s not in ("table_id",)]
+    )
+    export_parent = "table"
 
     def get_sqla_col(self, label=None):
         label = label or self.metric_name
@@ -225,9 +256,12 @@ class SqlMetric(Model, BaseMetric):
     @property
     def perm(self):
         return (
-            '{parent_name}.[{obj.metric_name}](id:{obj.id})'
-        ).format(obj=self,
-                 parent_name=self.table.full_name) if self.table else None
+            ("{parent_name}.[{obj.metric_name}](id:{obj.id})").format(
+                obj=self, parent_name=self.table.full_name
+            )
+            if self.table
+            else None
+        )
 
     def get_perm(self):
         return self.perm
@@ -235,17 +269,24 @@ class SqlMetric(Model, BaseMetric):
     @classmethod
     def import_obj(cls, i_metric):
         def lookup_obj(lookup_metric):
-            return db.session.query(SqlMetric).filter(
-                SqlMetric.table_id == lookup_metric.table_id,
-                SqlMetric.metric_name == lookup_metric.metric_name).first()
+            return (
+                db.session.query(SqlMetric)
+                .filter(
+                    SqlMetric.table_id == lookup_metric.table_id,
+                    SqlMetric.metric_name == lookup_metric.metric_name,
+                )
+                .first()
+            )
+
         return import_datasource.import_simple_obj(db.session, i_metric, lookup_obj)
 
 
 sqlatable_user = Table(
-    'sqlatable_user', metadata,
-    Column('id', Integer, primary_key=True),
-    Column('user_id', Integer, ForeignKey('ab_user.id')),
-    Column('table_id', Integer, ForeignKey('tables.id')),
+    "sqlatable_user",
+    metadata,
+    Column("id", Integer, primary_key=True),
+    Column("user_id", Integer, ForeignKey("ab_user.id")),
+    Column("table_id", Integer, ForeignKey("tables.id")),
 )
 
 
@@ -253,49 +294,60 @@ class SqlaTable(Model, BaseDatasource):
 
     """An ORM object for SqlAlchemy table references"""
 
-    type = 'table'
-    query_language = 'sql'
+    type = "table"
+    query_language = "sql"
     metric_class = SqlMetric
     column_class = TableColumn
     owner_class = security_manager.user_model
 
-    __tablename__ = 'tables'
-    __table_args__ = (UniqueConstraint('database_id', 'table_name'),)
+    __tablename__ = "tables"
+    __table_args__ = (UniqueConstraint("database_id", "table_name"),)
 
     table_name = Column(String(250))
     main_dttm_col = Column(String(250))
-    database_id = Column(Integer, ForeignKey('dbs.id'), nullable=False)
+    database_id = Column(Integer, ForeignKey("dbs.id"), nullable=False)
     fetch_values_predicate = Column(String(1000))
-    owners = relationship(owner_class, secondary=sqlatable_user, backref='tables')
+    owners = relationship(owner_class, secondary=sqlatable_user, backref="tables")
     database = relationship(
-        'Database',
-        backref=backref('tables', cascade='all, delete-orphan'),
-        foreign_keys=[database_id])
+        "Database",
+        backref=backref("tables", cascade="all, delete-orphan"),
+        foreign_keys=[database_id],
+    )
     schema = Column(String(255))
     sql = Column(Text)
     is_sqllab_view = Column(Boolean, default=False)
     template_params = Column(Text)
 
-    baselink = 'tablemodelview'
+    baselink = "tablemodelview"
 
     export_fields = (
-        'table_name', 'main_dttm_col', 'description', 'default_endpoint',
-        'database_id', 'offset', 'cache_timeout', 'schema',
-        'sql', 'params', 'template_params', 'filter_select_enabled',
-        'fetch_values_predicate',
+        "table_name",
+        "main_dttm_col",
+        "description",
+        "default_endpoint",
+        "database_id",
+        "offset",
+        "cache_timeout",
+        "schema",
+        "sql",
+        "params",
+        "template_params",
+        "filter_select_enabled",
+        "fetch_values_predicate",
     )
     update_from_object_fields = [
-        f for f in export_fields if f not in ('table_name', 'database_id')]
-    export_parent = 'database'
-    export_children = ['metrics', 'columns']
+        f for f in export_fields if f not in ("table_name", "database_id")
+    ]
+    export_parent = "database"
+    export_children = ["metrics", "columns"]
 
     sqla_aggregations = {
-        'COUNT_DISTINCT': lambda column_name: sa.func.COUNT(sa.distinct(column_name)),
-        'COUNT': sa.func.COUNT,
-        'SUM': sa.func.SUM,
-        'AVG': sa.func.AVG,
-        'MIN': sa.func.MIN,
-        'MAX': sa.func.MAX,
+        "COUNT_DISTINCT": lambda column_name: sa.func.COUNT(sa.distinct(column_name)),
+        "COUNT": sa.func.COUNT,
+        "SUM": sa.func.SUM,
+        "AVG": sa.func.AVG,
+        "MIN": sa.func.MIN,
+        "MAX": sa.func.MAX,
     }
 
     def make_sqla_column_compatible(self, sqla_col, label=None):
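The sqla_aggregations mapping above turns an aggregate name from an ad-hoc metric into a SQLAlchemy expression builder. A minimal stand-alone sketch of how such an entry is applied, illustrative only and not part of this commit:

    import sqlalchemy as sa

    def count_distinct(col):
        # Same shape as the COUNT_DISTINCT entry above: column -> aggregated expression
        return sa.func.COUNT(sa.distinct(col))

    expr = count_distinct(sa.column("user_id"))
    print(expr)  # count(DISTINCT user_id)
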
@@ -343,20 +395,19 @@ class SqlaTable(Model, BaseDatasource):
         return security_manager.get_schema_perm(self.database, self.schema)
 
     def get_perm(self):
-        return (
-            '[{obj.database}].[{obj.table_name}]'
-            '(id:{obj.id})').format(obj=self)
+        return ("[{obj.database}].[{obj.table_name}]" "(id:{obj.id})").format(obj=self)
 
     @property
     def name(self):
         if not self.schema:
             return self.table_name
-        return '{}.{}'.format(self.schema, self.table_name)
+        return "{}.{}".format(self.schema, self.table_name)
 
     @property
     def full_name(self):
         return utils.get_datasource_full_name(
-            self.database, self.table_name, schema=self.schema)
+            self.database, self.table_name, schema=self.schema
+        )
 
     @property
     def dttm_cols(self):
@@ -379,31 +430,30 @@ class SqlaTable(Model, BaseDatasource):
     def html(self):
         t = ((c.column_name, c.type) for c in self.columns)
         df = pd.DataFrame(t)
-        df.columns = ['field', 'type']
+        df.columns = ["field", "type"]
         return df.to_html(
             index=False,
-            classes=(
-                'dataframe table table-striped table-bordered '
-                'table-condensed'))
+            classes=("dataframe table table-striped table-bordered " "table-condensed"),
+        )
 
     @property
     def sql_url(self):
-        return self.database.sql_url + '?table_name=' + str(self.table_name)
+        return self.database.sql_url + "?table_name=" + str(self.table_name)
 
     def external_metadata(self):
         cols = self.database.get_columns(self.table_name, schema=self.schema)
         for col in cols:
             try:
-                col['type'] = str(col['type'])
+                col["type"] = str(col["type"])
             except CompileError:
-                col['type'] = 'UNKNOWN'
+                col["type"] = "UNKNOWN"
         return cols
 
     @property
     def time_column_grains(self):
         return {
-            'time_columns': self.dttm_cols,
-            'time_grains': [grain.name for grain in self.database.grains()],
+            "time_columns": self.dttm_cols,
+            "time_grains": [grain.name for grain in self.database.grains()],
         }
 
     @property
@@ -411,7 +461,8 @@ class SqlaTable(Model, BaseDatasource):
         # show_cols and latest_partition set to false to avoid
         # the expensive cost of inspecting the DB
         return self.database.select_star(
-            self.name, show_cols=False, latest_partition=False)
+            self.name, show_cols=False, latest_partition=False
+        )
 
     def get_col(self, col_name):
         columns = self.columns
@@ -422,15 +473,15 @@ class SqlaTable(Model, BaseDatasource):
     @property
     def data(self):
         d = super(SqlaTable, self).data
-        if self.type == 'table':
+        if self.type == "table":
             grains = self.database.grains() or []
             if grains:
                 grains = [(g.duration, g.name) for g in grains]
-            d['granularity_sqla'] = utils.choicify(self.dttm_cols)
-            d['time_grain_sqla'] = grains
-            d['main_dttm_col'] = self.main_dttm_col
-            d['fetch_values_predicate'] = self.fetch_values_predicate
-            d['template_params'] = self.template_params
+            d["granularity_sqla"] = utils.choicify(self.dttm_cols)
+            d["time_grain_sqla"] = grains
+            d["main_dttm_col"] = self.main_dttm_col
+            d["fetch_values_predicate"] = self.fetch_values_predicate
+            d["template_params"] = self.template_params
         return d
 
     def values_for_column(self, column_name, limit=10000):
@@ -454,9 +505,7 @@ class SqlaTable(Model, BaseDatasource):
             qry = qry.where(tp.process_template(self.fetch_values_predicate))
 
         engine = self.database.get_sqla_engine()
-        sql = '{}'.format(
-            qry.compile(engine, compile_kwargs={'literal_binds': True}),
-        )
+        sql = "{}".format(qry.compile(engine, compile_kwargs={"literal_binds": True}))
         sql = self.mutate_query_from_config(sql)
 
         df = pd.read_sql_query(sql=sql, con=engine)
@@ -466,23 +515,22 @@ class SqlaTable(Model, BaseDatasource):
         """Apply config's SQL_QUERY_MUTATOR
 
         Typically adds comments to the query with context"""
-        SQL_QUERY_MUTATOR = config.get('SQL_QUERY_MUTATOR')
+        SQL_QUERY_MUTATOR = config.get("SQL_QUERY_MUTATOR")
         if SQL_QUERY_MUTATOR:
             username = utils.get_username()
             sql = SQL_QUERY_MUTATOR(sql, username, security_manager, self.database)
         return sql
 
     def get_template_processor(self, **kwargs):
-        return get_template_processor(
-            table=self, database=self.database, **kwargs)
+        return get_template_processor(table=self, database=self.database, **kwargs)
 
     def get_query_str_extended(self, query_obj):
         sqlaq = self.get_sqla_query(**query_obj)
         sql = self.database.compile_sqla_query(sqlaq.sqla_query)
         logging.info(sql)
         sql = sqlparse.format(sql, reindent=True)
-        if query_obj['is_prequery']:
-            query_obj['prequeries'].append(sql)
+        if query_obj["is_prequery"]:
+            query_obj["prequeries"].append(sql)
         sql = self.mutate_query_from_config(sql)
         return QueryStringExtended(labels_expected=sqlaq.labels_expected, sql=sql)
 
@@ -502,7 +550,7 @@ class SqlaTable(Model, BaseDatasource):
             if template_processor:
                 from_sql = template_processor.process_template(from_sql)
             from_sql = sqlparse.format(from_sql, strip_comments=True)
-            return TextAsFrom(sa.text(from_sql), []).alias('expr_qry')
+            return TextAsFrom(sa.text(from_sql), []).alias("expr_qry")
         return self.get_sqla_table()
 
     def adhoc_metric_to_sqla(self, metric, cols):
@@ -514,52 +562,54 @@ class SqlaTable(Model, BaseDatasource):
         :returns: The metric defined as a sqlalchemy column
         :rtype: sqlalchemy.sql.column
         """
-        expression_type = metric.get('expressionType')
+        expression_type = metric.get("expressionType")
         label = utils.get_metric_name(metric)
 
-        if expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES['SIMPLE']:
-            column_name = metric.get('column').get('column_name')
+        if expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES["SIMPLE"]:
+            column_name = metric.get("column").get("column_name")
             table_column = cols.get(column_name)
             if table_column:
                 sqla_column = table_column.get_sqla_col()
             else:
                 sqla_column = column(column_name)
-            sqla_metric = self.sqla_aggregations[metric.get('aggregate')](sqla_column)
-        elif expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES['SQL']:
-            sqla_metric = literal_column(metric.get('sqlExpression'))
+            sqla_metric = self.sqla_aggregations[metric.get("aggregate")](sqla_column)
+        elif expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES["SQL"]:
+            sqla_metric = literal_column(metric.get("sqlExpression"))
         else:
             return None
 
         return self.make_sqla_column_compatible(sqla_metric, label)
 
     def get_sqla_query(  # sqla
-            self,
-            groupby, metrics,
-            granularity,
-            from_dttm, to_dttm,
-            filter=None,  # noqa
-            is_timeseries=True,
-            timeseries_limit=15,
-            timeseries_limit_metric=None,
-            row_limit=None,
-            inner_from_dttm=None,
-            inner_to_dttm=None,
-            orderby=None,
-            extras=None,
-            columns=None,
-            order_desc=True,
-            prequeries=None,
-            is_prequery=False,
+        self,
+        groupby,
+        metrics,
+        granularity,
+        from_dttm,
+        to_dttm,
+        filter=None,  # noqa
+        is_timeseries=True,
+        timeseries_limit=15,
+        timeseries_limit_metric=None,
+        row_limit=None,
+        inner_from_dttm=None,
+        inner_to_dttm=None,
+        orderby=None,
+        extras=None,
+        columns=None,
+        order_desc=True,
+        prequeries=None,
+        is_prequery=False,
     ):
         """Querying any sqla table from this common interface"""
         template_kwargs = {
-            'from_dttm': from_dttm,
-            'groupby': groupby,
-            'metrics': metrics,
-            'row_limit': row_limit,
-            'to_dttm': to_dttm,
-            'filter': filter,
-            'columns': {col.column_name: col for col in self.columns},
+            "from_dttm": from_dttm,
+            "groupby": groupby,
+            "metrics": metrics,
+            "row_limit": row_limit,
+            "to_dttm": to_dttm,
+            "filter": filter,
+            "columns": {col.column_name: col for col in self.columns},
         }
         template_kwargs.update(self.template_params_dict)
         template_processor = self.get_template_processor(**template_kwargs)
@@ -578,11 +628,14 @@ class SqlaTable(Model, BaseDatasource):
         metrics_dict = {m.metric_name: m for m in self.metrics}
 
         if not granularity and is_timeseries:
-            raise Exception(_(
-                'Datetime column not provided as part of the table configuration '
-                'and is required by this type of chart'))
+            raise Exception(
+                _(
+                    "Datetime column not provided as part table configuration "
+                    "and is required by this type of chart"
+                )
+            )
         if not groupby and not metrics and not columns:
-            raise Exception(_('Empty query?'))
+            raise Exception(_("Empty query?"))
         metrics_exprs = []
         for m in metrics:
             if utils.is_adhoc_metric(m):
@@ -594,7 +647,7 @@ class SqlaTable(Model, BaseDatasource):
         if metrics_exprs:
             main_metric_expr = metrics_exprs[0]
         else:
-            main_metric_expr, label = literal_column('COUNT(*)'), 'ccount'
+            main_metric_expr, label = literal_column("COUNT(*)"), "ccount"
             main_metric_expr = self.make_sqla_column_compatible(main_metric_expr, label)
 
         select_exprs = []
@@ -606,7 +659,7 @@ class SqlaTable(Model, BaseDatasource):
                 if s in cols:
                     outer = cols[s].get_sqla_col()
                 else:
-                    outer = literal_column(f'({s})')
+                    outer = literal_column(f"({s})")
                     outer = self.make_sqla_column_compatible(outer, s)
 
                 groupby_exprs_sans_timestamp[outer.name] = outer
@@ -614,14 +667,16 @@ class SqlaTable(Model, BaseDatasource):
         elif columns:
             for s in columns:
                 select_exprs.append(
-                    cols[s].get_sqla_col() if s in cols else
-                    self.make_sqla_column_compatible(literal_column(s)))
+                    cols[s].get_sqla_col()
+                    if s in cols
+                    else self.make_sqla_column_compatible(literal_column(s))
+                )
             metrics_exprs = []
 
         groupby_exprs_with_timestamp = OrderedDict(groupby_exprs_sans_timestamp.items())
         if granularity:
             dttm_col = cols[granularity]
-            time_grain = extras.get('time_grain_sqla')
+            time_grain = extras.get("time_grain_sqla")
             time_filters = []
 
             if is_timeseries:
@@ -630,11 +685,14 @@ class SqlaTable(Model, BaseDatasource):
                 groupby_exprs_with_timestamp[timestamp.name] = timestamp
 
             # Use main dttm column to support index with secondary dttm columns
-            if db_engine_spec.time_secondary_columns and \
-                    self.main_dttm_col in self.dttm_cols and \
-                    self.main_dttm_col != dttm_col.column_name:
-                time_filters.append(cols[self.main_dttm_col].
-                                    get_time_filter(from_dttm, to_dttm))
+            if (
+                db_engine_spec.time_secondary_columns
+                and self.main_dttm_col in self.dttm_cols
+                and self.main_dttm_col != dttm_col.column_name
+            ):
+                time_filters.append(
+                    cols[self.main_dttm_col].get_time_filter(from_dttm, to_dttm)
+                )
             time_filters.append(dttm_col.get_time_filter(from_dttm, to_dttm))
 
         select_exprs += metrics_exprs
@@ -642,8 +700,8 @@ class SqlaTable(Model, BaseDatasource):
         labels_expected = [c._df_label_expected for c in select_exprs]
 
         select_exprs = db_engine_spec.make_select_compatible(
-            groupby_exprs_with_timestamp.values(),
-            select_exprs)
+            groupby_exprs_with_timestamp.values(), select_exprs
+        )
         qry = sa.select(select_exprs)
 
         tbl = self.get_from_clause(template_processor)
@@ -654,55 +712,55 @@ class SqlaTable(Model, BaseDatasource):
         where_clause_and = []
         having_clause_and = []
         for flt in filter:
-            if not all([flt.get(s) for s in ['col', 'op']]):
+            if not all([flt.get(s) for s in ["col", "op"]]):
                 continue
-            col = flt['col']
-            op = flt['op']
+            col = flt["col"]
+            op = flt["op"]
             col_obj = cols.get(col)
             if col_obj:
-                is_list_target = op in ('in', 'not in')
+                is_list_target = op in ("in", "not in")
                 eq = self.filter_values_handler(
-                    flt.get('val'),
+                    flt.get("val"),
                     target_column_is_numeric=col_obj.is_num,
-                    is_list_target=is_list_target)
-                if op in ('in', 'not in'):
+                    is_list_target=is_list_target,
+                )
+                if op in ("in", "not in"):
                     cond = col_obj.get_sqla_col().in_(eq)
-                    if '<NULL>' in eq:
+                    if "<NULL>" in eq:
                         cond = or_(cond, col_obj.get_sqla_col() == None)  # noqa
-                    if op == 'not in':
+                    if op == "not in":
                         cond = ~cond
                     where_clause_and.append(cond)
                 else:
                     if col_obj.is_num:
-                        eq = utils.string_to_num(flt['val'])
-                    if op == '==':
+                        eq = utils.string_to_num(flt["val"])
+                    if op == "==":
                         where_clause_and.append(col_obj.get_sqla_col() == eq)
-                    elif op == '!=':
+                    elif op == "!=":
                         where_clause_and.append(col_obj.get_sqla_col() != eq)
-                    elif op == '>':
+                    elif op == ">":
                         where_clause_and.append(col_obj.get_sqla_col() > eq)
-                    elif op == '<':
+                    elif op == "<":
                         where_clause_and.append(col_obj.get_sqla_col() < eq)
-                    elif op == '>=':
+                    elif op == ">=":
                         where_clause_and.append(col_obj.get_sqla_col() >= eq)
-                    elif op == '<=':
+                    elif op == "<=":
                         where_clause_and.append(col_obj.get_sqla_col() <= eq)
-                    elif op == 'LIKE':
+                    elif op == "LIKE":
                         where_clause_and.append(col_obj.get_sqla_col().like(eq))
-                    elif op == 'IS NULL':
+                    elif op == "IS NULL":
                         where_clause_and.append(col_obj.get_sqla_col() == None)  # noqa
-                    elif op == 'IS NOT NULL':
-                        where_clause_and.append(
-                            col_obj.get_sqla_col() != None)  # noqa
+                    elif op == "IS NOT NULL":
+                        where_clause_and.append(col_obj.get_sqla_col() != None)  # noqa
         if extras:
-            where = extras.get('where')
+            where = extras.get("where")
             if where:
                 where = template_processor.process_template(where)
-                where_clause_and += [sa.text('({})'.format(where))]
-            having = extras.get('having')
+                where_clause_and += [sa.text("({})".format(where))]
+            having = extras.get("having")
             if having:
                 having = template_processor.process_template(having)
-                having_clause_and += [sa.text('({})'.format(having))]
+                having_clause_and += [sa.text("({})".format(having))]
         if granularity:
             qry = qry.where(and_(*(time_filters + where_clause_and)))
         else:
@@ -721,26 +779,25 @@ class SqlaTable(Model, BaseDatasource):
         if row_limit:
             qry = qry.limit(row_limit)
 
-        if is_timeseries and \
-                timeseries_limit and groupby and not time_groupby_inline:
+        if is_timeseries and timeseries_limit and groupby and not time_groupby_inline:
             if self.database.db_engine_spec.inner_joins:
                 # some sql dialects require for order by expressions
                 # to also be in the select clause -- others, e.g. vertica,
                 # require a unique inner alias
                 inner_main_metric_expr = self.make_sqla_column_compatible(
-                    main_metric_expr, 'mme_inner__')
+                    main_metric_expr, "mme_inner__"
+                )
                 inner_groupby_exprs = []
                 inner_select_exprs = []
                 for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
-                    inner = self.make_sqla_column_compatible(gby_obj, gby_name + '__')
+                    inner = self.make_sqla_column_compatible(gby_obj, gby_name + "__")
                     inner_groupby_exprs.append(inner)
                     inner_select_exprs.append(inner)
 
                 inner_select_exprs += [inner_main_metric_expr]
                 subq = select(inner_select_exprs).select_from(tbl)
                 inner_time_filter = dttm_col.get_time_filter(
-                    inner_from_dttm or from_dttm,
-                    inner_to_dttm or to_dttm,
+                    inner_from_dttm or from_dttm, inner_to_dttm or to_dttm
                 )
                 subq = subq.where(and_(*(where_clause_and + [inner_time_filter])))
                 subq = subq.group_by(*inner_groupby_exprs)
@@ -748,9 +805,7 @@ class SqlaTable(Model, BaseDatasource):
                 ob = inner_main_metric_expr
                 if timeseries_limit_metric:
                     ob = self._get_timeseries_orderby(
-                        timeseries_limit_metric,
-                        metrics_dict,
-                        cols,
+                        timeseries_limit_metric, metrics_dict, cols
                     )
                 direction = desc if order_desc else asc
                 subq = subq.order_by(direction(ob))
@@ -761,61 +816,63 @@ class SqlaTable(Model, BaseDatasource):
                     # in this case the column name, not the alias, needs to be
                     # conditionally mutated, as it refers to the column alias in
                     # the inner query
-                    col_name = db_engine_spec.make_label_compatible(gby_name + '__')
+                    col_name = db_engine_spec.make_label_compatible(gby_name + "__")
                     on_clause.append(gby_obj == column(col_name))
 
                 tbl = tbl.join(subq.alias(), and_(*on_clause))
             else:
                 if timeseries_limit_metric:
-                    orderby = [(
-                        self._get_timeseries_orderby(
-                            timeseries_limit_metric,
-                            metrics_dict,
-                            cols,
-                        ),
-                        False,
-                    )]
+                    orderby = [
+                        (
+                            self._get_timeseries_orderby(
+                                timeseries_limit_metric, metrics_dict, cols
+                            ),
+                            False,
+                        )
+                    ]
 
                 # run subquery to get top groups
                 subquery_obj = {
-                    'prequeries': prequeries,
-                    'is_prequery': True,
-                    'is_timeseries': False,
-                    'row_limit': timeseries_limit,
-                    'groupby': groupby,
-                    'metrics': metrics,
-                    'granularity': granularity,
-                    'from_dttm': inner_from_dttm or from_dttm,
-                    'to_dttm': inner_to_dttm or to_dttm,
-                    'filter': filter,
-                    'orderby': orderby,
-                    'extras': extras,
-                    'columns': columns,
-                    'order_desc': True,
+                    "prequeries": prequeries,
+                    "is_prequery": True,
+                    "is_timeseries": False,
+                    "row_limit": timeseries_limit,
+                    "groupby": groupby,
+                    "metrics": metrics,
+                    "granularity": granularity,
+                    "from_dttm": inner_from_dttm or from_dttm,
+                    "to_dttm": inner_to_dttm or to_dttm,
+                    "filter": filter,
+                    "orderby": orderby,
+                    "extras": extras,
+                    "columns": columns,
+                    "order_desc": True,
                 }
                 result = self.query(subquery_obj)
                 dimensions = [
-                    c for c in result.df.columns
+                    c
+                    for c in result.df.columns
                     if c not in metrics and c in groupby_exprs_sans_timestamp
                 ]
-                top_groups = self._get_top_groups(result.df,
-                                                  dimensions,
-                                                  groupby_exprs_sans_timestamp)
+                top_groups = self._get_top_groups(
+                    result.df, dimensions, groupby_exprs_sans_timestamp
+                )
                 qry = qry.where(top_groups)
 
-        return SqlaQuery(sqla_query=qry.select_from(tbl),
-                         labels_expected=labels_expected)
+        return SqlaQuery(
+            sqla_query=qry.select_from(tbl), labels_expected=labels_expected
+        )
 
     def _get_timeseries_orderby(self, timeseries_limit_metric, metrics_dict, cols):
         if utils.is_adhoc_metric(timeseries_limit_metric):
             ob = self.adhoc_metric_to_sqla(timeseries_limit_metric, cols)
         elif timeseries_limit_metric in metrics_dict:
-            timeseries_limit_metric = metrics_dict.get(
-                timeseries_limit_metric,
-            )
+            timeseries_limit_metric = metrics_dict.get(timeseries_limit_metric)
             ob = timeseries_limit_metric.get_sqla_col()
         else:
-            raise Exception(_("Metric '{}' is not valid".format(timeseries_limit_metric)))
+            raise Exception(
+                _("Metric '{}' is not valid".format(timeseries_limit_metric))
+            )
 
         return ob
 
@@ -840,8 +897,10 @@ class SqlaTable(Model, BaseDatasource):
             labels_expected = query_str_ext.labels_expected
             if df is not None and not df.empty:
                 if len(df.columns) != len(labels_expected):
-                    raise Exception(f'For {sql}, df.columns: {df.columns}'
-                                    f' differs from {labels_expected}')
+                    raise Exception(
+                        f"For {sql}, df.columns: {df.columns}"
+                        f" differs from {labels_expected}"
+                    )
                 else:
                     df.columns = labels_expected
             return df
@@ -851,22 +910,23 @@ class SqlaTable(Model, BaseDatasource):
         except Exception as e:
             df = None
             status = utils.QueryStatus.FAILED
-            logging.exception(f'Query {sql} on schema {self.schema} failed')
+            logging.exception(f"Query {sql} on schema {self.schema} failed")
             db_engine_spec = self.database.db_engine_spec
             error_message = db_engine_spec.extract_error_message(e)
 
         # if this is a main query with prequeries, combine them together
-        if not query_obj['is_prequery']:
-            query_obj['prequeries'].append(sql)
-            sql = ';\n\n'.join(query_obj['prequeries'])
-        sql += ';'
+        if not query_obj["is_prequery"]:
+            query_obj["prequeries"].append(sql)
+            sql = ";\n\n".join(query_obj["prequeries"])
+        sql += ";"
 
         return QueryResult(
             status=status,
             df=df,
             duration=datetime.now() - qry_start_dttm,
             query=sql,
-            error_message=error_message)
+            error_message=error_message,
+        )
 
     def get_sqla_table_object(self):
         return self.database.get_table(self.table_name, schema=self.schema)
@@ -877,9 +937,12 @@ class SqlaTable(Model, BaseDatasource):
             table = self.get_sqla_table_object()
         except Exception as e:
             logging.exception(e)
-            raise Exception(_(
-                "Table [{}] doesn't seem to exist in the specified database, "
-                "couldn't fetch column information").format(self.table_name))
+            raise Exception(
+                _(
+                    "Table [{}] doesn't seem to exist in the specified database, "
+                    "couldn't fetch column information"
+                ).format(self.table_name)
+            )
 
         M = SqlMetric  # noqa
         metrics = []
@@ -889,17 +952,18 @@ class SqlaTable(Model, BaseDatasource):
         dbcols = (
             db.session.query(TableColumn)
             .filter(TableColumn.table == self)
-            .filter(or_(TableColumn.column_name == col.name
-                        for col in table.columns)))
+            .filter(or_(TableColumn.column_name == col.name for col in table.columns))
+        )
         dbcols = {dbcol.column_name: dbcol for dbcol in dbcols}
 
         for col in table.columns:
             try:
-                datatype = db_engine_spec.column_datatype_to_string(col.type, db_dialect)
+                datatype = db_engine_spec.column_datatype_to_string(
+                    col.type, db_dialect
+                )
             except Exception as e:
-                datatype = 'UNKNOWN'
-                logging.error(
-                    'Unrecognized data type in {}.{}'.format(table, col.name))
+                datatype = "UNKNOWN"
+                logging.error("Unrecognized data type in {}.{}".format(table, col.name))
                 logging.exception(e)
             dbcol = dbcols.get(col.name, None)
             if not dbcol:
@@ -916,12 +980,14 @@ class SqlaTable(Model, BaseDatasource):
             if not any_date_col and dbcol.is_time:
                 any_date_col = col.name
 
-        metrics.append(M(
-            metric_name='count',
-            verbose_name='COUNT(*)',
-            metric_type='count',
-            expression='COUNT(*)',
-        ))
+        metrics.append(
+            M(
+                metric_name="count",
+                verbose_name="COUNT(*)",
+                metric_type="count",
+                expression="COUNT(*)",
+            )
+        )
         if not self.main_dttm_col:
             self.main_dttm_col = any_date_col
         self.add_missing_metrics(metrics)
@@ -936,23 +1002,32 @@ class SqlaTable(Model, BaseDatasource):
          This function can be used to import/export dashboards between multiple
          superset instances. Audit metadata isn't copied over.
         """
+
         def lookup_sqlatable(table):
-            return db.session.query(SqlaTable).join(Database).filter(
-                SqlaTable.table_name == table.table_name,
-                SqlaTable.schema == table.schema,
-                Database.id == table.database_id,
-            ).first()
+            return (
+                db.session.query(SqlaTable)
+                .join(Database)
+                .filter(
+                    SqlaTable.table_name == table.table_name,
+                    SqlaTable.schema == table.schema,
+                    Database.id == table.database_id,
+                )
+                .first()
+            )
 
         def lookup_database(table):
-            return db.session.query(Database).filter_by(
-                database_name=table.params_dict['database_name']).one()
+            return (
+                db.session.query(Database)
+                .filter_by(database_name=table.params_dict["database_name"])
+                .one()
+            )
+
         return import_datasource.import_datasource(
-            db.session, i_datasource, lookup_database, lookup_sqlatable,
-            import_time)
+            db.session, i_datasource, lookup_database, lookup_sqlatable, import_time
+        )
 
     @classmethod
-    def query_datasources_by_name(
-            cls, session, database, datasource_name, schema=None):
+    def query_datasources_by_name(cls, session, database, datasource_name, schema=None):
         query = (
             session.query(cls)
             .filter_by(database_id=database.id)
@@ -967,5 +1042,5 @@ class SqlaTable(Model, BaseDatasource):
         return qry.filter_by(is_sqllab_view=False)
 
 
-sa.event.listen(SqlaTable, 'after_insert', security_manager.set_perm)
-sa.event.listen(SqlaTable, 'after_update', security_manager.set_perm)
+sa.event.listen(SqlaTable, "after_insert", security_manager.set_perm)
+sa.event.listen(SqlaTable, "after_update", security_manager.set_perm)
diff --git a/superset/connectors/sqla/views.py b/superset/connectors/sqla/views.py
index e6c0029..90ac4e8 100644
--- a/superset/connectors/sqla/views.py
+++ b/superset/connectors/sqla/views.py
@@ -32,8 +32,12 @@ from superset import appbuilder, db, security_manager
 from superset.connectors.base.views import DatasourceModelView
 from superset.utils import core as utils
 from superset.views.base import (
-    DatasourceFilter, DeleteMixin, get_datasource_exist_error_msg,
-    ListWidgetWithCheckboxes, SupersetModelView, YamlExportMixin,
+    DatasourceFilter,
+    DeleteMixin,
+    get_datasource_exist_error_msg,
+    ListWidgetWithCheckboxes,
+    SupersetModelView,
+    YamlExportMixin,
 )
 from . import models
 
@@ -43,79 +47,103 @@ logger = logging.getLogger(__name__)
 class TableColumnInlineView(CompactCRUDMixin, SupersetModelView):  # noqa
     datamodel = SQLAInterface(models.TableColumn)
 
-    list_title = _('Columns')
-    show_title = _('Show Column')
-    add_title = _('Add Column')
-    edit_title = _('Edit Column')
+    list_title = _("Columns")
+    show_title = _("Show Column")
+    add_title = _("Add Column")
+    edit_title = _("Edit Column")
 
     can_delete = False
     list_widget = ListWidgetWithCheckboxes
     edit_columns = [
-        'column_name', 'verbose_name', 'description',
-        'type', 'groupby', 'filterable',
-        'table', 'expression',
-        'is_dttm', 'python_date_format', 'database_expression']
+        "column_name",
+        "verbose_name",
+        "description",
+        "type",
+        "groupby",
+        "filterable",
+        "table",
+        "expression",
+        "is_dttm",
+        "python_date_format",
+        "database_expression",
+    ]
     add_columns = edit_columns
     list_columns = [
-        'column_name', 'verbose_name', 'type', 'groupby', 'filterable',
-        'is_dttm']
+        "column_name",
+        "verbose_name",
+        "type",
+        "groupby",
+        "filterable",
+        "is_dttm",
+    ]
     page_size = 500
     description_columns = {
-        'is_dttm': _(
-            'Whether to make this column available as a '
-            '[Time Granularity] option, column has to be DATETIME or '
-            'DATETIME-like'),
-        'filterable': _(
-            'Whether this column is exposed in the `Filters` section '
-            'of the explore view.'),
-        'type': _(
-            'The data type that was inferred by the database. '
-            'It may be necessary to input a type manually for '
-            'expression-defined columns in some cases. In most cases '
-            'users should not need to alter this.'),
-        'expression': utils.markdown(
-            'a valid, *non-aggregating* SQL expression as supported by the '
-            'underlying backend. Example: `substr(name, 1, 1)`', True),
-        'python_date_format': utils.markdown(Markup(
-            'The pattern of timestamp format, use '
-            '<a href="https://docs.python.org/2/library/'
-            'datetime.html#strftime-strptime-behavior">'
-            'python datetime string pattern</a> '
-            'expression. If time is stored in epoch '
-            'format, put `epoch_s` or `epoch_ms`. Leave `Database Expression` '
-            'below empty if timestamp is stored in '
-            'String or Integer(epoch) type'), True),
-        'database_expression': utils.markdown(
-            'The database expression to cast internal datetime '
-            'constants to database date/timestamp type according to the DBAPI. '
-            'The expression should follow the pattern of '
-            '%Y-%m-%d %H:%M:%S, based on different DBAPI. '
-            'The string should be a python string formatter \n'
+        "is_dttm": _(
+            "Whether to make this column available as a "
+            "[Time Granularity] option, column has to be DATETIME or "
+            "DATETIME-like"
+        ),
+        "filterable": _(
+            "Whether this column is exposed in the `Filters` section "
+            "of the explore view."
+        ),
+        "type": _(
+            "The data type that was inferred by the database. "
+            "It may be necessary to input a type manually for "
+            "expression-defined columns in some cases. In most case "
+            "users should not need to alter this."
+        ),
+        "expression": utils.markdown(
+            "a valid, *non-aggregating* SQL expression as supported by the "
+            "underlying backend. Example: `substr(name, 1, 1)`",
+            True,
+        ),
+        "python_date_format": utils.markdown(
+            Markup(
+                "The pattern of timestamp format, use "
+                '<a href="https://docs.python.org/2/library/'
+                'datetime.html#strftime-strptime-behavior">'
+                "python datetime string pattern</a> "
+                "expression. If time is stored in epoch "
+                "format, put `epoch_s` or `epoch_ms`. Leave `Database Expression` "
+                "below empty if timestamp is stored in "
+                "String or Integer(epoch) type"
+            ),
+            True,
+        ),
+        "database_expression": utils.markdown(
+            "The database expression to cast internal datetime "
+            "constants to database date/timestamp type according to the DBAPI. "
+            "The expression should follow the pattern of "
+            "%Y-%m-%d %H:%M:%S, based on different DBAPI. "
+            "The string should be a python string formatter \n"
             "`Ex: TO_DATE('{}', 'YYYY-MM-DD HH24:MI:SS')` for Oracle "
-            'Superset uses default expression based on DB URI if this '
-            'field is blank.', True),
+            "Superset uses default expression based on DB URI if this "
+            "field is blank.",
+            True,
+        ),
     }
     label_columns = {
-        'column_name': _('Column'),
-        'verbose_name': _('Verbose Name'),
-        'description': _('Description'),
-        'groupby': _('Groupable'),
-        'filterable': _('Filterable'),
-        'table': _('Table'),
-        'expression': _('Expression'),
-        'is_dttm': _('Is temporal'),
-        'python_date_format': _('Datetime Format'),
-        'database_expression': _('Database Expression'),
-        'type': _('Type'),
+        "column_name": _("Column"),
+        "verbose_name": _("Verbose Name"),
+        "description": _("Description"),
+        "groupby": _("Groupable"),
+        "filterable": _("Filterable"),
+        "table": _("Table"),
+        "expression": _("Expression"),
+        "is_dttm": _("Is temporal"),
+        "python_date_format": _("Datetime Format"),
+        "database_expression": _("Database Expression"),
+        "type": _("Type"),
     }
 
     add_form_extra_fields = {
-        'table': QuerySelectField(
-            'Table',
+        "table": QuerySelectField(
+            "Table",
             query_factory=lambda: db.session().query(models.SqlaTable),
             allow_blank=True,
-            widget=Select2Widget(extra_classes='readonly'),
-        ),
+            widget=Select2Widget(extra_classes="readonly"),
+        )
     }
 
     edit_form_extra_fields = add_form_extra_fields
@@ -127,63 +155,80 @@ appbuilder.add_view_no_menu(TableColumnInlineView)
 class SqlMetricInlineView(CompactCRUDMixin, SupersetModelView):  # noqa
     datamodel = SQLAInterface(models.SqlMetric)
 
-    list_title = _('Metrics')
-    show_title = _('Show Metric')
-    add_title = _('Add Metric')
-    edit_title = _('Edit Metric')
+    list_title = _("Metrics")
+    show_title = _("Show Metric")
+    add_title = _("Add Metric")
+    edit_title = _("Edit Metric")
 
-    list_columns = ['metric_name', 'verbose_name', 'metric_type']
+    list_columns = ["metric_name", "verbose_name", "metric_type"]
     edit_columns = [
-        'metric_name', 'description', 'verbose_name', 'metric_type',
-        'expression', 'table', 'd3format', 'is_restricted', 'warning_text']
+        "metric_name",
+        "description",
+        "verbose_name",
+        "metric_type",
+        "expression",
+        "table",
+        "d3format",
+        "is_restricted",
+        "warning_text",
+    ]
     description_columns = {
-        'expression': utils.markdown(
-            'a valid, *aggregating* SQL expression as supported by the '
-            'underlying backend. Example: `count(DISTINCT userid)`', True),
-        'is_restricted': _('Whether access to this metric is restricted '
-                           'to certain roles. Only roles with the permission '
-                           "'metric access on XXX (the name of this metric)' "
-                           'are allowed to access this metric'),
-        'd3format': utils.markdown(
-            'd3 formatting string as defined [here]'
-            '(https://github.com/d3/d3-format/blob/master/README.md#format). '
-            'For instance, this default formatting applies in the Table '
-            'visualization and allows for different metrics to use different '
-            'formats', True,
+        "expression": utils.markdown(
+            "a valid, *aggregating* SQL expression as supported by the "
+            "underlying backend. Example: `count(DISTINCT userid)`",
+            True,
+        ),
+        "is_restricted": _(
+            "Whether access to this metric is restricted "
+            "to certain roles. Only roles with the permission "
+            "'metric access on XXX (the name of this metric)' "
+            "are allowed to access this metric"
+        ),
+        "d3format": utils.markdown(
+            "d3 formatting string as defined [here]"
+            "(https://github.com/d3/d3-format/blob/master/README.md#format). "
+            "For instance, this default formatting applies in the Table "
+            "visualization and allow for different metric to use different "
+            "formats",
+            True,
         ),
     }
     add_columns = edit_columns
     page_size = 500
     label_columns = {
-        'metric_name': _('Metric'),
-        'description': _('Description'),
-        'verbose_name': _('Verbose Name'),
-        'metric_type': _('Type'),
-        'expression': _('SQL Expression'),
-        'table': _('Table'),
-        'd3format': _('D3 Format'),
-        'is_restricted': _('Is Restricted'),
-        'warning_text': _('Warning Message'),
+        "metric_name": _("Metric"),
+        "description": _("Description"),
+        "verbose_name": _("Verbose Name"),
+        "metric_type": _("Type"),
+        "expression": _("SQL Expression"),
+        "table": _("Table"),
+        "d3format": _("D3 Format"),
+        "is_restricted": _("Is Restricted"),
+        "warning_text": _("Warning Message"),
     }
 
     add_form_extra_fields = {
-        'table': QuerySelectField(
-            'Table',
+        "table": QuerySelectField(
+            "Table",
             query_factory=lambda: db.session().query(models.SqlaTable),
             allow_blank=True,
-            widget=Select2Widget(extra_classes='readonly'),
-        ),
+            widget=Select2Widget(extra_classes="readonly"),
+        )
     }
 
     edit_form_extra_fields = add_form_extra_fields
 
     def post_add(self, metric):
         if metric.is_restricted:
-            security_manager.add_permission_view_menu('metric_access', metric.get_perm())
+            security_manager.add_permission_view_menu(
+                "metric_access", metric.get_perm()
+            )
 
     def post_update(self, metric):
         if metric.is_restricted:
-            security_manager.add_permission_view_menu('metric_access', metric.get_perm())
+            security_manager.add_permission_view_menu(
+                "metric_access", metric.get_perm()
+            )
 
 
 appbuilder.add_view_no_menu(SqlMetricInlineView)
@@ -192,104 +237,114 @@ appbuilder.add_view_no_menu(SqlMetricInlineView)
 class TableModelView(DatasourceModelView, DeleteMixin, YamlExportMixin):  # noqa
     datamodel = SQLAInterface(models.SqlaTable)
 
-    list_title = _('Tables')
-    show_title = _('Show Table')
-    add_title = _('Import a table definition')
-    edit_title = _('Edit Table')
+    list_title = _("Tables")
+    show_title = _("Show Table")
+    add_title = _("Import a table definition")
+    edit_title = _("Edit Table")
 
-    list_columns = [
-        'link', 'database_name',
-        'changed_by_', 'modified']
-    order_columns = ['modified']
-    add_columns = ['database', 'schema', 'table_name']
+    list_columns = ["link", "database_name", "changed_by_", "modified"]
+    order_columns = ["modified"]
+    add_columns = ["database", "schema", "table_name"]
     edit_columns = [
-        'table_name', 'sql', 'filter_select_enabled',
-        'fetch_values_predicate', 'database', 'schema',
-        'description', 'owners',
-        'main_dttm_col', 'default_endpoint', 'offset', 'cache_timeout',
-        'is_sqllab_view', 'template_params',
+        "table_name",
+        "sql",
+        "filter_select_enabled",
+        "fetch_values_predicate",
+        "database",
+        "schema",
+        "description",
+        "owners",
+        "main_dttm_col",
+        "default_endpoint",
+        "offset",
+        "cache_timeout",
+        "is_sqllab_view",
+        "template_params",
     ]
-    base_filters = [['id', DatasourceFilter, lambda: []]]
-    show_columns = edit_columns + ['perm', 'slices']
+    base_filters = [["id", DatasourceFilter, lambda: []]]
+    show_columns = edit_columns + ["perm", "slices"]
     related_views = [TableColumnInlineView, SqlMetricInlineView]
-    base_order = ('changed_on', 'desc')
-    search_columns = (
-        'database', 'schema', 'table_name', 'owners', 'is_sqllab_view',
-    )
+    base_order = ("changed_on", "desc")
+    search_columns = ("database", "schema", "table_name", "owners", "is_sqllab_view")
     description_columns = {
-        'slices': _(
-            'The list of charts associated with this table. By '
-            'altering this datasource, you may change how these associated '
-            'charts behave. '
-            'Also note that charts need to point to a datasource, so '
-            'this form will fail at saving if removing charts from a '
-            'datasource. If you want to change the datasource for a chart, '
-            "overwrite the chart from the 'explore view'"),
-        'offset': _('Timezone offset (in hours) for this datasource'),
-        'table_name': _(
-            'Name of the table that exists in the source database'),
-        'schema': _(
-            'Schema, as used only in some databases like Postgres, Redshift '
-            'and DB2'),
-        'description': Markup(
+        "slices": _(
+            "The list of charts associated with this table. By "
+            "altering this datasource, you may change how these associated "
+            "charts behave. "
+            "Also note that charts need to point to a datasource, so "
+            "this form will fail at saving if removing charts from a "
+            "datasource. If you want to change the datasource for a chart, "
+            "overwrite the chart from the 'explore view'"
+        ),
+        "offset": _("Timezone offset (in hours) for this datasource"),
+        "table_name": _("Name of the table that exists in the source database"),
+        "schema": _(
+            "Schema, as used only in some databases like Postgres, Redshift " "and DB2"
+        ),
+        "description": Markup(
             'Supports <a href="https://daringfireball.net/projects/markdown/">'
-            'markdown</a>'),
-        'sql': _(
-            'This field acts as a Superset view, meaning that Superset will '
-            'run a query against this string as a subquery.',
+            "markdown</a>"
+        ),
+        "sql": _(
+            "This fields acts a Superset view, meaning that Superset will "
+            "run a query against this string as a subquery."
         ),
-        'fetch_values_predicate': _(
-            'Predicate applied when fetching distinct values to '
-            'populate the filter control component. Supports '
-            'jinja template syntax. Applies only when '
-            '`Enable Filter Select` is on.',
+        "fetch_values_predicate": _(
+            "Predicate applied when fetching distinct value to "
+            "populate the filter control component. Supports "
+            "jinja template syntax. Applies only when "
+            "`Enable Filter Select` is on."
         ),
-        'default_endpoint': _(
-            'Redirects to this endpoint when clicking on the table '
-            'from the table list'),
-        'filter_select_enabled': _(
+        "default_endpoint": _(
+            "Redirects to this endpoint when clicking on the table "
+            "from the table list"
+        ),
+        "filter_select_enabled": _(
             "Whether to populate the filter's dropdown in the explore "
             "view's filter section with a list of distinct values fetched "
-            'from the backend on the fly'),
-        'is_sqllab_view': _(
-            "Whether the table was generated by the 'Visualize' flow "
-            'in SQL Lab'),
-        'template_params': _(
-            'A set of parameters that become available in the query using '
-            'Jinja templating syntax'),
-        'cache_timeout': _(
-            'Duration (in seconds) of the caching timeout for this table. '
-            'A timeout of 0 indicates that the cache never expires. '
-            'Note this defaults to the database timeout if undefined.'),
+            "from the backend on the fly"
+        ),
+        "is_sqllab_view": _(
+            "Whether the table was generated by the 'Visualize' flow " "in SQL Lab"
+        ),
+        "template_params": _(
+            "A set of parameters that become available in the query using "
+            "Jinja templating syntax"
+        ),
+        "cache_timeout": _(
+            "Duration (in seconds) of the caching timeout for this table. "
+            "A timeout of 0 indicates that the cache never expires. "
+            "Note this defaults to the database timeout if undefined."
+        ),
     }
     label_columns = {
-        'slices': _('Associated Charts'),
-        'link': _('Table'),
-        'changed_by_': _('Changed By'),
-        'database': _('Database'),
-        'database_name': _('Database'),
-        'changed_on_': _('Last Changed'),
-        'filter_select_enabled': _('Enable Filter Select'),
-        'schema': _('Schema'),
-        'default_endpoint': _('Default Endpoint'),
-        'offset': _('Offset'),
-        'cache_timeout': _('Cache Timeout'),
-        'table_name': _('Table Name'),
-        'fetch_values_predicate': _('Fetch Values Predicate'),
-        'owners': _('Owners'),
-        'main_dttm_col': _('Main Datetime Column'),
-        'description': _('Description'),
-        'is_sqllab_view': _('SQL Lab View'),
-        'template_params': _('Template parameters'),
-        'modified': _('Modified'),
+        "slices": _("Associated Charts"),
+        "link": _("Table"),
+        "changed_by_": _("Changed By"),
+        "database": _("Database"),
+        "database_name": _("Database"),
+        "changed_on_": _("Last Changed"),
+        "filter_select_enabled": _("Enable Filter Select"),
+        "schema": _("Schema"),
+        "default_endpoint": _("Default Endpoint"),
+        "offset": _("Offset"),
+        "cache_timeout": _("Cache Timeout"),
+        "table_name": _("Table Name"),
+        "fetch_values_predicate": _("Fetch Values Predicate"),
+        "owners": _("Owners"),
+        "main_dttm_col": _("Main Datetime Column"),
+        "description": _("Description"),
+        "is_sqllab_view": _("SQL Lab View"),
+        "template_params": _("Template parameters"),
+        "modified": _("Modified"),
     }
 
     edit_form_extra_fields = {
-        'database': QuerySelectField(
-            'Database',
+        "database": QuerySelectField(
+            "Database",
             query_factory=lambda: db.session().query(models.Database),
-            widget=Select2Widget(extra_classes='readonly'),
-        ),
+            widget=Select2Widget(extra_classes="readonly"),
+        )
     }
 
     def pre_add(self, table):
@@ -297,34 +352,43 @@ class TableModelView(DatasourceModelView, DeleteMixin, YamlExportMixin):  # noqa
             table_query = db.session.query(models.SqlaTable).filter(
                 models.SqlaTable.table_name == table.table_name,
                 models.SqlaTable.schema == table.schema,
-                models.SqlaTable.database_id == table.database.id)
+                models.SqlaTable.database_id == table.database.id,
+            )
             if db.session.query(table_query.exists()).scalar():
-                raise Exception(
-                    get_datasource_exist_error_msg(table.full_name))
+                raise Exception(get_datasource_exist_error_msg(table.full_name))
 
         # Fail before adding if the table can't be found
         try:
             table.get_sqla_table_object()
         except Exception as e:
-            logger.exception(f'Got an error in pre_add for {table.name}')
-            raise Exception(_(
-                'Table [{}] could not be found, '
-                'please double check your '
-                'database connection, schema, and '
-                'table name, error: {}').format(table.name, str(e)))
+            logger.exception(f"Got an error in pre_add for {table.name}")
+            raise Exception(
+                _(
+                    "Table [{}] could not be found, "
+                    "please double check your "
+                    "database connection, schema, and "
+                    "table name, error: {}"
+                ).format(table.name, str(e))
+            )
 
     def post_add(self, table, flash_message=True):
         table.fetch_metadata()
-        security_manager.add_permission_view_menu('datasource_access', table.get_perm())
+        security_manager.add_permission_view_menu("datasource_access", table.get_perm())
         if table.schema:
-            security_manager.add_permission_view_menu('schema_access', table.schema_perm)
+            security_manager.add_permission_view_menu(
+                "schema_access", table.schema_perm
+            )
 
         if flash_message:
-            flash(_(
-                'The table was created. '
-                'As part of this two-phase configuration '
-                'process, you should now click the edit button by '
-                'the new table to configure it.'), 'info')
+            flash(
+                _(
+                    "The table was created. "
+                    "As part of this two-phase configuration "
+                    "process, you should now click the edit button by "
+                    "the new table to configure it."
+                ),
+                "info",
+            )
 
     def post_update(self, table):
         self.post_add(table, flash_message=False)
@@ -332,20 +396,18 @@ class TableModelView(DatasourceModelView, DeleteMixin, YamlExportMixin):  # noqa
     def _delete(self, pk):
         DeleteMixin._delete(self, pk)
 
-    @expose('/edit/<pk>', methods=['GET', 'POST'])
+    @expose("/edit/<pk>", methods=["GET", "POST"])
     @has_access
     def edit(self, pk):
         """Simple hack to redirect to explore view after saving"""
         resp = super(TableModelView, self).edit(pk)
         if isinstance(resp, str):
             return resp
-        return redirect('/superset/explore/table/{}/'.format(pk))
+        return redirect("/superset/explore/table/{}/".format(pk))
 
     @action(
-        'refresh',
-        __('Refresh Metadata'),
-        __('Refresh column metadata'),
-        'fa-refresh')
+        "refresh", __("Refresh Metadata"), __("Refresh column metadata"), "fa-refresh"
+    )
     def refresh(self, tables):
         if not isinstance(tables, list):
             tables = [tables]
@@ -360,26 +422,29 @@ class TableModelView(DatasourceModelView, DeleteMixin, YamlExportMixin):  # noqa
 
         if len(successes) > 0:
             success_msg = _(
-                'Metadata refreshed for the following table(s): %(tables)s',
-                tables=', '.join([t.table_name for t in successes]))
-            flash(success_msg, 'info')
+                "Metadata refreshed for the following table(s): %(tables)s",
+                tables=", ".join([t.table_name for t in successes]),
+            )
+            flash(success_msg, "info")
         if len(failures) > 0:
             failure_msg = _(
-                'Unable to retrieve metadata for the following table(s): %(tables)s',
-                tables=', '.join([t.table_name for t in failures]))
-            flash(failure_msg, 'danger')
+                "Unable to retrieve metadata for the following table(s): %(tables)s",
+                tables=", ".join([t.table_name for t in failures]),
+            )
+            flash(failure_msg, "danger")
 
-        return redirect('/tablemodelview/list/')
+        return redirect("/tablemodelview/list/")
 
 
 appbuilder.add_view_no_menu(TableModelView)
 appbuilder.add_link(
-    'Tables',
-    label=__('Tables'),
-    href='/tablemodelview/list/?_flt_1_is_sqllab_view=y',
-    icon='fa-table',
-    category='Sources',
-    category_label=__('Sources'),
-    category_icon='fa-table')
-
-appbuilder.add_separator('Sources')
+    "Tables",
+    label=__("Tables"),
+    href="/tablemodelview/list/?_flt_1_is_sqllab_view=y",
+    icon="fa-table",
+    category="Sources",
+    category_label=__("Sources"),
+    category_icon="fa-table",
+)
+
+appbuilder.add_separator("Sources")
diff --git a/superset/data/__init__.py b/superset/data/__init__.py
index b36a300..aee65ac 100644
--- a/superset/data/__init__.py
+++ b/superset/data/__init__.py
@@ -28,6 +28,6 @@ from .multiformat_time_series import load_multiformat_time_series  # noqa
 from .paris import load_paris_iris_geojson  # noqa
 from .random_time_series import load_random_time_series_data  # noqa
 from .sf_population_polygons import load_sf_population_polygons  # noqa
-from .tabbed_dashboard import load_tabbed_dashboard # noqa
+from .tabbed_dashboard import load_tabbed_dashboard  # noqa
 from .unicode_test_data import load_unicode_test_data  # noqa
 from .world_bank import load_world_bank_health_n_pop  # noqa
diff --git a/superset/data/bart_lines.py b/superset/data/bart_lines.py
index f4e0b1f..8e615fc 100644
--- a/superset/data/bart_lines.py
+++ b/superset/data/bart_lines.py
@@ -26,30 +26,31 @@ from .helpers import TBL, get_example_data
 
 
 def load_bart_lines():
-    tbl_name = 'bart_lines'
-    content = get_example_data('bart-lines.json.gz')
-    df = pd.read_json(content, encoding='latin-1')
-    df['path_json'] = df.path.map(json.dumps)
-    df['polyline'] = df.path.map(polyline.encode)
-    del df['path']
+    tbl_name = "bart_lines"
+    content = get_example_data("bart-lines.json.gz")
+    df = pd.read_json(content, encoding="latin-1")
+    df["path_json"] = df.path.map(json.dumps)
+    df["polyline"] = df.path.map(polyline.encode)
+    del df["path"]
 
     df.to_sql(
         tbl_name,
         db.engine,
-        if_exists='replace',
+        if_exists="replace",
         chunksize=500,
         dtype={
-            'color': String(255),
-            'name': String(255),
-            'polyline': Text,
-            'path_json': Text,
+            "color": String(255),
+            "name": String(255),
+            "polyline": Text,
+            "path_json": Text,
         },
-        index=False)
-    print('Creating table {} reference'.format(tbl_name))
+        index=False,
+    )
+    print("Creating table {} reference".format(tbl_name))
     tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()
     if not tbl:
         tbl = TBL(table_name=tbl_name)
-    tbl.description = 'BART lines'
+    tbl.description = "BART lines"
     tbl.database = get_or_create_main_db()
     db.session.merge(tbl)
     db.session.commit()
diff --git a/superset/data/birth_names.py b/superset/data/birth_names.py
index 85de019..aa00069 100644
--- a/superset/data/birth_names.py
+++ b/superset/data/birth_names.py
@@ -38,46 +38,46 @@ from .helpers import (
 
 def load_birth_names():
     """Loading birth name dataset from a zip file in the repo"""
-    data = get_example_data('birth_names.json.gz')
+    data = get_example_data("birth_names.json.gz")
     pdf = pd.read_json(data)
-    pdf.ds = pd.to_datetime(pdf.ds, unit='ms')
+    pdf.ds = pd.to_datetime(pdf.ds, unit="ms")
     pdf.to_sql(
-        'birth_names',
+        "birth_names",
         db.engine,
-        if_exists='replace',
+        if_exists="replace",
         chunksize=500,
         dtype={
-            'ds': DateTime,
-            'gender': String(16),
-            'state': String(10),
-            'name': String(255),
+            "ds": DateTime,
+            "gender": String(16),
+            "state": String(10),
+            "name": String(255),
         },
-        index=False)
-    print('Done loading table!')
-    print('-' * 80)
+        index=False,
+    )
+    print("Done loading table!")
+    print("-" * 80)
 
-    print('Creating table [birth_names] reference')
-    obj = db.session.query(TBL).filter_by(table_name='birth_names').first()
+    print("Creating table [birth_names] reference")
+    obj = db.session.query(TBL).filter_by(table_name="birth_names").first()
     if not obj:
-        obj = TBL(table_name='birth_names')
-    obj.main_dttm_col = 'ds'
+        obj = TBL(table_name="birth_names")
+    obj.main_dttm_col = "ds"
     obj.database = get_or_create_main_db()
     obj.filter_select_enabled = True
 
-    if not any(col.column_name == 'num_california' for col in obj.columns):
-        col_state = str(column('state').compile(db.engine))
-        col_num = str(column('num').compile(db.engine))
-        obj.columns.append(TableColumn(
-            column_name='num_california',
-            expression=f"CASE WHEN {col_state} = 'CA' THEN {col_num} ELSE 0 END",
-        ))
+    if not any(col.column_name == "num_california" for col in obj.columns):
+        col_state = str(column("state").compile(db.engine))
+        col_num = str(column("num").compile(db.engine))
+        obj.columns.append(
+            TableColumn(
+                column_name="num_california",
+                expression=f"CASE WHEN {col_state} = 'CA' THEN {col_num} ELSE 0 END",
+            )
+        )
 
-    if not any(col.metric_name == 'sum__num' for col in obj.metrics):
-        col = str(column('num').compile(db.engine))
-        obj.metrics.append(SqlMetric(
-            metric_name='sum__num',
-            expression=f'SUM({col})',
-        ))
+    if not any(col.metric_name == "sum__num" for col in obj.metrics):
+        col = str(column("num").compile(db.engine))
+        obj.metrics.append(SqlMetric(metric_name="sum__num", expression=f"SUM({col})"))
 
     db.session.merge(obj)
     db.session.commit()
@@ -85,149 +85,149 @@ def load_birth_names():
     tbl = obj
 
     defaults = {
-        'compare_lag': '10',
-        'compare_suffix': 'o10Y',
-        'limit': '25',
-        'granularity_sqla': 'ds',
-        'groupby': [],
-        'metric': 'sum__num',
-        'metrics': ['sum__num'],
-        'row_limit': config.get('ROW_LIMIT'),
-        'since': '100 years ago',
-        'until': 'now',
-        'viz_type': 'table',
-        'where': '',
-        'markup_type': 'markdown',
+        "compare_lag": "10",
+        "compare_suffix": "o10Y",
+        "limit": "25",
+        "granularity_sqla": "ds",
+        "groupby": [],
+        "metric": "sum__num",
+        "metrics": ["sum__num"],
+        "row_limit": config.get("ROW_LIMIT"),
+        "since": "100 years ago",
+        "until": "now",
+        "viz_type": "table",
+        "where": "",
+        "markup_type": "markdown",
     }
 
-    admin = security_manager.find_user('admin')
+    admin = security_manager.find_user("admin")
 
-    print('Creating some slices')
+    print("Creating some slices")
     slices = [
         Slice(
-            slice_name='Girls',
-            viz_type='table',
-            datasource_type='table',
+            slice_name="Girls",
+            viz_type="table",
+            datasource_type="table",
             datasource_id=tbl.id,
             params=get_slice_json(
                 defaults,
-                groupby=['name'],
-                filters=[{
-                    'col': 'gender',
-                    'op': 'in',
-                    'val': ['girl'],
-                }],
+                groupby=["name"],
+                filters=[{"col": "gender", "op": "in", "val": ["girl"]}],
                 row_limit=50,
-                timeseries_limit_metric='sum__num')),
+                timeseries_limit_metric="sum__num",
+            ),
+        ),
         Slice(
-            slice_name='Boys',
-            viz_type='table',
-            datasource_type='table',
+            slice_name="Boys",
+            viz_type="table",
+            datasource_type="table",
             datasource_id=tbl.id,
             params=get_slice_json(
                 defaults,
-                groupby=['name'],
-                filters=[{
-                    'col': 'gender',
-                    'op': 'in',
-                    'val': ['boy'],
-                }],
-                row_limit=50)),
+                groupby=["name"],
+                filters=[{"col": "gender", "op": "in", "val": ["boy"]}],
+                row_limit=50,
+            ),
+        ),
         Slice(
-            slice_name='Participants',
-            viz_type='big_number',
-            datasource_type='table',
+            slice_name="Participants",
+            viz_type="big_number",
+            datasource_type="table",
             datasource_id=tbl.id,
             params=get_slice_json(
                 defaults,
-                viz_type='big_number', granularity_sqla='ds',
-                compare_lag='5', compare_suffix='over 5Y')),
+                viz_type="big_number",
+                granularity_sqla="ds",
+                compare_lag="5",
+                compare_suffix="over 5Y",
+            ),
+        ),
         Slice(
-            slice_name='Genders',
-            viz_type='pie',
-            datasource_type='table',
+            slice_name="Genders",
+            viz_type="pie",
+            datasource_type="table",
             datasource_id=tbl.id,
-            params=get_slice_json(
-                defaults,
-                viz_type='pie', groupby=['gender'])),
+            params=get_slice_json(defaults, viz_type="pie", groupby=["gender"]),
+        ),
         Slice(
-            slice_name='Genders by State',
-            viz_type='dist_bar',
-            datasource_type='table',
+            slice_name="Genders by State",
+            viz_type="dist_bar",
+            datasource_type="table",
             datasource_id=tbl.id,
             params=get_slice_json(
                 defaults,
                 adhoc_filters=[
                     {
-                        'clause': 'WHERE',
-                        'expressionType': 'SIMPLE',
-                        'filterOptionName': '2745eae5',
-                        'comparator': ['other'],
-                        'operator': 'not in',
-                        'subject': 'state',
-                    },
+                        "clause": "WHERE",
+                        "expressionType": "SIMPLE",
+                        "filterOptionName": "2745eae5",
+                        "comparator": ["other"],
+                        "operator": "not in",
+                        "subject": "state",
+                    }
                 ],
-                viz_type='dist_bar',
+                viz_type="dist_bar",
                 metrics=[
                     {
-                        'expressionType': 'SIMPLE',
-                        'column': {
-                            'column_name': 'sum_boys',
-                            'type': 'BIGINT(20)',
-                        },
-                        'aggregate': 'SUM',
-                        'label': 'Boys',
-                        'optionName': 'metric_11',
+                        "expressionType": "SIMPLE",
+                        "column": {"column_name": "sum_boys", "type": "BIGINT(20)"},
+                        "aggregate": "SUM",
+                        "label": "Boys",
+                        "optionName": "metric_11",
                     },
                     {
-                        'expressionType': 'SIMPLE',
-                        'column': {
-                            'column_name': 'sum_girls',
-                            'type': 'BIGINT(20)',
-                        },
-                        'aggregate': 'SUM',
-                        'label': 'Girls',
-                        'optionName': 'metric_12',
+                        "expressionType": "SIMPLE",
+                        "column": {"column_name": "sum_girls", "type": "BIGINT(20)"},
+                        "aggregate": "SUM",
+                        "label": "Girls",
+                        "optionName": "metric_12",
                     },
                 ],
-                groupby=['state'])),
+                groupby=["state"],
+            ),
+        ),
         Slice(
-            slice_name='Trends',
-            viz_type='line',
-            datasource_type='table',
+            slice_name="Trends",
+            viz_type="line",
+            datasource_type="table",
             datasource_id=tbl.id,
             params=get_slice_json(
                 defaults,
-                viz_type='line', groupby=['name'],
-                granularity_sqla='ds', rich_tooltip=True, show_legend=True)),
+                viz_type="line",
+                groupby=["name"],
+                granularity_sqla="ds",
+                rich_tooltip=True,
+                show_legend=True,
+            ),
+        ),
         Slice(
-            slice_name='Average and Sum Trends',
-            viz_type='dual_line',
-            datasource_type='table',
+            slice_name="Average and Sum Trends",
+            viz_type="dual_line",
+            datasource_type="table",
             datasource_id=tbl.id,
             params=get_slice_json(
                 defaults,
-                viz_type='dual_line',
+                viz_type="dual_line",
                 metric={
-                    'expressionType': 'SIMPLE',
-                    'column': {
-                        'column_name': 'num',
-                        'type': 'BIGINT(20)',
-                    },
-                    'aggregate': 'AVG',
-                    'label': 'AVG(num)',
-                    'optionName': 'metric_vgops097wej_g8uff99zhk7',
+                    "expressionType": "SIMPLE",
+                    "column": {"column_name": "num", "type": "BIGINT(20)"},
+                    "aggregate": "AVG",
+                    "label": "AVG(num)",
+                    "optionName": "metric_vgops097wej_g8uff99zhk7",
                 },
-                metric_2='sum__num',
-                granularity_sqla='ds')),
+                metric_2="sum__num",
+                granularity_sqla="ds",
+            ),
+        ),
         Slice(
-            slice_name='Title',
-            viz_type='markup',
-            datasource_type='table',
+            slice_name="Title",
+            viz_type="markup",
+            datasource_type="table",
             datasource_id=tbl.id,
             params=get_slice_json(
                 defaults,
-                viz_type='markup', markup_type='html',
+                viz_type="markup",
+                markup_type="html",
                 code="""\
     <div style='text-align:center'>
         <h1>Birth Names Dashboard</h1>
@@ -237,135 +237,156 @@ def load_birth_names():
         </p>
         <img src='/static/assets/images/babytux.jpg'>
     </div>
-    """)),
+    """,
+            ),
+        ),
         Slice(
-            slice_name='Name Cloud',
-            viz_type='word_cloud',
-            datasource_type='table',
+            slice_name="Name Cloud",
+            viz_type="word_cloud",
+            datasource_type="table",
             datasource_id=tbl.id,
             params=get_slice_json(
                 defaults,
-                viz_type='word_cloud', size_from='10',
-                series='name', size_to='70', rotation='square',
-                limit='100')),
+                viz_type="word_cloud",
+                size_from="10",
+                series="name",
+                size_to="70",
+                rotation="square",
+                limit="100",
+            ),
+        ),
         Slice(
-            slice_name='Pivot Table',
-            viz_type='pivot_table',
-            datasource_type='table',
+            slice_name="Pivot Table",
+            viz_type="pivot_table",
+            datasource_type="table",
             datasource_id=tbl.id,
             params=get_slice_json(
                 defaults,
-                viz_type='pivot_table', metrics=['sum__num'],
-                groupby=['name'], columns=['state'])),
+                viz_type="pivot_table",
+                metrics=["sum__num"],
+                groupby=["name"],
+                columns=["state"],
+            ),
+        ),
         Slice(
-            slice_name='Number of Girls',
-            viz_type='big_number_total',
-            datasource_type='table',
+            slice_name="Number of Girls",
+            viz_type="big_number_total",
+            datasource_type="table",
             datasource_id=tbl.id,
             params=get_slice_json(
                 defaults,
-                viz_type='big_number_total', granularity_sqla='ds',
-                filters=[{
-                    'col': 'gender',
-                    'op': 'in',
-                    'val': ['girl'],
-                }],
-                subheader='total female participants')),
+                viz_type="big_number_total",
+                granularity_sqla="ds",
+                filters=[{"col": "gender", "op": "in", "val": ["girl"]}],
+                subheader="total female participants",
+            ),
+        ),
         Slice(
-            slice_name='Number of California Births',
-            viz_type='big_number_total',
-            datasource_type='table',
+            slice_name="Number of California Births",
+            viz_type="big_number_total",
+            datasource_type="table",
             datasource_id=tbl.id,
             params=get_slice_json(
                 defaults,
                 metric={
-                    'expressionType': 'SIMPLE',
-                    'column': {
-                        'column_name': 'num_california',
-                        'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
+                    "expressionType": "SIMPLE",
+                    "column": {
+                        "column_name": "num_california",
+                        "expression": "CASE WHEN state = 'CA' THEN num ELSE 0 END",
                     },
-                    'aggregate': 'SUM',
-                    'label': 'SUM(num_california)',
+                    "aggregate": "SUM",
+                    "label": "SUM(num_california)",
                 },
-                viz_type='big_number_total',
-                granularity_sqla='ds')),
+                viz_type="big_number_total",
+                granularity_sqla="ds",
+            ),
+        ),
         Slice(
-            slice_name='Top 10 California Names Timeseries',
-            viz_type='line',
-            datasource_type='table',
+            slice_name="Top 10 California Names Timeseries",
+            viz_type="line",
+            datasource_type="table",
             datasource_id=tbl.id,
             params=get_slice_json(
                 defaults,
-                metrics=[{
-                    'expressionType': 'SIMPLE',
-                    'column': {
-                        'column_name': 'num_california',
-                        'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
-                    },
-                    'aggregate': 'SUM',
-                    'label': 'SUM(num_california)',
-                }],
-                viz_type='line',
-                granularity_sqla='ds',
-                groupby=['name'],
+                metrics=[
+                    {
+                        "expressionType": "SIMPLE",
+                        "column": {
+                            "column_name": "num_california",
+                            "expression": "CASE WHEN state = 'CA' THEN num ELSE 0 END",
+                        },
+                        "aggregate": "SUM",
+                        "label": "SUM(num_california)",
+                    }
+                ],
+                viz_type="line",
+                granularity_sqla="ds",
+                groupby=["name"],
                 timeseries_limit_metric={
-                    'expressionType': 'SIMPLE',
-                    'column': {
-                        'column_name': 'num_california',
-                        'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
+                    "expressionType": "SIMPLE",
+                    "column": {
+                        "column_name": "num_california",
+                        "expression": "CASE WHEN state = 'CA' THEN num ELSE 0 END",
                     },
-                    'aggregate': 'SUM',
-                    'label': 'SUM(num_california)',
+                    "aggregate": "SUM",
+                    "label": "SUM(num_california)",
                 },
-                limit='10')),
+                limit="10",
+            ),
+        ),
         Slice(
-            slice_name='Names Sorted by Num in California',
-            viz_type='table',
-            datasource_type='table',
+            slice_name="Names Sorted by Num in California",
+            viz_type="table",
+            datasource_type="table",
             datasource_id=tbl.id,
             params=get_slice_json(
                 defaults,
-                groupby=['name'],
+                groupby=["name"],
                 row_limit=50,
                 timeseries_limit_metric={
-                    'expressionType': 'SIMPLE',
-                    'column': {
-                        'column_name': 'num_california',
-                        'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
+                    "expressionType": "SIMPLE",
+                    "column": {
+                        "column_name": "num_california",
+                        "expression": "CASE WHEN state = 'CA' THEN num ELSE 0 END",
                     },
-                    'aggregate': 'SUM',
-                    'label': 'SUM(num_california)',
-                })),
+                    "aggregate": "SUM",
+                    "label": "SUM(num_california)",
+                },
+            ),
+        ),
         Slice(
-            slice_name='Num Births Trend',
-            viz_type='line',
-            datasource_type='table',
+            slice_name="Num Births Trend",
+            viz_type="line",
+            datasource_type="table",
             datasource_id=tbl.id,
-            params=get_slice_json(
-                defaults,
-                viz_type='line')),
+            params=get_slice_json(defaults, viz_type="line"),
+        ),
         Slice(
-            slice_name='Daily Totals',
-            viz_type='table',
-            datasource_type='table',
+            slice_name="Daily Totals",
+            viz_type="table",
+            datasource_type="table",
             datasource_id=tbl.id,
             created_by=admin,
             params=get_slice_json(
                 defaults,
-                groupby=['ds'],
-                since='40 years ago',
-                until='now',
-                viz_type='table')),
+                groupby=["ds"],
+                since="40 years ago",
+                until="now",
+                viz_type="table",
+            ),
+        ),
     ]
     for slc in slices:
         merge_slice(slc)
 
-    print('Creating a dashboard')
-    dash = db.session.query(Dash).filter_by(dashboard_title='Births').first()
+    print("Creating a dashboard")
+    dash = db.session.query(Dash).filter_by(dashboard_title="Births").first()
 
     if not dash:
         dash = Dash()
-    js = textwrap.dedent("""\
+    js = textwrap.dedent(
+        # pylint: disable=line-too-long
+        """\
 {
     "CHART-0dd270f0": {
         "meta": {
@@ -614,13 +635,15 @@ def load_birth_names():
     },
     "DASHBOARD_VERSION_KEY": "v2"
 }
-        """)
+        """
+        # pylint: enable=line-too-long
+    )
     pos = json.loads(js)
     # dashboard v2 doesn't allow add markup slice
-    dash.slices = [slc for slc in slices if slc.viz_type != 'markup']
+    dash.slices = [slc for slc in slices if slc.viz_type != "markup"]
     update_slice_ids(pos, dash.slices)
-    dash.dashboard_title = 'Births'
+    dash.dashboard_title = "Births"
     dash.position_json = json.dumps(pos, indent=4)
-    dash.slug = 'births'
+    dash.slug = "births"
     db.session.merge(dash)
     db.session.commit()
diff --git a/superset/data/countries.py b/superset/data/countries.py
index 24981cd..3e90bc7 100644
--- a/superset/data/countries.py
+++ b/superset/data/countries.py
@@ -24,7 +24,7 @@ countries = [
         "capital": "Luanda",
         "lat": -12.5,
         "lng": 18.5,
-        "cca3": "AGO"
+        "cca3": "AGO",
     },
     {
         "name": "Algeria",
@@ -34,7 +34,7 @@ countries = [
         "capital": "Algiers",
         "lat": 28,
         "lng": 3,
-        "cca3": "DZA"
+        "cca3": "DZA",
     },
     {
         "name": "Egypt",
@@ -44,7 +44,7 @@ countries = [
         "capital": "Cairo",
         "lat": 27,
         "lng": 30,
-        "cca3": "EGY"
+        "cca3": "EGY",
     },
     {
         "name": "Bangladesh",
@@ -54,7 +54,7 @@ countries = [
         "capital": "Dhaka",
         "lat": 24,
         "lng": 90,
-        "cca3": "BGD"
+        "cca3": "BGD",
     },
     {
         "name": "Niger",
@@ -64,7 +64,7 @@ countries = [
         "capital": "Niamey",
         "lat": 16,
         "lng": 8,
-        "cca3": "NER"
+        "cca3": "NER",
     },
     {
         "name": "Liechtenstein",
@@ -74,7 +74,7 @@ countries = [
         "capital": "Vaduz",
         "lat": 47.26666666,
         "lng": 9.53333333,
-        "cca3": "LIE"
+        "cca3": "LIE",
     },
     {
         "name": "Namibia",
@@ -84,7 +84,7 @@ countries = [
         "capital": "Windhoek",
         "lat": -22,
         "lng": 17,
-        "cca3": "NAM"
+        "cca3": "NAM",
     },
     {
         "name": "Bulgaria",
@@ -94,7 +94,7 @@ countries = [
         "capital": "Sofia",
         "lat": 43,
         "lng": 25,
-        "cca3": "BGR"
+        "cca3": "BGR",
     },
     {
         "name": "Bolivia",
@@ -104,7 +104,7 @@ countries = [
         "capital": "Sucre",
         "lat": -17,
         "lng": -65,
-        "cca3": "BOL"
+        "cca3": "BOL",
     },
     {
         "name": "Ghana",
@@ -114,7 +114,7 @@ countries = [
         "capital": "Accra",
         "lat": 8,
         "lng": -2,
-        "cca3": "GHA"
+        "cca3": "GHA",
     },
     {
         "name": "Cocos (Keeling) Islands",
@@ -124,7 +124,7 @@ countries = [
         "capital": "West Island",
         "lat": -12.5,
         "lng": 96.83333333,
-        "cca3": "CCK"
+        "cca3": "CCK",
     },
     {
         "name": "Pakistan",
@@ -134,7 +134,7 @@ countries = [
         "capital": "Islamabad",
         "lat": 30,
         "lng": 70,
-        "cca3": "PAK"
+        "cca3": "PAK",
     },
     {
         "name": "Cape Verde",
@@ -144,7 +144,7 @@ countries = [
         "capital": "Praia",
         "lat": 16,
         "lng": -24,
-        "cca3": "CPV"
+        "cca3": "CPV",
     },
     {
         "name": "Jordan",
@@ -154,7 +154,7 @@ countries = [
         "capital": "Amman",
         "lat": 31,
         "lng": 36,
-        "cca3": "JOR"
+        "cca3": "JOR",
     },
     {
         "name": "Liberia",
@@ -164,7 +164,7 @@ countries = [
         "capital": "Monrovia",
         "lat": 6.5,
         "lng": -9.5,
-        "cca3": "LBR"
+        "cca3": "LBR",
     },
     {
         "name": "Libya",
@@ -174,7 +174,7 @@ countries = [
         "capital": "Tripoli",
         "lat": 25,
         "lng": 17,
-        "cca3": "LBY"
+        "cca3": "LBY",
     },
     {
         "name": "Malaysia",
@@ -184,7 +184,7 @@ countries = [
         "capital": "Kuala Lumpur",
         "lat": 2.5,
         "lng": 112.5,
-        "cca3": "MYS"
+        "cca3": "MYS",
     },
     {
         "name": "Dominican Republic",
@@ -194,7 +194,7 @@ countries = [
         "capital": "Santo Domingo",
         "lat": 19,
         "lng": -70.66666666,
-        "cca3": "DOM"
+        "cca3": "DOM",
     },
     {
         "name": "Puerto Rico",
@@ -204,7 +204,7 @@ countries = [
         "capital": "San Juan",
         "lat": 18.25,
         "lng": -66.5,
-        "cca3": "PRI"
+        "cca3": "PRI",
     },
     {
         "name": "Mayotte",
@@ -214,7 +214,7 @@ countries = [
         "capital": "Mamoudzou",
         "lat": -12.83333333,
         "lng": 45.16666666,
-        "cca3": "MYT"
+        "cca3": "MYT",
     },
     {
         "name": "North Korea",
@@ -224,7 +224,7 @@ countries = [
         "capital": "Pyongyang",
         "lat": 40,
         "lng": 127,
-        "cca3": "PRK"
+        "cca3": "PRK",
     },
     {
         "name": "Palestine",
@@ -234,7 +234,7 @@ countries = [
         "capital": "Ramallah",
         "lat": 31.9,
         "lng": 35.2,
-        "cca3": "PSE"
+        "cca3": "PSE",
     },
     {
         "name": "Tanzania",
@@ -244,7 +244,7 @@ countries = [
         "capital": "Dodoma",
         "lat": -6,
         "lng": 35,
-        "cca3": "TZA"
+        "cca3": "TZA",
     },
     {
         "name": "Botswana",
@@ -254,7 +254,7 @@ countries = [
         "capital": "Gaborone",
         "lat": -22,
         "lng": 24,
-        "cca3": "BWA"
+        "cca3": "BWA",
     },
     {
         "name": "Cambodia",
@@ -264,7 +264,7 @@ countries = [
         "capital": "Phnom Penh",
         "lat": 13,
         "lng": 105,
-        "cca3": "KHM"
+        "cca3": "KHM",
     },
     {
         "name": "Nicaragua",
@@ -274,7 +274,7 @@ countries = [
         "capital": "Managua",
         "lat": 13,
         "lng": -85,
-        "cca3": "NIC"
+        "cca3": "NIC",
     },
     {
         "name": "Trinidad and Tobago",
@@ -284,7 +284,7 @@ countries = [
         "capital": "Port of Spain",
         "lat": 11,
         "lng": -61,
-        "cca3": "TTO"
+        "cca3": "TTO",
     },
     {
         "name": "Ethiopia",
@@ -294,7 +294,7 @@ countries = [
         "capital": "Addis Ababa",
         "lat": 8,
         "lng": 38,
-        "cca3": "ETH"
+        "cca3": "ETH",
     },
     {
         "name": "Paraguay",
@@ -304,7 +304,7 @@ countries = [
         "capital": "Asuncion",
         "lat": -23,
         "lng": -58,
-        "cca3": "PRY"
+        "cca3": "PRY",
     },
     {
         "name": "Hong Kong",
@@ -314,7 +314,7 @@ countries = [
         "capital": "City of Victoria",
         "lat": 22.267,
         "lng": 114.188,
-        "cca3": "HKG"
+        "cca3": "HKG",
     },
     {
         "name": "Saudi Arabia",
@@ -324,7 +324,7 @@ countries = [
         "capital": "Riyadh",
         "lat": 25,
         "lng": 45,
-        "cca3": "SAU"
+        "cca3": "SAU",
     },
     {
         "name": "Lebanon",
@@ -334,7 +334,7 @@ countries = [
         "capital": "Beirut",
         "lat": 33.83333333,
         "lng": 35.83333333,
-        "cca3": "LBN"
+        "cca3": "LBN",
     },
     {
         "name": "Slovenia",
@@ -344,7 +344,7 @@ countries = [
         "capital": "Ljubljana",
         "lat": 46.11666666,
         "lng": 14.81666666,
-        "cca3": "SVN"
+        "cca3": "SVN",
     },
     {
         "name": "Burkina Faso",
@@ -354,7 +354,7 @@ countries = [
         "capital": "Ouagadougou",
         "lat": 13,
         "lng": -2,
-        "cca3": "BFA"
+        "cca3": "BFA",
     },
     {
         "name": "Switzerland",
@@ -364,7 +364,7 @@ countries = [
         "capital": "Bern",
         "lat": 47,
         "lng": 8,
-        "cca3": "CHE"
+        "cca3": "CHE",
     },
     {
         "name": "Mauritania",
@@ -374,7 +374,7 @@ countries = [
         "capital": "Nouakchott",
         "lat": 20,
         "lng": -12,
-        "cca3": "MRT"
+        "cca3": "MRT",
     },
     {
         "name": "Croatia",
@@ -384,7 +384,7 @@ countries = [
         "capital": "Zagreb",
         "lat": 45.16666666,
         "lng": 15.5,
-        "cca3": "HRV"
+        "cca3": "HRV",
     },
     {
         "name": "Chile",
@@ -394,7 +394,7 @@ countries = [
         "capital": "Santiago",
         "lat": -30,
         "lng": -71,
-        "cca3": "CHL"
+        "cca3": "CHL",
     },
     {
         "name": "China",
@@ -404,7 +404,7 @@ countries = [
         "capital": "Beijing",
         "lat": 35,
         "lng": 105,
-        "cca3": "CHN"
+        "cca3": "CHN",
     },
     {
         "name": "Saint Kitts and Nevis",
@@ -414,7 +414,7 @@ countries = [
         "capital": "Basseterre",
         "lat": 17.33333333,
         "lng": -62.75,
-        "cca3": "KNA"
+        "cca3": "KNA",
     },
     {
         "name": "Sierra Leone",
@@ -424,7 +424,7 @@ countries = [
         "capital": "Freetown",
         "lat": 8.5,
         "lng": -11.5,
-        "cca3": "SLE"
+        "cca3": "SLE",
     },
     {
         "name": "Jamaica",
@@ -434,7 +434,7 @@ countries = [
         "capital": "Kingston",
         "lat": 18.25,
         "lng": -77.5,
-        "cca3": "JAM"
+        "cca3": "JAM",
     },
     {
         "name": "San Marino",
@@ -444,7 +444,7 @@ countries = [
         "capital": "City of San Marino",
         "lat": 43.76666666,
         "lng": 12.41666666,
-        "cca3": "SMR"
+        "cca3": "SMR",
     },
     {
         "name": "Gibraltar",
@@ -454,7 +454,7 @@ countries = [
         "capital": "Gibraltar",
         "lat": 36.13333333,
         "lng": -5.35,
-        "cca3": "GIB"
+        "cca3": "GIB",
     },
     {
         "name": "Djibouti",
@@ -464,7 +464,7 @@ countries = [
         "capital": "Djibouti",
         "lat": 11.5,
         "lng": 43,
-        "cca3": "DJI"
+        "cca3": "DJI",
     },
     {
         "name": "Guinea",
@@ -474,7 +474,7 @@ countries = [
         "capital": "Conakry",
         "lat": 11,
         "lng": -10,
-        "cca3": "GIN"
+        "cca3": "GIN",
     },
     {
         "name": "Finland",
@@ -484,7 +484,7 @@ countries = [
         "capital": "Helsinki",
         "lat": 64,
         "lng": 26,
-        "cca3": "FIN"
+        "cca3": "FIN",
     },
     {
         "name": "Uruguay",
@@ -494,7 +494,7 @@ countries = [
         "capital": "Montevideo",
         "lat": -33,
         "lng": -56,
-        "cca3": "URY"
+        "cca3": "URY",
     },
     {
         "name": "Thailand",
@@ -504,7 +504,7 @@ countries = [
         "capital": "Bangkok",
         "lat": 15,
         "lng": 100,
-        "cca3": "THA"
+        "cca3": "THA",
     },
     {
         "name": "Sao Tome and Principe",
@@ -514,7 +514,7 @@ countries = [
         "capital": "Sao Tome",
         "lat": 1,
         "lng": 7,
-        "cca3": "STP"
+        "cca3": "STP",
     },
     {
         "name": "Seychelles",
@@ -524,7 +524,7 @@ countries = [
         "capital": "Victoria",
         "lat": -4.58333333,
         "lng": 55.66666666,
-        "cca3": "SYC"
+        "cca3": "SYC",
     },
     {
         "name": "Nepal",
@@ -534,7 +534,7 @@ countries = [
         "capital": "Kathmandu",
         "lat": 28,
         "lng": 84,
-        "cca3": "NPL"
+        "cca3": "NPL",
     },
     {
         "name": "Christmas Island",
@@ -544,7 +544,7 @@ countries = [
         "capital": "Flying Fish Cove",
         "lat": -10.5,
         "lng": 105.66666666,
-        "cca3": "CXR"
+        "cca3": "CXR",
     },
     {
         "name": "Laos",
@@ -554,7 +554,7 @@ countries = [
         "capital": "Vientiane",
         "lat": 18,
         "lng": 105,
-        "cca3": "LAO"
+        "cca3": "LAO",
     },
     {
         "name": "Yemen",
@@ -564,7 +564,7 @@ countries = [
         "capital": "Sana'a",
         "lat": 15,
         "lng": 48,
-        "cca3": "YEM"
+        "cca3": "YEM",
     },
     {
         "name": "Bouvet Island",
@@ -574,7 +574,7 @@ countries = [
         "capital": "",
         "lat": -54.43333333,
         "lng": 3.4,
-        "cca3": "BVT"
+        "cca3": "BVT",
     },
     {
         "name": "South Africa",
@@ -584,7 +584,7 @@ countries = [
         "capital": "Pretoria",
         "lat": -29,
         "lng": 24,
-        "cca3": "ZAF"
+        "cca3": "ZAF",
     },
     {
         "name": "Kiribati",
@@ -594,7 +594,7 @@ countries = [
         "capital": "South Tarawa",
         "lat": 1.41666666,
         "lng": 173,
-        "cca3": "KIR"
+        "cca3": "KIR",
     },
     {
         "name": "Philippines",
@@ -604,7 +604,7 @@ countries = [
         "capital": "Manila",
         "lat": 13,
         "lng": 122,
-        "cca3": "PHL"
+        "cca3": "PHL",
     },
     {
         "name": "Sint Maarten",
@@ -614,7 +614,7 @@ countries = [
         "capital": "Philipsburg",
         "lat": 18.033333,
         "lng": -63.05,
-        "cca3": "SXM"
+        "cca3": "SXM",
     },
     {
         "name": "Romania",
@@ -624,7 +624,7 @@ countries = [
         "capital": "Bucharest",
         "lat": 46,
         "lng": 25,
-        "cca3": "ROU"
+        "cca3": "ROU",
     },
     {
         "name": "United States Virgin Islands",
@@ -634,7 +634,7 @@ countries = [
         "capital": "Charlotte Amalie",
         "lat": 18.35,
         "lng": -64.933333,
-        "cca3": "VIR"
+        "cca3": "VIR",
     },
     {
         "name": "Syria",
@@ -644,7 +644,7 @@ countries = [
         "capital": "Damascus",
         "lat": 35,
         "lng": 38,
-        "cca3": "SYR"
+        "cca3": "SYR",
     },
     {
         "name": "Macau",
@@ -654,7 +654,7 @@ countries = [
         "capital": "",
         "lat": 22.16666666,
         "lng": 113.55,
-        "cca3": "MAC"
+        "cca3": "MAC",
     },
     {
         "name": "Saint Martin",
@@ -664,7 +664,7 @@ countries = [
         "capital": "Marigot",
         "lat": 18.08333333,
         "lng": -63.95,
-        "cca3": "MAF"
+        "cca3": "MAF",
     },
     {
         "name": "Malta",
@@ -674,7 +674,7 @@ countries = [
         "capital": "Valletta",
         "lat": 35.83333333,
         "lng": 14.58333333,
-        "cca3": "MLT"
+        "cca3": "MLT",
     },
     {
         "name": "Kazakhstan",
@@ -684,7 +684,7 @@ countries = [
         "capital": "Astana",
         "lat": 48,
         "lng": 68,
-        "cca3": "KAZ"
+        "cca3": "KAZ",
     },
     {
         "name": "Turks and Caicos Islands",
@@ -694,7 +694,7 @@ countries = [
         "capital": "Cockburn Town",
         "lat": 21.75,
         "lng": -71.58333333,
-        "cca3": "TCA"
+        "cca3": "TCA",
     },
     {
         "name": "French Polynesia",
@@ -704,7 +704,7 @@ countries = [
         "capital": "Papeete",
         "lat": -15,
         "lng": -140,
-        "cca3": "PYF"
+        "cca3": "PYF",
     },
     {
         "name": "Niue",
@@ -714,7 +714,7 @@ countries = [
         "capital": "Alofi",
         "lat": -19.03333333,
         "lng": -169.86666666,
-        "cca3": "NIU"
+        "cca3": "NIU",
     },
     {
         "name": "Dominica",
@@ -724,7 +724,7 @@ countries = [
         "capital": "Roseau",
         "lat": 15.41666666,
         "lng": -61.33333333,
-        "cca3": "DMA"
+        "cca3": "DMA",
     },
     {
         "name": "Benin",
@@ -734,7 +734,7 @@ countries = [
         "capital": "Porto-Novo",
         "lat": 9.5,
         "lng": 2.25,
-        "cca3": "BEN"
+        "cca3": "BEN",
     },
     {
         "name": "French Guiana",
@@ -744,7 +744,7 @@ countries = [
         "capital": "Cayenne",
         "lat": 4,
         "lng": -53,
-        "cca3": "GUF"
+        "cca3": "GUF",
     },
     {
         "name": "Belgium",
@@ -754,7 +754,7 @@ countries = [
         "capital": "Brussels",
         "lat": 50.83333333,
         "lng": 4,
-        "cca3": "BEL"
+        "cca3": "BEL",
     },
     {
         "name": "Montserrat",
@@ -764,7 +764,7 @@ countries = [
         "capital": "Plymouth",
         "lat": 16.75,
         "lng": -62.2,
-        "cca3": "MSR"
+        "cca3": "MSR",
     },
     {
         "name": "Togo",
@@ -774,7 +774,7 @@ countries = [
         "capital": "Lome",
         "lat": 8,
         "lng": 1.16666666,
-        "cca3": "TGO"
+        "cca3": "TGO",
     },
     {
         "name": "Germany",
@@ -784,7 +784,7 @@ countries = [
         "capital": "Berlin",
         "lat": 51,
         "lng": 9,
-        "cca3": "DEU"
+        "cca3": "DEU",
     },
     {
         "name": "Guam",
@@ -794,7 +794,7 @@ countries = [
         "capital": "Hagatna",
         "lat": 13.46666666,
         "lng": 144.78333333,
-        "cca3": "GUM"
+        "cca3": "GUM",
     },
     {
         "name": "Sri Lanka",
@@ -804,7 +804,7 @@ countries = [
         "capital": "Colombo",
         "lat": 7,
         "lng": 81,
-        "cca3": "LKA"
+        "cca3": "LKA",
     },
     {
         "name": "South Sudan",
@@ -814,7 +814,7 @@ countries = [
         "capital": "Juba",
         "lat": 7,
         "lng": 30,
-        "cca3": "SSD"
+        "cca3": "SSD",
     },
     {
         "name": "Falkland Islands",
@@ -824,7 +824,7 @@ countries = [
         "capital": "Stanley",
         "lat": -51.75,
         "lng": -59,
-        "cca3": "FLK"
+        "cca3": "FLK",
     },
     {
         "name": "United Kingdom",
@@ -834,7 +834,7 @@ countries = [
         "capital": "London",
         "lat": 54,
         "lng": -2,
-        "cca3": "GBR"
+        "cca3": "GBR",
     },
     {
         "name": "Guyana",
@@ -844,7 +844,7 @@ countries = [
         "capital": "Georgetown",
         "lat": 5,
         "lng": -59,
-        "cca3": "GUY"
+        "cca3": "GUY",
     },
     {
         "name": "Costa Rica",
@@ -854,7 +854,7 @@ countries = [
         "capital": "San Jose",
         "lat": 10,
         "lng": -84,
-        "cca3": "CRI"
+        "cca3": "CRI",
     },
     {
         "name": "Cameroon",
@@ -864,7 +864,7 @@ countries = [
         "capital": "Yaounde",
         "lat": 6,
         "lng": 12,
-        "cca3": "CMR"
+        "cca3": "CMR",
     },
     {
         "name": "Morocco",
@@ -874,7 +874,7 @@ countries = [
         "capital": "Rabat",
         "lat": 32,
         "lng": -5,
-        "cca3": "MAR"
+        "cca3": "MAR",
     },
     {
         "name": "Northern Mariana Islands",
@@ -884,7 +884,7 @@ countries = [
         "capital": "Saipan",
         "lat": 15.2,
         "lng": 145.75,
-        "cca3": "MNP"
+        "cca3": "MNP",
     },
     {
         "name": "Lesotho",
@@ -894,7 +894,7 @@ countries = [
         "capital": "Maseru",
         "lat": -29.5,
         "lng": 28.5,
-        "cca3": "LSO"
+        "cca3": "LSO",
     },
     {
         "name": "Hungary",
@@ -904,7 +904,7 @@ countries = [
         "capital": "Budapest",
         "lat": 47,
         "lng": 20,
-        "cca3": "HUN"
+        "cca3": "HUN",
     },
     {
         "name": "Turkmenistan",
@@ -914,7 +914,7 @@ countries = [
         "capital": "Ashgabat",
         "lat": 40,
         "lng": 60,
-        "cca3": "TKM"
+        "cca3": "TKM",
     },
     {
         "name": "Suriname",
@@ -924,7 +924,7 @@ countries = [
         "capital": "Paramaribo",
         "lat": 4,
         "lng": -56,
-        "cca3": "SUR"
+        "cca3": "SUR",
     },
     {
         "name": "Netherlands",
@@ -934,7 +934,7 @@ countries = [
         "capital": "Amsterdam",
         "lat": 52.5,
         "lng": 5.75,
-        "cca3": "NLD"
+        "cca3": "NLD",
     },
     {
         "name": "Bermuda",
@@ -944,7 +944,7 @@ countries = [
         "capital": "Hamilton",
         "lat": 32.33333333,
         "lng": -64.75,
-        "cca3": "BMU"
+        "cca3": "BMU",
     },
     {
         "name": "Heard Island and McDonald Islands",
@@ -954,7 +954,7 @@ countries = [
         "capital": "",
         "lat": -53.1,
         "lng": 72.51666666,
-        "cca3": "HMD"
+        "cca3": "HMD",
     },
     {
         "name": "Chad",
@@ -964,7 +964,7 @@ countries = [
         "capital": "N'Djamena",
         "lat": 15,
         "lng": 19,
-        "cca3": "TCD"
+        "cca3": "TCD",
     },
     {
         "name": "Georgia",
@@ -974,7 +974,7 @@ countries = [
         "capital": "Tbilisi",
         "lat": 42,
         "lng": 43.5,
-        "cca3": "GEO"
+        "cca3": "GEO",
     },
     {
         "name": "Montenegro",
@@ -984,7 +984,7 @@ countries = [
         "capital": "Podgorica",
         "lat": 42.5,
         "lng": 19.3,
-        "cca3": "MNE"
+        "cca3": "MNE",
     },
     {
         "name": "Mongolia",
@@ -994,7 +994,7 @@ countries = [
         "capital": "Ulan Bator",
         "lat": 46,
         "lng": 105,
-        "cca3": "MNG"
+        "cca3": "MNG",
     },
     {
         "name": "Marshall Islands",
@@ -1004,7 +1004,7 @@ countries = [
         "capital": "Majuro",
         "lat": 9,
         "lng": 168,
-        "cca3": "MHL"
+        "cca3": "MHL",
     },
     {
         "name": "Martinique",
@@ -1014,7 +1014,7 @@ countries = [
         "capital": "Fort-de-France",
         "lat": 14.666667,
         "lng": -61,
-        "cca3": "MTQ"
+        "cca3": "MTQ",
     },
     {
         "name": "Belize",
@@ -1024,7 +1024,7 @@ countries = [
         "capital": "Belmopan",
         "lat": 17.25,
         "lng": -88.75,
-        "cca3": "BLZ"
+        "cca3": "BLZ",
     },
     {
         "name": "Norfolk Island",
@@ -1034,7 +1034,7 @@ countries = [
         "capital": "Kingston",
         "lat": -29.03333333,
         "lng": 167.95,
-        "cca3": "NFK"
+        "cca3": "NFK",
     },
     {
         "name": "Myanmar",
@@ -1044,7 +1044,7 @@ countries = [
         "capital": "Naypyidaw",
         "lat": 22,
         "lng": 98,
-        "cca3": "MMR"
+        "cca3": "MMR",
     },
     {
         "name": "Afghanistan",
@@ -1054,7 +1054,7 @@ countries = [
         "capital": "Kabul",
         "lat": 33,
         "lng": 65,
-        "cca3": "AFG"
+        "cca3": "AFG",
     },
     {
         "name": "Burundi",
@@ -1064,7 +1064,7 @@ countries = [
         "capital": "Bujumbura",
         "lat": -3.5,
         "lng": 30,
-        "cca3": "BDI"
+        "cca3": "BDI",
     },
     {
         "name": "British Virgin Islands",
@@ -1074,7 +1074,7 @@ countries = [
         "capital": "Road Town",
         "lat": 18.431383,
         "lng": -64.62305,
-        "cca3": "VGB"
+        "cca3": "VGB",
     },
     {
         "name": "Belarus",
@@ -1084,7 +1084,7 @@ countries = [
         "capital": "Minsk",
         "lat": 53,
         "lng": 28,
-        "cca3": "BLR"
+        "cca3": "BLR",
     },
     {
         "name": "Saint Barthelemy",
@@ -1094,7 +1094,7 @@ countries = [
         "capital": "Gustavia",
         "lat": 18.5,
         "lng": -63.41666666,
-        "cca3": "BLM"
+        "cca3": "BLM",
     },
     {
         "name": "Grenada",
@@ -1104,7 +1104,7 @@ countries = [
         "capital": "St. George's",
         "lat": 12.11666666,
         "lng": -61.66666666,
-        "cca3": "GRD"
+        "cca3": "GRD",
     },
     {
         "name": "Tokelau",
@@ -1114,7 +1114,7 @@ countries = [
         "capital": "Fakaofo",
         "lat": -9,
         "lng": -172,
-        "cca3": "TKL"
+        "cca3": "TKL",
     },
     {
         "name": "Greece",
@@ -1124,7 +1124,7 @@ countries = [
         "capital": "Athens",
         "lat": 39,
         "lng": 22,
-        "cca3": "GRC"
+        "cca3": "GRC",
     },
     {
         "name": "Russia",
@@ -1134,7 +1134,7 @@ countries = [
         "capital": "Moscow",
         "lat": 60,
         "lng": 100,
-        "cca3": "RUS"
+        "cca3": "RUS",
     },
     {
         "name": "Greenland",
@@ -1144,7 +1144,7 @@ countries = [
         "capital": "Nuuk",
         "lat": 72,
         "lng": -40,
-        "cca3": "GRL"
+        "cca3": "GRL",
     },
     {
         "name": "Andorra",
@@ -1154,7 +1154,7 @@ countries = [
         "capital": "Andorra la Vella",
         "lat": 42.5,
         "lng": 1.5,
-        "cca3": "AND"
+        "cca3": "AND",
     },
     {
         "name": "Mozambique",
@@ -1164,7 +1164,7 @@ countries = [
         "capital": "Maputo",
         "lat": -18.25,
         "lng": 35,
-        "cca3": "MOZ"
+        "cca3": "MOZ",
     },
     {
         "name": "Tajikistan",
@@ -1174,7 +1174,7 @@ countries = [
         "capital": "Dushanbe",
         "lat": 39,
         "lng": 71,
-        "cca3": "TJK"
+        "cca3": "TJK",
     },
     {
         "name": "Haiti",
@@ -1184,7 +1184,7 @@ countries = [
         "capital": "Port-au-Prince",
         "lat": 19,
         "lng": -72.41666666,
-        "cca3": "HTI"
+        "cca3": "HTI",
     },
     {
         "name": "Mexico",
@@ -1194,7 +1194,7 @@ countries = [
         "capital": "Mexico City",
         "lat": 23,
         "lng": -102,
-        "cca3": "MEX"
+        "cca3": "MEX",
     },
     {
         "name": "Zimbabwe",
@@ -1204,7 +1204,7 @@ countries = [
         "capital": "Harare",
         "lat": -20,
         "lng": 30,
-        "cca3": "ZWE"
+        "cca3": "ZWE",
     },
     {
         "name": "Saint Lucia",
@@ -1214,7 +1214,7 @@ countries = [
         "capital": "Castries",
         "lat": 13.88333333,
         "lng": -60.96666666,
-        "cca3": "LCA"
+        "cca3": "LCA",
     },
     {
         "name": "India",
@@ -1224,7 +1224,7 @@ countries = [
         "capital": "New Delhi",
         "lat": 20,
         "lng": 77,
-        "cca3": "IND"
+        "cca3": "IND",
     },
     {
         "name": "Latvia",
@@ -1234,7 +1234,7 @@ countries = [
         "capital": "Riga",
         "lat": 57,
         "lng": 25,
-        "cca3": "LVA"
+        "cca3": "LVA",
     },
     {
         "name": "Bhutan",
@@ -1244,7 +1244,7 @@ countries = [
         "capital": "Thimphu",
         "lat": 27.5,
         "lng": 90.5,
-        "cca3": "BTN"
+        "cca3": "BTN",
     },
     {
         "name": "Saint Vincent and the Grenadines",
@@ -1254,7 +1254,7 @@ countries = [
         "capital": "Kingstown",
         "lat": 13.25,
         "lng": -61.2,
-        "cca3": "VCT"
+        "cca3": "VCT",
     },
     {
         "name": "Vietnam",
@@ -1264,7 +1264,7 @@ countries = [
         "capital": "Hanoi",
         "lat": 16.16666666,
         "lng": 107.83333333,
-        "cca3": "VNM"
+        "cca3": "VNM",
     },
     {
         "name": "Norway",
@@ -1274,7 +1274,7 @@ countries = [
         "capital": "Oslo",
         "lat": 62,
         "lng": 10,
-        "cca3": "NOR"
+        "cca3": "NOR",
     },
     {
         "name": "Czech Republic",
@@ -1284,7 +1284,7 @@ countries = [
         "capital": "Prague",
         "lat": 49.75,
         "lng": 15.5,
-        "cca3": "CZE"
+        "cca3": "CZE",
     },
     {
         "name": "French Southern and Antarctic Lands",
@@ -1294,7 +1294,7 @@ countries = [
         "capital": "Port-aux-Francais",
         "lat": -49.25,
         "lng": 69.167,
-        "cca3": "ATF"
+        "cca3": "ATF",
     },
     {
         "name": "Antigua and Barbuda",
@@ -1304,7 +1304,7 @@ countries = [
         "capital": "Saint John's",
         "lat": 17.05,
         "lng": -61.8,
-        "cca3": "ATG"
+        "cca3": "ATG",
     },
     {
         "name": "Fiji",
@@ -1314,7 +1314,7 @@ countries = [
         "capital": "Suva",
         "lat": -18,
         "lng": 175,
-        "cca3": "FJI"
+        "cca3": "FJI",
     },
     {
         "name": "British Indian Ocean Territory",
@@ -1324,7 +1324,7 @@ countries = [
         "capital": "Diego Garcia",
         "lat": -6,
         "lng": 71.5,
-        "cca3": "IOT"
+        "cca3": "IOT",
     },
     {
         "name": "Honduras",
@@ -1334,7 +1334,7 @@ countries = [
         "capital": "Tegucigalpa",
         "lat": 15,
         "lng": -86.5,
-        "cca3": "HND"
+        "cca3": "HND",
     },
     {
         "name": "Mauritius",
@@ -1344,7 +1344,7 @@ countries = [
         "capital": "Port Louis",
         "lat": -20.28333333,
         "lng": 57.55,
-        "cca3": "MUS"
+        "cca3": "MUS",
     },
     {
         "name": "Antarctica",
@@ -1354,7 +1354,7 @@ countries = [
         "capital": "",
         "lat": -90,
         "lng": 0,
-        "cca3": "ATA"
+        "cca3": "ATA",
     },
     {
         "name": "Luxembourg",
@@ -1364,7 +1364,7 @@ countries = [
         "capital": "Luxembourg",
         "lat": 49.75,
         "lng": 6.16666666,
-        "cca3": "LUX"
+        "cca3": "LUX",
     },
     {
         "name": "Israel",
@@ -1374,7 +1374,7 @@ countries = [
         "capital": "Jerusalem",
         "lat": 31.47,
         "lng": 35.13,
-        "cca3": "ISR"
+        "cca3": "ISR",
     },
     {
         "name": "Micronesia",
@@ -1384,7 +1384,7 @@ countries = [
         "capital": "Palikir",
         "lat": 6.91666666,
         "lng": 158.25,
-        "cca3": "FSM"
+        "cca3": "FSM",
     },
     {
         "name": "Peru",
@@ -1394,7 +1394,7 @@ countries = [
         "capital": "Lima",
         "lat": -10,
         "lng": -76,
-        "cca3": "PER"
+        "cca3": "PER",
     },
     {
         "name": "Reunion",
@@ -1404,7 +1404,7 @@ countries = [
         "capital": "Saint-Denis",
         "lat": -21.15,
         "lng": 55.5,
-        "cca3": "REU"
+        "cca3": "REU",
     },
     {
         "name": "Indonesia",
@@ -1414,7 +1414,7 @@ countries = [
         "capital": "Jakarta",
         "lat": -5,
         "lng": 120,
-        "cca3": "IDN"
+        "cca3": "IDN",
     },
     {
         "name": "Vanuatu",
@@ -1424,7 +1424,7 @@ countries = [
         "capital": "Port Vila",
         "lat": -16,
         "lng": 167,
-        "cca3": "VUT"
+        "cca3": "VUT",
     },
     {
         "name": "Macedonia",
@@ -1434,7 +1434,7 @@ countries = [
         "capital": "Skopje",
         "lat": 41.83333333,
         "lng": 22,
-        "cca3": "MKD"
+        "cca3": "MKD",
     },
     {
         "name": "DR Congo",
@@ -1444,7 +1444,7 @@ countries = [
         "capital": "Kinshasa",
         "lat": 0,
         "lng": 25,
-        "cca3": "COD"
+        "cca3": "COD",
     },
     {
         "name": "Republic of the Congo",
@@ -1454,7 +1454,7 @@ countries = [
         "capital": "Brazzaville",
         "lat": -1,
         "lng": 15,
-        "cca3": "COG"
+        "cca3": "COG",
     },
     {
         "name": "Iceland",
@@ -1464,7 +1464,7 @@ countries = [
         "capital": "Reykjavik",
         "lat": 65,
         "lng": -18,
-        "cca3": "ISL"
+        "cca3": "ISL",
     },
     {
         "name": "Guadeloupe",
@@ -1474,7 +1474,7 @@ countries = [
         "capital": "Basse-Terre",
         "lat": 16.25,
         "lng": -61.583333,
-        "cca3": "GLP"
+        "cca3": "GLP",
     },
     {
         "name": "Cook Islands",
@@ -1484,7 +1484,7 @@ countries = [
         "capital": "Avarua",
         "lat": -21.23333333,
         "lng": -159.76666666,
-        "cca3": "COK"
+        "cca3": "COK",
     },
     {
         "name": "Comoros",
@@ -1494,7 +1494,7 @@ countries = [
         "capital": "Moroni",
         "lat": -12.16666666,
         "lng": 44.25,
-        "cca3": "COM"
+        "cca3": "COM",
     },
     {
         "name": "Colombia",
@@ -1504,7 +1504,7 @@ countries = [
         "capital": "Bogota",
         "lat": 4,
         "lng": -72,
-        "cca3": "COL"
+        "cca3": "COL",
     },
     {
         "name": "Nigeria",
@@ -1514,7 +1514,7 @@ countries = [
         "capital": "Abuja",
         "lat": 10,
         "lng": 8,
-        "cca3": "NGA"
+        "cca3": "NGA",
     },
     {
         "name": "Timor-Leste",
@@ -1524,7 +1524,7 @@ countries = [
         "capital": "Dili",
         "lat": -8.83333333,
         "lng": 125.91666666,
-        "cca3": "TLS"
+        "cca3": "TLS",
     },
     {
         "name": "Taiwan",
@@ -1534,7 +1534,7 @@ countries = [
         "capital": "Taipei",
         "lat": 23.5,
         "lng": 121,
-        "cca3": "TWN"
+        "cca3": "TWN",
     },
     {
         "name": "Portugal",
@@ -1544,7 +1544,7 @@ countries = [
         "capital": "Lisbon",
         "lat": 39.5,
         "lng": -8,
-        "cca3": "PRT"
+        "cca3": "PRT",
     },
     {
         "name": "Moldova",
@@ -1554,7 +1554,7 @@ countries = [
         "capital": "Chisinau",
         "lat": 47,
         "lng": 29,
-        "cca3": "MDA"
+        "cca3": "MDA",
     },
     {
         "name": "Guernsey",
@@ -1564,7 +1564,7 @@ countries = [
         "capital": "St. Peter Port",
         "lat": 49.46666666,
         "lng": -2.58333333,
-        "cca3": "GGY"
+        "cca3": "GGY",
     },
     {
         "name": "Madagascar",
@@ -1574,7 +1574,7 @@ countries = [
         "capital": "Antananarivo",
         "lat": -20,
         "lng": 47,
-        "cca3": "MDG"
+        "cca3": "MDG",
     },
     {
         "name": "Ecuador",
@@ -1584,7 +1584,7 @@ countries = [
         "capital": "Quito",
         "lat": -2,
         "lng": -77.5,
-        "cca3": "ECU"
+        "cca3": "ECU",
     },
     {
         "name": "Senegal",
@@ -1594,7 +1594,7 @@ countries = [
         "capital": "Dakar",
         "lat": 14,
         "lng": -14,
-        "cca3": "SEN"
+        "cca3": "SEN",
     },
     {
         "name": "New Zealand",
@@ -1604,7 +1604,7 @@ countries = [
         "capital": "Wellington",
         "lat": -41,
         "lng": 174,
-        "cca3": "NZL"
+        "cca3": "NZL",
     },
     {
         "name": "Maldives",
@@ -1614,7 +1614,7 @@ countries = [
         "capital": "Male",
         "lat": 3.25,
         "lng": 73,
-        "cca3": "MDV"
+        "cca3": "MDV",
     },
     {
         "name": "American Samoa",
@@ -1624,7 +1624,7 @@ countries = [
         "capital": "Pago Pago",
         "lat": -14.33333333,
         "lng": -170,
-        "cca3": "ASM"
+        "cca3": "ASM",
     },
     {
         "name": "Saint Pierre and Miquelon",
@@ -1634,7 +1634,7 @@ countries = [
         "capital": "Saint-Pierre",
         "lat": 46.83333333,
         "lng": -56.33333333,
-        "cca3": "SPM"
+        "cca3": "SPM",
     },
     {
         "name": "Curacao",
@@ -1644,7 +1644,7 @@ countries = [
         "capital": "Willemstad",
         "lat": 12.116667,
         "lng": -68.933333,
-        "cca3": "CUW"
+        "cca3": "CUW",
     },
     {
         "name": "France",
@@ -1654,7 +1654,7 @@ countries = [
         "capital": "Paris",
         "lat": 46,
         "lng": 2,
-        "cca3": "FRA"
+        "cca3": "FRA",
     },
     {
         "name": "Lithuania",
@@ -1664,7 +1664,7 @@ countries = [
         "capital": "Vilnius",
         "lat": 56,
         "lng": 24,
-        "cca3": "LTU"
+        "cca3": "LTU",
     },
     {
         "name": "Rwanda",
@@ -1674,7 +1674,7 @@ countries = [
         "capital": "Kigali",
         "lat": -2,
         "lng": 30,
-        "cca3": "RWA"
+        "cca3": "RWA",
     },
     {
         "name": "Zambia",
@@ -1684,7 +1684,7 @@ countries = [
         "capital": "Lusaka",
         "lat": -15,
         "lng": 30,
-        "cca3": "ZMB"
+        "cca3": "ZMB",
     },
     {
         "name": "Gambia",
@@ -1694,7 +1694,7 @@ countries = [
         "capital": "Banjul",
         "lat": 13.46666666,
         "lng": -16.56666666,
-        "cca3": "GMB"
+        "cca3": "GMB",
     },
     {
         "name": "Wallis and Futuna",
@@ -1704,7 +1704,7 @@ countries = [
         "capital": "Mata-Utu",
         "lat": -13.3,
         "lng": -176.2,
-        "cca3": "WLF"
+        "cca3": "WLF",
     },
     {
         "name": "Jersey",
@@ -1714,7 +1714,7 @@ countries = [
         "capital": "Saint Helier",
         "lat": 49.25,
         "lng": -2.16666666,
-        "cca3": "JEY"
+        "cca3": "JEY",
     },
     {
         "name": "Faroe Islands",
@@ -1724,7 +1724,7 @@ countries = [
         "capital": "Torshavn",
         "lat": 62,
         "lng": -7,
-        "cca3": "FRO"
+        "cca3": "FRO",
     },
     {
         "name": "Guatemala",
@@ -1734,7 +1734,7 @@ countries = [
         "capital": "Guatemala City",
         "lat": 15.5,
         "lng": -90.25,
-        "cca3": "GTM"
+        "cca3": "GTM",
     },
     {
         "name": "Denmark",
@@ -1744,7 +1744,7 @@ countries = [
         "capital": "Copenhagen",
         "lat": 56,
         "lng": 10,
-        "cca3": "DNK"
+        "cca3": "DNK",
     },
     {
         "name": "Isle of Man",
@@ -1754,7 +1754,7 @@ countries = [
         "capital": "Douglas",
         "lat": 54.25,
         "lng": -4.5,
-        "cca3": "IMN"
+        "cca3": "IMN",
     },
     {
         "name": "Australia",
@@ -1764,7 +1764,7 @@ countries = [
         "capital": "Canberra",
         "lat": -27,
         "lng": 133,
-        "cca3": "AUS"
+        "cca3": "AUS",
     },
     {
         "name": "Austria",
@@ -1774,7 +1774,7 @@ countries = [
         "capital": "Vienna",
         "lat": 47.33333333,
         "lng": 13.33333333,
-        "cca3": "AUT"
+        "cca3": "AUT",
     },
     {
         "name": "Svalbard and Jan Mayen",
@@ -1784,7 +1784,7 @@ countries = [
         "capital": "Longyearbyen",
         "lat": 78,
         "lng": 20,
-        "cca3": "SJM"
+        "cca3": "SJM",
     },
     {
         "name": "Venezuela",
@@ -1794,7 +1794,7 @@ countries = [
         "capital": "Caracas",
         "lat": 8,
         "lng": -66,
-        "cca3": "VEN"
+        "cca3": "VEN",
     },
     {
         "name": "Kosovo",
@@ -1804,7 +1804,7 @@ countries = [
         "capital": "Pristina",
         "lat": 42.666667,
         "lng": 21.166667,
-        "cca3": "UNK"
+        "cca3": "UNK",
     },
     {
         "name": "Palau",
@@ -1814,7 +1814,7 @@ countries = [
         "capital": "Ngerulmud",
         "lat": 7.5,
         "lng": 134.5,
-        "cca3": "PLW"
+        "cca3": "PLW",
     },
     {
         "name": "Kenya",
@@ -1824,7 +1824,7 @@ countries = [
         "capital": "Nairobi",
         "lat": 1,
         "lng": 38,
-        "cca3": "KEN"
+        "cca3": "KEN",
     },
     {
         "name": "Samoa",
@@ -1834,7 +1834,7 @@ countries = [
         "capital": "Apia",
         "lat": -13.58333333,
         "lng": -172.33333333,
-        "cca3": "WSM"
+        "cca3": "WSM",
     },
     {
         "name": "Turkey",
@@ -1844,7 +1844,7 @@ countries = [
         "capital": "Ankara",
         "lat": 39,
         "lng": 35,
-        "cca3": "TUR"
+        "cca3": "TUR",
     },
     {
         "name": "Albania",
@@ -1854,7 +1854,7 @@ countries = [
         "capital": "Tirana",
         "lat": 41,
         "lng": 20,
-        "cca3": "ALB"
+        "cca3": "ALB",
     },
     {
         "name": "Oman",
@@ -1864,7 +1864,7 @@ countries = [
         "capital": "Muscat",
         "lat": 21,
         "lng": 57,
-        "cca3": "OMN"
+        "cca3": "OMN",
     },
     {
         "name": "Tuvalu",
@@ -1874,7 +1874,7 @@ countries = [
         "capital": "Funafuti",
         "lat": -8,
         "lng": 178,
-        "cca3": "TUV"
+        "cca3": "TUV",
     },
     {
         "name": "Aland Islands",
@@ -1884,7 +1884,7 @@ countries = [
         "capital": "Mariehamn",
         "lat": 60.116667,
         "lng": 19.9,
-        "cca3": "ALA"
+        "cca3": "ALA",
     },
     {
         "name": "Brunei",
@@ -1894,7 +1894,7 @@ countries = [
         "capital": "Bandar Seri Begawan",
         "lat": 4.5,
         "lng": 114.66666666,
-        "cca3": "BRN"
+        "cca3": "BRN",
     },
     {
         "name": "Tunisia",
@@ -1904,7 +1904,7 @@ countries = [
         "capital": "Tunis",
         "lat": 34,
         "lng": 9,
-        "cca3": "TUN"
+        "cca3": "TUN",
     },
     {
         "name": "Pitcairn Islands",
@@ -1914,7 +1914,7 @@ countries = [
         "capital": "Adamstown",
         "lat": -25.06666666,
         "lng": -130.1,
-        "cca3": "PCN"
+        "cca3": "PCN",
     },
     {
         "name": "Barbados",
@@ -1924,7 +1924,7 @@ countries = [
         "capital": "Bridgetown",
         "lat": 13.16666666,
         "lng": -59.53333333,
-        "cca3": "BRB"
+        "cca3": "BRB",
     },
     {
         "name": "Brazil",
@@ -1934,7 +1934,7 @@ countries = [
         "capital": "Brasilia",
         "lat": -10,
         "lng": -55,
-        "cca3": "BRA"
+        "cca3": "BRA",
     },
     {
         "name": "Ivory Coast",
@@ -1944,7 +1944,7 @@ countries = [
         "capital": "Yamoussoukro",
         "lat": 8,
         "lng": -5,
-        "cca3": "CIV"
+        "cca3": "CIV",
     },
     {
         "name": "Serbia",
@@ -1954,7 +1954,7 @@ countries = [
         "capital": "Belgrade",
         "lat": 44,
         "lng": 21,
-        "cca3": "SRB"
+        "cca3": "SRB",
     },
     {
         "name": "Equatorial Guinea",
@@ -1964,7 +1964,7 @@ countries = [
         "capital": "Malabo",
         "lat": 2,
         "lng": 10,
-        "cca3": "GNQ"
+        "cca3": "GNQ",
     },
     {
         "name": "United States",
@@ -1974,7 +1974,7 @@ countries = [
         "capital": "Washington D.C.",
         "lat": 38,
         "lng": -97,
-        "cca3": "USA"
+        "cca3": "USA",
     },
     {
         "name": "Qatar",
@@ -1984,7 +1984,7 @@ countries = [
         "capital": "Doha",
         "lat": 25.5,
         "lng": 51.25,
-        "cca3": "QAT"
+        "cca3": "QAT",
     },
     {
         "name": "Sweden",
@@ -1994,7 +1994,7 @@ countries = [
         "capital": "Stockholm",
         "lat": 62,
         "lng": 15,
-        "cca3": "SWE"
+        "cca3": "SWE",
     },
     {
         "name": "Azerbaijan",
@@ -2004,7 +2004,7 @@ countries = [
         "capital": "Baku",
         "lat": 40.5,
         "lng": 47.5,
-        "cca3": "AZE"
+        "cca3": "AZE",
     },
     {
         "name": "Guinea-Bissau",
@@ -2014,7 +2014,7 @@ countries = [
         "capital": "Bissau",
         "lat": 12,
         "lng": -15,
-        "cca3": "GNB"
+        "cca3": "GNB",
     },
     {
         "name": "Swaziland",
@@ -2024,7 +2024,7 @@ countries = [
         "capital": "Lobamba",
         "lat": -26.5,
         "lng": 31.5,
-        "cca3": "SWZ"
+        "cca3": "SWZ",
     },
     {
         "name": "Tonga",
@@ -2034,7 +2034,7 @@ countries = [
         "capital": "Nuku'alofa",
         "lat": -20,
         "lng": -175,
-        "cca3": "TON"
+        "cca3": "TON",
     },
     {
         "name": "Canada",
@@ -2044,7 +2044,7 @@ countries = [
         "capital": "Ottawa",
         "lat": 60,
         "lng": -95,
-        "cca3": "CAN"
+        "cca3": "CAN",
     },
     {
         "name": "Ukraine",
@@ -2054,7 +2054,7 @@ countries = [
         "capital": "Kiev",
         "lat": 49,
         "lng": 32,
-        "cca3": "UKR"
+        "cca3": "UKR",
     },
     {
         "name": "South Korea",
@@ -2064,7 +2064,7 @@ countries = [
         "capital": "Seoul",
         "lat": 37,
         "lng": 127.5,
-        "cca3": "KOR"
+        "cca3": "KOR",
     },
     {
         "name": "Anguilla",
@@ -2074,7 +2074,7 @@ countries = [
         "capital": "The Valley",
         "lat": 18.25,
         "lng": -63.16666666,
-        "cca3": "AIA"
+        "cca3": "AIA",
     },
     {
         "name": "Central African Republic",
@@ -2084,7 +2084,7 @@ countries = [
         "capital": "Bangui",
         "lat": 7,
         "lng": 21,
-        "cca3": "CAF"
+        "cca3": "CAF",
     },
     {
         "name": "Slovakia",
@@ -2094,7 +2094,7 @@ countries = [
         "capital": "Bratislava",
         "lat": 48.66666666,
         "lng": 19.5,
-        "cca3": "SVK"
+        "cca3": "SVK",
     },
     {
         "name": "Cyprus",
@@ -2104,7 +2104,7 @@ countries = [
         "capital": "Nicosia",
         "lat": 35,
         "lng": 33,
-        "cca3": "CYP"
+        "cca3": "CYP",
     },
     {
         "name": "Bosnia and Herzegovina",
@@ -2114,7 +2114,7 @@ countries = [
         "capital": "Sarajevo",
         "lat": 44,
         "lng": 18,
-        "cca3": "BIH"
+        "cca3": "BIH",
     },
     {
         "name": "Singapore",
@@ -2124,7 +2124,7 @@ countries = [
         "capital": "Singapore",
         "lat": 1.36666666,
         "lng": 103.8,
-        "cca3": "SGP"
+        "cca3": "SGP",
     },
     {
         "name": "South Georgia",
@@ -2134,7 +2134,7 @@ countries = [
         "capital": "King Edward Point",
         "lat": -54.5,
         "lng": -37,
-        "cca3": "SGS"
+        "cca3": "SGS",
     },
     {
         "name": "Somalia",
@@ -2144,7 +2144,7 @@ countries = [
         "capital": "Mogadishu",
         "lat": 10,
         "lng": 49,
-        "cca3": "SOM"
+        "cca3": "SOM",
     },
     {
         "name": "Uzbekistan",
@@ -2154,7 +2154,7 @@ countries = [
         "capital": "Tashkent",
         "lat": 41,
         "lng": 64,
-        "cca3": "UZB"
+        "cca3": "UZB",
     },
     {
         "name": "Eritrea",
@@ -2164,7 +2164,7 @@ countries = [
         "capital": "Asmara",
         "lat": 15,
         "lng": 39,
-        "cca3": "ERI"
+        "cca3": "ERI",
     },
     {
         "name": "Poland",
@@ -2174,7 +2174,7 @@ countries = [
         "capital": "Warsaw",
         "lat": 52,
         "lng": 20,
-        "cca3": "POL"
+        "cca3": "POL",
     },
     {
         "name": "Kuwait",
@@ -2184,7 +2184,7 @@ countries = [
         "capital": "Kuwait City",
         "lat": 29.5,
         "lng": 45.75,
-        "cca3": "KWT"
+        "cca3": "KWT",
     },
     {
         "name": "Gabon",
@@ -2194,7 +2194,7 @@ countries = [
         "capital": "Libreville",
         "lat": -1,
         "lng": 11.75,
-        "cca3": "GAB"
+        "cca3": "GAB",
     },
     {
         "name": "Cayman Islands",
@@ -2204,7 +2204,7 @@ countries = [
         "capital": "George Town",
         "lat": 19.5,
         "lng": -80.5,
-        "cca3": "CYM"
+        "cca3": "CYM",
     },
     {
         "name": "Vatican City",
@@ -2214,7 +2214,7 @@ countries = [
         "capital": "Vatican City",
         "lat": 41.9,
         "lng": 12.45,
-        "cca3": "VAT"
+        "cca3": "VAT",
     },
     {
         "name": "Estonia",
@@ -2224,7 +2224,7 @@ countries = [
         "capital": "Tallinn",
         "lat": 59,
         "lng": 26,
-        "cca3": "EST"
+        "cca3": "EST",
     },
     {
         "name": "Malawi",
@@ -2234,7 +2234,7 @@ countries = [
         "capital": "Lilongwe",
         "lat": -13.5,
         "lng": 34,
-        "cca3": "MWI"
+        "cca3": "MWI",
     },
     {
         "name": "Spain",
@@ -2244,7 +2244,7 @@ countries = [
         "capital": "Madrid",
         "lat": 40,
         "lng": -4,
-        "cca3": "ESP"
+        "cca3": "ESP",
     },
     {
         "name": "Iraq",
@@ -2254,7 +2254,7 @@ countries = [
         "capital": "Baghdad",
         "lat": 33,
         "lng": 44,
-        "cca3": "IRQ"
+        "cca3": "IRQ",
     },
     {
         "name": "El Salvador",
@@ -2264,7 +2264,7 @@ countries = [
         "capital": "San Salvador",
         "lat": 13.83333333,
         "lng": -88.91666666,
-        "cca3": "SLV"
+        "cca3": "SLV",
     },
     {
         "name": "Mali",
@@ -2274,7 +2274,7 @@ countries = [
         "capital": "Bamako",
         "lat": 17,
         "lng": -4,
-        "cca3": "MLI"
+        "cca3": "MLI",
     },
     {
         "name": "Ireland",
@@ -2284,7 +2284,7 @@ countries = [
         "capital": "Dublin",
         "lat": 53,
         "lng": -8,
-        "cca3": "IRL"
+        "cca3": "IRL",
     },
     {
         "name": "Iran",
@@ -2294,7 +2294,7 @@ countries = [
         "capital": "Tehran",
         "lat": 32,
         "lng": 53,
-        "cca3": "IRN"
+        "cca3": "IRN",
     },
     {
         "name": "Aruba",
@@ -2304,7 +2304,7 @@ countries = [
         "capital": "Oranjestad",
         "lat": 12.5,
         "lng": -69.96666666,
-        "cca3": "ABW"
+        "cca3": "ABW",
     },
     {
         "name": "Papua New Guinea",
@@ -2314,7 +2314,7 @@ countries = [
         "capital": "Port Moresby",
         "lat": -6,
         "lng": 147,
-        "cca3": "PNG"
+        "cca3": "PNG",
     },
     {
         "name": "Panama",
@@ -2324,7 +2324,7 @@ countries = [
         "capital": "Panama City",
         "lat": 9,
         "lng": -80,
-        "cca3": "PAN"
+        "cca3": "PAN",
     },
     {
         "name": "Sudan",
@@ -2334,7 +2334,7 @@ countries = [
         "capital": "Khartoum",
         "lat": 15,
         "lng": 30,
-        "cca3": "SDN"
+        "cca3": "SDN",
     },
     {
         "name": "Solomon Islands",
@@ -2344,7 +2344,7 @@ countries = [
         "capital": "Honiara",
         "lat": -8,
         "lng": 159,
-        "cca3": "SLB"
+        "cca3": "SLB",
     },
     {
         "name": "Western Sahara",
@@ -2354,7 +2354,7 @@ countries = [
         "capital": "El Aaiun",
         "lat": 24.5,
         "lng": -13,
-        "cca3": "ESH"
+        "cca3": "ESH",
     },
     {
         "name": "Monaco",
@@ -2364,7 +2364,7 @@ countries = [
         "capital": "Monaco",
         "lat": 43.73333333,
         "lng": 7.4,
-        "cca3": "MCO"
+        "cca3": "MCO",
     },
     {
         "name": "Italy",
@@ -2374,7 +2374,7 @@ countries = [
         "capital": "Rome",
         "lat": 42.83333333,
         "lng": 12.83333333,
-        "cca3": "ITA"
+        "cca3": "ITA",
     },
     {
         "name": "Japan",
@@ -2384,7 +2384,7 @@ countries = [
         "capital": "Tokyo",
         "lat": 36,
         "lng": 138,
-        "cca3": "JPN"
+        "cca3": "JPN",
     },
     {
         "name": "Kyrgyzstan",
@@ -2394,7 +2394,7 @@ countries = [
         "capital": "Bishkek",
         "lat": 41,
         "lng": 75,
-        "cca3": "KGZ"
+        "cca3": "KGZ",
     },
     {
         "name": "Uganda",
@@ -2404,7 +2404,7 @@ countries = [
         "capital": "Kampala",
         "lat": 1,
         "lng": 32,
-        "cca3": "UGA"
+        "cca3": "UGA",
     },
     {
         "name": "New Caledonia",
@@ -2414,7 +2414,7 @@ countries = [
         "capital": "Noumea",
         "lat": -21.5,
         "lng": 165.5,
-        "cca3": "NCL"
+        "cca3": "NCL",
     },
     {
         "name": "United Arab Emirates",
@@ -2424,7 +2424,7 @@ countries = [
         "capital": "Abu Dhabi",
         "lat": 24,
         "lng": 54,
-        "cca3": "ARE"
+        "cca3": "ARE",
     },
     {
         "name": "Argentina",
@@ -2434,7 +2434,7 @@ countries = [
         "capital": "Buenos Aires",
         "lat": -34,
         "lng": -64,
-        "cca3": "ARG"
+        "cca3": "ARG",
     },
     {
         "name": "Bahamas",
@@ -2444,7 +2444,7 @@ countries = [
         "capital": "Nassau",
         "lat": 24.25,
         "lng": -76,
-        "cca3": "BHS"
+        "cca3": "BHS",
     },
     {
         "name": "Bahrain",
@@ -2454,7 +2454,7 @@ countries = [
         "capital": "Manama",
         "lat": 26,
         "lng": 50.55,
-        "cca3": "BHR"
+        "cca3": "BHR",
     },
     {
         "name": "Armenia",
@@ -2464,7 +2464,7 @@ countries = [
         "capital": "Yerevan",
         "lat": 40,
         "lng": 45,
-        "cca3": "ARM"
+        "cca3": "ARM",
     },
     {
         "name": "Nauru",
@@ -2474,7 +2474,7 @@ countries = [
         "capital": "Yaren",
         "lat": -0.53333333,
         "lng": 166.91666666,
-        "cca3": "NRU"
+        "cca3": "NRU",
     },
     {
         "name": "Cuba",
@@ -2484,12 +2484,12 @@ countries = [
         "capital": "Havana",
         "lat": 21.5,
         "lng": -80,
-        "cca3": "CUB"
-    }
+        "cca3": "CUB",
+    },
 ]
 
 all_lookups = {}
-lookups = ['cioc', 'cca2', 'cca3', 'name']
+lookups = ["cioc", "cca2", "cca3", "name"]
 for lookup in lookups:
     all_lookups[lookup] = {}
     for country in countries:
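
The countries.py hunk ends where the diff context runs out, so the body of the
lookup loop and any accessor built on top of it are not shown. A minimal sketch
of how such per-field lookup tables are typically completed and used; the guard
against missing keys and the get_country() helper are illustrative assumptions,
not code from this commit:

# Sketch only: completes the truncated loop above, assuming each country dict
# may carry "cioc", "cca2", "cca3", and "name" keys.
all_lookups = {}
lookups = ["cioc", "cca2", "cca3", "name"]
for lookup in lookups:
    all_lookups[lookup] = {}
    for country in countries:
        value = country.get(lookup)
        if value:
            # index each country record by the lowercased field value
            all_lookups[lookup][value.lower()] = country


def get_country(field, symbol):
    """Hypothetical accessor: resolve a record by e.g. cca3 code or name."""
    return all_lookups.get(field, {}).get(symbol.lower())


# get_country("cca3", "USA")  ->  the United States record listed above
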
diff --git a/superset/data/country_map.py b/superset/data/country_map.py
index 303b85d..d2b12cf 100644
--- a/superset/data/country_map.py
+++ b/superset/data/country_map.py
@@ -36,75 +36,71 @@ from .helpers import (
 def load_country_map_data():
     """Loading data for map with country map"""
     csv_bytes = get_example_data(
-        'birth_france_data_for_country_map.csv', is_gzip=False, make_bytes=True)
-    data = pd.read_csv(csv_bytes, encoding='utf-8')
-    data['dttm'] = datetime.datetime.now().date()
+        "birth_france_data_for_country_map.csv", is_gzip=False, make_bytes=True
+    )
+    data = pd.read_csv(csv_bytes, encoding="utf-8")
+    data["dttm"] = datetime.datetime.now().date()
     data.to_sql(  # pylint: disable=no-member
-        'birth_france_by_region',
+        "birth_france_by_region",
         db.engine,
-        if_exists='replace',
+        if_exists="replace",
         chunksize=500,
         dtype={
-            'DEPT_ID': String(10),
-            '2003': BigInteger,
-            '2004': BigInteger,
-            '2005': BigInteger,
-            '2006': BigInteger,
-            '2007': BigInteger,
-            '2008': BigInteger,
-            '2009': BigInteger,
-            '2010': BigInteger,
-            '2011': BigInteger,
-            '2012': BigInteger,
-            '2013': BigInteger,
-            '2014': BigInteger,
-            'dttm': Date(),
+            "DEPT_ID": String(10),
+            "2003": BigInteger,
+            "2004": BigInteger,
+            "2005": BigInteger,
+            "2006": BigInteger,
+            "2007": BigInteger,
+            "2008": BigInteger,
+            "2009": BigInteger,
+            "2010": BigInteger,
+            "2011": BigInteger,
+            "2012": BigInteger,
+            "2013": BigInteger,
+            "2014": BigInteger,
+            "dttm": Date(),
         },
-        index=False)
-    print('Done loading table!')
-    print('-' * 80)
-    print('Creating table reference')
-    obj = db.session.query(TBL).filter_by(table_name='birth_france_by_region').first()
+        index=False,
+    )
+    print("Done loading table!")
+    print("-" * 80)
+    print("Creating table reference")
+    obj = db.session.query(TBL).filter_by(table_name="birth_france_by_region").first()
     if not obj:
-        obj = TBL(table_name='birth_france_by_region')
-    obj.main_dttm_col = 'dttm'
+        obj = TBL(table_name="birth_france_by_region")
+    obj.main_dttm_col = "dttm"
     obj.database = utils.get_or_create_main_db()
-    if not any(col.metric_name == 'avg__2004' for col in obj.metrics):
-        col = str(column('2004').compile(db.engine))
-        obj.metrics.append(SqlMetric(
-            metric_name='avg__2004',
-            expression=f'AVG({col})',
-        ))
+    if not any(col.metric_name == "avg__2004" for col in obj.metrics):
+        col = str(column("2004").compile(db.engine))
+        obj.metrics.append(SqlMetric(metric_name="avg__2004", expression=f"AVG({col})"))
     db.session.merge(obj)
     db.session.commit()
     obj.fetch_metadata()
     tbl = obj
 
     slice_data = {
-        'granularity_sqla': '',
-        'since': '',
-        'until': '',
-        'where': '',
-        'viz_type': 'country_map',
-        'entity': 'DEPT_ID',
-        'metric': {
-            'expressionType': 'SIMPLE',
-            'column': {
-                'type': 'INT',
-                'column_name': '2004',
-            },
-            'aggregate': 'AVG',
-            'label': 'Boys',
-            'optionName': 'metric_112342',
+        "granularity_sqla": "",
+        "since": "",
+        "until": "",
+        "where": "",
+        "viz_type": "country_map",
+        "entity": "DEPT_ID",
+        "metric": {
+            "expressionType": "SIMPLE",
+            "column": {"type": "INT", "column_name": "2004"},
+            "aggregate": "AVG",
+            "label": "Boys",
+            "optionName": "metric_112342",
         },
-        'row_limit': 500000,
+        "row_limit": 500000,
     }
 
-    print('Creating a slice')
+    print("Creating a slice")
     slc = Slice(
-        slice_name='Birth in France by department in 2016',
-        viz_type='country_map',
-        datasource_type='table',
+        slice_name="Birth in France by department in 2016",
+        viz_type="country_map",
+        datasource_type="table",
         datasource_id=tbl.id,
         params=get_slice_json(slice_data),
     )
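
One detail worth noting in the country_map.py hunk: the metric expression is
built by compiling column("2004") against the engine so that the purely numeric
column name is quoted correctly for the target dialect. A self-contained sketch
of that quoting trick; the in-memory SQLite engine here is an assumption for
illustration, not what the loader actually uses:

from sqlalchemy import column, create_engine

engine = create_engine("sqlite://")  # stand-in for db.engine
# Compiling a ColumnClause yields the dialect-appropriate quoted identifier.
col = str(column("2004").compile(engine))
expression = f"AVG({col})"
print(expression)  # AVG("2004") -- quoting style varies by dialect
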
diff --git a/superset/data/css_templates.py b/superset/data/css_templates.py
index dcee92f..09af029 100644
--- a/superset/data/css_templates.py
+++ b/superset/data/css_templates.py
@@ -22,12 +22,13 @@ from superset.models.core import CssTemplate
 
 def load_css_templates():
     """Loads 2 css templates to demonstrate the feature"""
-    print('Creating default CSS templates')
+    print("Creating default CSS templates")
 
-    obj = db.session.query(CssTemplate).filter_by(template_name='Flat').first()
+    obj = db.session.query(CssTemplate).filter_by(template_name="Flat").first()
     if not obj:
-        obj = CssTemplate(template_name='Flat')
-    css = textwrap.dedent("""\
+        obj = CssTemplate(template_name="Flat")
+    css = textwrap.dedent(
+        """\
     .gridster div.widget {
         transition: background-color 0.5s ease;
         background-color: #FAFAFA;
@@ -58,16 +59,17 @@ def load_css_templates():
         '#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e',
      ];
     */
-    """)
+    """
+    )
     obj.css = css
     db.session.merge(obj)
     db.session.commit()
 
-    obj = (
-        db.session.query(CssTemplate).filter_by(template_name='Courier Black').first())
+    obj = db.session.query(CssTemplate).filter_by(template_name="Courier Black").first()
     if not obj:
-        obj = CssTemplate(template_name='Courier Black')
-    css = textwrap.dedent("""\
+        obj = CssTemplate(template_name="Courier Black")
+    css = textwrap.dedent(
+        """\
     .gridster div.widget {
         transition: background-color 0.5s ease;
         background-color: #EEE;
@@ -113,7 +115,8 @@ def load_css_templates():
         '#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e',
      ];
     */
-    """)
+    """
+    )
     obj.css = css
     db.session.merge(obj)
     db.session.commit()
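
The css_templates.py hunk keeps the existing textwrap.dedent pattern and only
changes how Black wraps the call. A small sketch of why dedent is used for
these embedded templates; the CSS rule below is a made-up example:

import textwrap

css = textwrap.dedent(
    """\
    .widget { background-color: #FAFAFA; }
    """
)
# dedent strips the common leading indentation, and the backslash after the
# opening quotes avoids a leading blank line, so the stored CSS starts clean.
print(repr(css))  # '.widget { background-color: #FAFAFA; }\n'
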
diff --git a/superset/data/deck.py b/superset/data/deck.py
index 398255c..ab329b0 100644
--- a/superset/data/deck.py
+++ b/superset/data/deck.py
@@ -18,21 +18,9 @@
 import json
 
 from superset import db
-from .helpers import (
-    Dash,
-    get_slice_json,
-    merge_slice,
-    Slice,
-    TBL,
-    update_slice_ids,
-)
+from .helpers import Dash, get_slice_json, merge_slice, Slice, TBL, update_slice_ids
 
-COLOR_RED = {
-    'r': 205,
-    'g': 0,
-    'b': 3,
-    'a': 0.82,
-}
+COLOR_RED = {"r": 205, "g": 0, "b": 3, "a": 0.82}
 POSITION_JSON = """\
 {
     "CHART-3afd9d70": {
@@ -177,46 +165,42 @@ POSITION_JSON = """\
 
 
 def load_deck_dash():
-    print('Loading deck.gl dashboard')
+    print("Loading deck.gl dashboard")
     slices = []
-    tbl = db.session.query(TBL).filter_by(table_name='long_lat').first()
+    tbl = db.session.query(TBL).filter_by(table_name="long_lat").first()
     slice_data = {
-        'spatial': {
-            'type': 'latlong',
-            'lonCol': 'LON',
-            'latCol': 'LAT',
-        },
-        'color_picker': COLOR_RED,
-        'datasource': '5__table',
-        'filters': [],
-        'granularity_sqla': None,
-        'groupby': [],
-        'having': '',
-        'mapbox_style': 'mapbox://styles/mapbox/light-v9',
-        'multiplier': 10,
-        'point_radius_fixed': {'type': 'metric', 'value': 'count'},
-        'point_unit': 'square_m',
-        'min_radius': 1,
-        'row_limit': 5000,
-        'time_range': ' : ',
-        'size': 'count',
-        'time_grain_sqla': None,
-        'viewport': {
-            'bearing': -4.952916738791771,
-            'latitude': 37.78926922909199,
-            'longitude': -122.42613341901688,
-            'pitch': 4.750411100577438,
-            'zoom': 12.729132798697304,
-        },
-        'viz_type': 'deck_scatter',
-        'where': '',
+        "spatial": {"type": "latlong", "lonCol": "LON", "latCol": "LAT"},
+        "color_picker": COLOR_RED,
+        "datasource": "5__table",
+        "filters": [],
+        "granularity_sqla": None,
+        "groupby": [],
+        "having": "",
+        "mapbox_style": "mapbox://styles/mapbox/light-v9",
+        "multiplier": 10,
+        "point_radius_fixed": {"type": "metric", "value": "count"},
+        "point_unit": "square_m",
+        "min_radius": 1,
+        "row_limit": 5000,
+        "time_range": " : ",
+        "size": "count",
+        "time_grain_sqla": None,
+        "viewport": {
+            "bearing": -4.952916738791771,
+            "latitude": 37.78926922909199,
+            "longitude": -122.42613341901688,
+            "pitch": 4.750411100577438,
+            "zoom": 12.729132798697304,
+        },
+        "viz_type": "deck_scatter",
+        "where": "",
     }
 
-    print('Creating Scatterplot slice')
+    print("Creating Scatterplot slice")
     slc = Slice(
-        slice_name='Scatterplot',
-        viz_type='deck_scatter',
-        datasource_type='table',
+        slice_name="Scatterplot",
+        viz_type="deck_scatter",
+        datasource_type="table",
         datasource_id=tbl.id,
         params=get_slice_json(slice_data),
     )
@@ -224,46 +208,37 @@ def load_deck_dash():
     slices.append(slc)
 
     slice_data = {
-        'point_unit': 'square_m',
-        'filters': [],
-        'row_limit': 5000,
-        'spatial': {
-            'type': 'latlong',
-            'lonCol': 'LON',
-            'latCol': 'LAT',
-        },
-        'mapbox_style': 'mapbox://styles/mapbox/dark-v9',
-        'granularity_sqla': None,
-        'size': 'count',
-        'viz_type': 'deck_screengrid',
-        'time_range': 'No filter',
-        'point_radius': 'Auto',
-        'color_picker': {
-            'a': 1,
... 42630 lines suppressed ...
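
The remaining hunks of the commit are suppressed by the mailing-list archive,
but they follow the same mechanical pattern seen above: single quotes become
double quotes, long calls are exploded with trailing commas, and short literals
are collapsed onto one line. A sketch of reproducing that behavior through
Black's Python API, assuming a reasonably recent Black release is installed;
the snippet is illustrative and not part of the commit:

import black

src = "COLOR_RED = {'r': 205, 'g': 0, 'b': 3, 'a': 0.82}\n"
# format_str applies the same rules the `black` CLI applies to files on disk.
print(black.format_str(src, mode=black.Mode()), end="")
# COLOR_RED = {"r": 205, "g": 0, "b": 3, "a": 0.82}
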