Posted to commits@superset.apache.org by ru...@apache.org on 2023/01/25 23:35:16 UTC

[superset] branch master updated: chore(misc): Spelling (#19678)

This is an automated email from the ASF dual-hosted git repository.

rusackas pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/superset.git


The following commit(s) were added to refs/heads/master by this push:
     new 02e5dcbbf2 chore(misc): Spelling (#19678)
02e5dcbbf2 is described below

commit 02e5dcbbf285e15b71c60b3aac1e120b1b5ada1b
Author: Josh Soref <21...@users.noreply.github.com>
AuthorDate: Wed Jan 25 18:35:08 2023 -0500

    chore(misc): Spelling (#19678)
    
    Signed-off-by: Josh Soref <js...@users.noreply.github.com>
    Co-authored-by: Josh Soref <js...@users.noreply.github.com>
---
 RELEASING/Dockerfile.from_local_tarball                |  2 +-
 RELEASING/Dockerfile.from_svn_tarball                  |  2 +-
 docs/static/resources/openapi.json                     | 12 ++++++------
 scripts/cancel_github_workflows.py                     |  2 +-
 scripts/permissions_cleanup.py                         | 12 ++++++------
 scripts/python_tests.sh                                |  2 +-
 scripts/tests/run.sh                                   |  2 +-
 superset-websocket/spec/index.test.ts                  |  2 +-
 superset-websocket/utils/client-ws-app/views/index.pug |  2 +-
 superset/utils/core.py                                 |  4 ++--
 tests/integration_tests/charts/data/api_tests.py       |  2 +-
 tests/integration_tests/core_tests.py                  |  2 +-
 tests/integration_tests/csv_upload_tests.py            |  2 +-
 tests/integration_tests/datasets/api_tests.py          |  2 +-
 tests/integration_tests/datasets/commands_tests.py     |  2 +-
 tests/integration_tests/datasource_tests.py            |  2 +-
 tests/integration_tests/model_tests.py                 |  6 +++---
 tests/integration_tests/query_context_tests.py         |  2 +-
 tests/integration_tests/reports/api_tests.py           |  2 +-
 tests/integration_tests/sqllab_tests.py                |  2 +-
 tests/unit_tests/charts/test_post_processing.py        |  4 ++--
 tests/unit_tests/db_engine_specs/test_snowflake.py     |  4 ++--
 tests/unit_tests/sql_parse_tests.py                    |  2 +-
 23 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/RELEASING/Dockerfile.from_local_tarball b/RELEASING/Dockerfile.from_local_tarball
index ff7eb56536..4860db6428 100644
--- a/RELEASING/Dockerfile.from_local_tarball
+++ b/RELEASING/Dockerfile.from_local_tarball
@@ -24,7 +24,7 @@ ENV LANG=C.UTF-8 \
 
 RUN apt-get update -y
 
-# Install dependencies to fix `curl https support error` and `elaying package configuration warning`
+# Install dependencies to fix `curl https support error` and `delaying package configuration warning`
 RUN apt-get install -y apt-transport-https apt-utils
 
 # Install superset dependencies
diff --git a/RELEASING/Dockerfile.from_svn_tarball b/RELEASING/Dockerfile.from_svn_tarball
index b88481f40d..3deea5b8d3 100644
--- a/RELEASING/Dockerfile.from_svn_tarball
+++ b/RELEASING/Dockerfile.from_svn_tarball
@@ -24,7 +24,7 @@ ENV LANG=C.UTF-8 \
 
 RUN apt-get update -y
 
-# Install dependencies to fix `curl https support error` and `elaying package configuration warning`
+# Install dependencies to fix `curl https support error` and `delaying package configuration warning`
 RUN apt-get install -y apt-transport-https apt-utils
 
 # Install superset dependencies
diff --git a/docs/static/resources/openapi.json b/docs/static/resources/openapi.json
index 8279811b53..86060e5470 100644
--- a/docs/static/resources/openapi.json
+++ b/docs/static/resources/openapi.json
@@ -746,7 +746,7 @@
             "type": "array"
           },
           "metrics": {
-            "description": "Aggregate expressions. Metrics can be passed as both references to datasource metrics (strings), or ad-hoc metricswhich are defined only within the query object. See `ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics. When metrics is undefined or null, the query is executed without a groupby. However, when metrics is an array (length >= 0), a groupby clause is added to the query.",
+            "description": "Aggregate expressions. Metrics can be passed as both references to datasource metrics (strings), or ad-hoc metrics which are defined only within the query object. See `ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics. When metrics is undefined or null, the query is executed without a groupby. However, when metrics is an array (length >= 0), a groupby clause is added to the query.",
             "items": {},
             "nullable": true,
             "type": "array"
@@ -1309,7 +1309,7 @@
             "type": "boolean"
           },
           "metrics": {
-            "description": "Aggregate expressions. Metrics can be passed as both references to datasource metrics (strings), or ad-hoc metricswhich are defined only within the query object. See `ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics.",
+            "description": "Aggregate expressions. Metrics can be passed as both references to datasource metrics (strings), or ad-hoc metrics which are defined only within the query object. See `ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics.",
             "items": {},
             "nullable": true,
             "type": "array"
@@ -1968,7 +1968,7 @@
             "type": "string"
           },
           "query_context_generation": {
-            "description": "The query context generation represents whether the query_contextis user generated or not so that it does not update user modfiedstate.",
+            "description": "The query context generation represents whether the query_contexts user generated or not so that it does not update user modfiedstate.",
             "nullable": true,
             "type": "boolean"
           },
@@ -2075,7 +2075,7 @@
             "type": "string"
           },
           "query_context_generation": {
-            "description": "The query context generation represents whether the query_contextis user generated or not so that it does not update user modfiedstate.",
+            "description": "The query context generation represents whether the query_contexts user generated or not so that it does not update user modfiedstate.",
             "nullable": true,
             "type": "boolean"
           },
@@ -2760,7 +2760,7 @@
             "type": "string"
           },
           "query_context_generation": {
-            "description": "The query context generation represents whether the query_contextis user generated or not so that it does not update user modfiedstate.",
+            "description": "The query context generation represents whether the query_contexts user generated or not so that it does not update user modfiedstate.",
             "nullable": true,
             "type": "boolean"
           },
@@ -2867,7 +2867,7 @@
             "type": "string"
           },
           "query_context_generation": {
-            "description": "The query context generation represents whether the query_contextis user generated or not so that it does not update user modfiedstate.",
+            "description": "The query context generation represents whether the query_contexts user generated or not so that it does not update user modfiedstate.",
             "nullable": true,
             "type": "boolean"
           },
diff --git a/scripts/cancel_github_workflows.py b/scripts/cancel_github_workflows.py
index 720dc05cbe..4d30d34adf 100755
--- a/scripts/cancel_github_workflows.py
+++ b/scripts/cancel_github_workflows.py
@@ -143,7 +143,7 @@ Date:   {date_str}
     "--include-last/--skip-last",
     default=False,
     show_default=True,
-    help="Whether to also cancel the lastest run.",
+    help="Whether to also cancel the latest run.",
 )
 @click.option(
     "--include-running/--skip-running",
diff --git a/scripts/permissions_cleanup.py b/scripts/permissions_cleanup.py
index 99d192919c..5ca75e394c 100644
--- a/scripts/permissions_cleanup.py
+++ b/scripts/permissions_cleanup.py
@@ -24,7 +24,7 @@ def cleanup_permissions() -> None:
     pvms = security_manager.get_session.query(
         security_manager.permissionview_model
     ).all()
-    print("# of permission view menues is: {}".format(len(pvms)))
+    print("# of permission view menus is: {}".format(len(pvms)))
     pvms_dict = defaultdict(list)
     for pvm in pvms:
         pvms_dict[(pvm.permission, pvm.view_menu)].append(pvm)
@@ -43,9 +43,9 @@ def cleanup_permissions() -> None:
     pvms = security_manager.get_session.query(
         security_manager.permissionview_model
     ).all()
-    print("Stage 1: # of permission view menues is: {}".format(len(pvms)))
+    print("Stage 1: # of permission view menus is: {}".format(len(pvms)))
 
-    # 2. Clean up None permissions or view menues
+    # 2. Clean up None permissions or view menus
     pvms = security_manager.get_session.query(
         security_manager.permissionview_model
     ).all()
@@ -57,15 +57,15 @@ def cleanup_permissions() -> None:
     pvms = security_manager.get_session.query(
         security_manager.permissionview_model
     ).all()
-    print("Stage 2: # of permission view menues is: {}".format(len(pvms)))
+    print("Stage 2: # of permission view menus is: {}".format(len(pvms)))
 
-    # 3. Delete empty permission view menues from roles
+    # 3. Delete empty permission view menus from roles
     roles = security_manager.get_session.query(security_manager.role_model).all()
     for role in roles:
         role.permissions = [p for p in role.permissions if p]
     security_manager.get_session.commit()
 
-    # 4. Delete empty roles from permission view menues
+    # 4. Delete empty roles from permission view menus
     pvms = security_manager.get_session.query(
         security_manager.permissionview_model
     ).all()
diff --git a/scripts/python_tests.sh b/scripts/python_tests.sh
index 6491a3f6f9..c3f27d17f7 100755
--- a/scripts/python_tests.sh
+++ b/scripts/python_tests.sh
@@ -19,7 +19,7 @@
 set -e
 
 # Temporary fix, probably related with https://bugs.launchpad.net/ubuntu/+source/opencv/+bug/1890170
-# MySQL was failling with:
+# MySQL was failing with:
 # from . import _mysql
 # ImportError: /lib/x86_64-linux-gnu/libstdc++.so.6: cannot allocate memory in static TLS block
 export LD_PRELOAD=/lib/x86_64-linux-gnu/libstdc++.so.6
diff --git a/scripts/tests/run.sh b/scripts/tests/run.sh
index 2423301010..2c3b5bf359 100755
--- a/scripts/tests/run.sh
+++ b/scripts/tests/run.sh
@@ -24,7 +24,7 @@ set -e
 #
 function reset_db() {
   echo --------------------
-  echo Reseting test DB
+  echo Resetting test DB
   echo --------------------
   docker-compose stop superset-tests-worker superset || true
   RESET_DB_CMD="psql \"postgresql://${DB_USER}:${DB_PASSWORD}@127.0.0.1:5432\" <<-EOF
diff --git a/superset-websocket/spec/index.test.ts b/superset-websocket/spec/index.test.ts
index 320f13b445..ca575e9e8a 100644
--- a/superset-websocket/spec/index.test.ts
+++ b/superset-websocket/spec/index.test.ts
@@ -98,7 +98,7 @@ describe('server', () => {
       expect(endMock).toHaveBeenLastCalledWith('OK');
     });
 
-    test('reponds with a 404 when not found', () => {
+    test('responds with a 404 when not found', () => {
       const endMock = jest.fn();
       const writeHeadMock = jest.fn();
 
diff --git a/superset-websocket/utils/client-ws-app/views/index.pug b/superset-websocket/utils/client-ws-app/views/index.pug
index 3b1efc7fbf..2322bec580 100644
--- a/superset-websocket/utils/client-ws-app/views/index.pug
+++ b/superset-websocket/utils/client-ws-app/views/index.pug
@@ -24,7 +24,7 @@ block content
 
   div Sockets connected:
     span#socket-count 0
-  div Messages recevied:
+  div Messages received:
     span#message-count 0
   div Last message received:
   code#message-debug
diff --git a/superset/utils/core.py b/superset/utils/core.py
index 0ab3a685a3..15c3554276 100644
--- a/superset/utils/core.py
+++ b/superset/utils/core.py
@@ -1026,7 +1026,7 @@ def send_mime_email(
     smtp_password = config["SMTP_PASSWORD"]
     smtp_starttls = config["SMTP_STARTTLS"]
     smtp_ssl = config["SMTP_SSL"]
-    smpt_ssl_server_auth = config["SMTP_SSL_SERVER_AUTH"]
+    smtp_ssl_server_auth = config["SMTP_SSL_SERVER_AUTH"]
 
     if dryrun:
         logger.info("Dryrun enabled, email notification content is below:")
@@ -1035,7 +1035,7 @@ def send_mime_email(
 
     # Default ssl context is SERVER_AUTH using the default system
     # root CA certificates
-    ssl_context = ssl.create_default_context() if smpt_ssl_server_auth else None
+    ssl_context = ssl.create_default_context() if smtp_ssl_server_auth else None
     smtp = (
         smtplib.SMTP_SSL(smtp_host, smtp_port, context=ssl_context)
         if smtp_ssl
diff --git a/tests/integration_tests/charts/data/api_tests.py b/tests/integration_tests/charts/data/api_tests.py
index d83cb8286b..164fb0ca6c 100644
--- a/tests/integration_tests/charts/data/api_tests.py
+++ b/tests/integration_tests/charts/data/api_tests.py
@@ -451,7 +451,7 @@ class TestPostChartDataApi(BaseTestChartDataApi):
 
     def test_with_invalid_where_parameter__400(self):
         self.query_context_payload["queries"][0]["filters"] = []
-        # erroneus WHERE-clause
+        # erroneous WHERE-clause
         self.query_context_payload["queries"][0]["extras"]["where"] = "(gender abc def)"
 
         rv = self.post_assert_metric(CHART_DATA_URI, self.query_context_payload, "data")
diff --git a/tests/integration_tests/core_tests.py b/tests/integration_tests/core_tests.py
index 799ddacad4..1b35d81f83 100644
--- a/tests/integration_tests/core_tests.py
+++ b/tests/integration_tests/core_tests.py
@@ -1619,7 +1619,7 @@ class TestCore(SupersetTestCase):
         Handle injected exceptions from the db mutator
         """
 
-        # Assert we can handle a custom excetion at the mutator level
+        # Assert we can handle a custom exception at the mutator level
         exception = SupersetException("Error message")
         mock_db_connection_mutator.side_effect = exception
         dash = db.session.query(Dashboard).first()
diff --git a/tests/integration_tests/csv_upload_tests.py b/tests/integration_tests/csv_upload_tests.py
index 724a177634..3e0200d18a 100644
--- a/tests/integration_tests/csv_upload_tests.py
+++ b/tests/integration_tests/csv_upload_tests.py
@@ -209,7 +209,7 @@ def mock_upload_to_s3(filename: str, upload_prefix: str, table: Table) -> str:
     container.exec_run(f"hdfs dfs -mkdir -p {dest_dir}")
     dest = os.path.join(dest_dir, os.path.basename(filename))
     container.exec_run(f"hdfs dfs -put {src} {dest}")
-    # hive external table expectes a directory for the location
+    # hive external table expects a directory for the location
     return dest_dir
 
 
diff --git a/tests/integration_tests/datasets/api_tests.py b/tests/integration_tests/datasets/api_tests.py
index ff8206354c..95236af090 100644
--- a/tests/integration_tests/datasets/api_tests.py
+++ b/tests/integration_tests/datasets/api_tests.py
@@ -1810,7 +1810,7 @@ class TestDatasetApi(SupersetTestCase):
             "datasource_access", dataset.perm
         )
 
-        # add perissions to allow export + access to query this dataset
+        # add permissions to allow export + access to query this dataset
         gamma_role = security_manager.find_role("Gamma")
         security_manager.add_permission_role(gamma_role, perm1)
         security_manager.add_permission_role(gamma_role, perm2)
diff --git a/tests/integration_tests/datasets/commands_tests.py b/tests/integration_tests/datasets/commands_tests.py
index 9498c911f2..5cc5c85bea 100644
--- a/tests/integration_tests/datasets/commands_tests.py
+++ b/tests/integration_tests/datasets/commands_tests.py
@@ -72,7 +72,7 @@ class TestExportDatasetsCommand(SupersetTestCase):
 
         metadata = yaml.safe_load(contents["datasets/examples/energy_usage.yaml"])
 
-        # sort columns for deterministc comparison
+        # sort columns for deterministic comparison
         metadata["columns"] = sorted(metadata["columns"], key=itemgetter("column_name"))
         metadata["metrics"] = sorted(metadata["metrics"], key=itemgetter("metric_name"))
 
diff --git a/tests/integration_tests/datasource_tests.py b/tests/integration_tests/datasource_tests.py
index 4969321a1c..52bd9ec244 100644
--- a/tests/integration_tests/datasource_tests.py
+++ b/tests/integration_tests/datasource_tests.py
@@ -233,7 +233,7 @@ class TestDatasource(SupersetTestCase):
             resp = self.get_json_resp(url)
             self.assertEqual(resp["error"], "Only `SELECT` statements are allowed")
 
-    def test_external_metadata_for_mutistatement_virtual_table(self):
+    def test_external_metadata_for_multistatement_virtual_table(self):
         self.login(username="admin")
         table = SqlaTable(
             table_name="multistatement_sql_table",
diff --git a/tests/integration_tests/model_tests.py b/tests/integration_tests/model_tests.py
index f187eadfbb..da6c5e6a3c 100644
--- a/tests/integration_tests/model_tests.py
+++ b/tests/integration_tests/model_tests.py
@@ -476,15 +476,15 @@ class TestSqlaTableModel(SupersetTestCase):
             # TODO(bkyryliuk): make it work for presto.
             return
 
-        def cannonicalize_df(df):
+        def canonicalize_df(df):
             ret = df.sort_values(by=list(df.columns.values), inplace=False)
             ret.reset_index(inplace=True, drop=True)
             return ret
 
         df1 = self.query_with_expr_helper(is_timeseries=True, inner_join=True)
-        name_list1 = cannonicalize_df(df1).name.values.tolist()
+        name_list1 = canonicalize_df(df1).name.values.tolist()
         df2 = self.query_with_expr_helper(is_timeseries=True, inner_join=False)
-        name_list2 = cannonicalize_df(df1).name.values.tolist()
+        name_list2 = canonicalize_df(df1).name.values.tolist()
         self.assertFalse(df2.empty)
 
         assert name_list2 == name_list1
diff --git a/tests/integration_tests/query_context_tests.py b/tests/integration_tests/query_context_tests.py
index 23bec876f7..5e5beae345 100644
--- a/tests/integration_tests/query_context_tests.py
+++ b/tests/integration_tests/query_context_tests.py
@@ -74,7 +74,7 @@ class TestQueryContext(SupersetTestCase):
         for query_idx, query in enumerate(query_context.queries):
             payload_query = payload["queries"][query_idx]
 
-            # check basic properies
+            # check basic properties
             self.assertEqual(query.extras, payload_query["extras"])
             self.assertEqual(query.filter, payload_query["filters"])
             self.assertEqual(query.columns, payload_query["columns"])
diff --git a/tests/integration_tests/reports/api_tests.py b/tests/integration_tests/reports/api_tests.py
index a304f08315..22b9be9990 100644
--- a/tests/integration_tests/reports/api_tests.py
+++ b/tests/integration_tests/reports/api_tests.py
@@ -571,7 +571,7 @@ class TestReportSchedulesApi(SupersetTestCase):
     @pytest.mark.usefixtures("create_report_schedules")
     def test_get_related_report_schedule(self):
         """
-        ReportSchedule Api: Test get releated report schedule
+        ReportSchedule Api: Test get related report schedule
         """
         self.login(username="admin")
         related_columns = ["created_by", "chart", "dashboard", "database"]
diff --git a/tests/integration_tests/sqllab_tests.py b/tests/integration_tests/sqllab_tests.py
index a33a541a63..19e397e8f6 100644
--- a/tests/integration_tests/sqllab_tests.py
+++ b/tests/integration_tests/sqllab_tests.py
@@ -91,7 +91,7 @@ class TestSqlLab(SupersetTestCase):
         data = self.run_sql("SELECT * FROM birth_names LIMIT 10", "1")
         self.assertLess(0, len(data["data"]))
 
-        data = self.run_sql("SELECT * FROM unexistant_table", "2")
+        data = self.run_sql("SELECT * FROM nonexistent_table", "2")
         if backend() == "presto":
             assert (
                 data["errors"][0]["error_type"]
diff --git a/tests/unit_tests/charts/test_post_processing.py b/tests/unit_tests/charts/test_post_processing.py
index cfab4e3d74..f63ee5d66a 100644
--- a/tests/unit_tests/charts/test_post_processing.py
+++ b/tests/unit_tests/charts/test_post_processing.py
@@ -64,7 +64,7 @@ def test_pivot_df_no_cols_no_rows_single_metric():
     """.strip()
     )
 
-    # tranpose_pivot and combine_metrics do nothing in this case
+    # transpose_pivot and combine_metrics do nothing in this case
     pivoted = pivot_df(
         df,
         rows=[],
@@ -169,7 +169,7 @@ def test_pivot_df_no_cols_no_rows_two_metrics():
     """.strip()
     )
 
-    # tranpose_pivot and combine_metrics do nothing in this case
+    # transpose_pivot and combine_metrics do nothing in this case
     pivoted = pivot_df(
         df,
         rows=[],
diff --git a/tests/unit_tests/db_engine_specs/test_snowflake.py b/tests/unit_tests/db_engine_specs/test_snowflake.py
index 3611c7214d..854d3f5f61 100644
--- a/tests/unit_tests/db_engine_specs/test_snowflake.py
+++ b/tests/unit_tests/db_engine_specs/test_snowflake.py
@@ -77,11 +77,11 @@ def test_extract_errors() -> None:
         )
     ]
 
-    msg = "syntax error line 1 at position 10 unexpected 'limmmited'."
+    msg = "syntax error line 1 at position 10 unexpected 'limited'."
     result = SnowflakeEngineSpec.extract_errors(Exception(msg))
     assert result == [
         SupersetError(
-            message='Please check your query for syntax errors at or near "limmmited". Then, try running your query again.',
+            message='Please check your query for syntax errors at or near "limited". Then, try running your query again.',
             error_type=SupersetErrorType.SYNTAX_ERROR,
             level=ErrorLevel.ERROR,
             extra={
diff --git a/tests/unit_tests/sql_parse_tests.py b/tests/unit_tests/sql_parse_tests.py
index 70e5d4d3b9..ba3da69aae 100644
--- a/tests/unit_tests/sql_parse_tests.py
+++ b/tests/unit_tests/sql_parse_tests.py
@@ -675,7 +675,7 @@ WHERE TABLE_SCHEMA like "%bi%"),0x7e)));
             """
 select (extractvalue(1,concat(0x7e,(select GROUP_CONCAT(COLUMN_NAME)
 from INFORMATION_SCHEMA.COLUMNS
-WHERE TABLE_NAME="bi_achivement_daily"),0x7e)));
+WHERE TABLE_NAME="bi_achievement_daily"),0x7e)));
 """
         )
         == {Table("COLUMNS", "INFORMATION_SCHEMA")}