Posted to commits@superset.apache.org by hu...@apache.org on 2023/04/10 22:40:31 UTC

[superset] branch 2023.13.1 created (now ba7b6fd7d9)

This is an automated email from the ASF dual-hosted git repository.

hugh pushed a change to branch 2023.13.1
in repository https://gitbox.apache.org/repos/asf/superset.git


      at ba7b6fd7d9 chore: Refactor ExploreMixin to power both Datasets (SqlaTable) and Query models (#22853)

This branch includes the following new commits:

     new f138893bb9 Revert "feat: Add excel export (#22006)"
     new 73ad7c3cc0 fix: Dashboard not loading with default first value in filter (#23512)
     new 7c993ace14 chore: allow logs to show error levels for ssh tunnel (#23536)
     new 34bfd647f7 fix: Hide FilterBar for Reports (#23543)
     new 83bda092b8 fix(charts): Time range filters are not being applied to charts that were overwritten (#23589)
     new 971c6c3d2f solve conflict
     new ba7b6fd7d9 chore: Refactor ExploreMixin to power both Datasets (SqlaTable) and Query models (#22853)

The 7 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.



[superset] 01/07: Revert "feat: Add excel export (#22006)"

Posted by hu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

hugh pushed a commit to branch 2023.13.1
in repository https://gitbox.apache.org/repos/asf/superset.git

commit f138893bb952d1ba9e3deeba9705e5c6d40a118b
Author: Jinghua Yao <ji...@preset.io>
AuthorDate: Wed Mar 29 08:47:24 2023 -0700

    Revert "feat: Add excel export (#22006)"
    
    This reverts commit 6cf434e5c16e8b4e387a79bbf7fca31bab9d96b8.
---
 requirements/base.txt                              |  2 -
 setup.py                                           |  1 -
 .../useExploreAdditionalActionsMenu/index.jsx      | 22 ----------
 superset/charts/data/api.py                        | 28 ++++---------
 superset/common/chart_data.py                      |  6 ---
 superset/common/query_context_processor.py         | 15 +++----
 superset/config.py                                 |  5 ---
 superset/utils/excel.py                            | 29 -------------
 superset/views/base.py                             | 11 -----
 tests/integration_tests/charts/data/api_tests.py   | 48 ----------------------
 10 files changed, 14 insertions(+), 153 deletions(-)

diff --git a/requirements/base.txt b/requirements/base.txt
index 3a5ec607fe..b41ea6cb62 100644
--- a/requirements/base.txt
+++ b/requirements/base.txt
@@ -294,8 +294,6 @@ wtforms==2.3.3
     #   wtforms-json
 wtforms-json==0.3.3
     # via apache-superset
-xlsxwriter==3.0.7
-    # via apache-superset
 
 # The following packages are considered to be unsafe in a requirements file:
 # setuptools
diff --git a/setup.py b/setup.py
index c6850070a0..53565f1c76 100644
--- a/setup.py
+++ b/setup.py
@@ -125,7 +125,6 @@ setup(
         "waitress; sys_platform == 'win32'",
         "wtforms>=2.3.3, <2.4",
         "wtforms-json",
-        "xlsxwriter>=3.0.7, <3.1",
     ],
     extras_require={
         "athena": ["pyathena[pandas]>=2, <3"],
diff --git a/superset-frontend/src/explore/components/useExploreAdditionalActionsMenu/index.jsx b/superset-frontend/src/explore/components/useExploreAdditionalActionsMenu/index.jsx
index 445db6dc44..62fcdaaf15 100644
--- a/superset-frontend/src/explore/components/useExploreAdditionalActionsMenu/index.jsx
+++ b/superset-frontend/src/explore/components/useExploreAdditionalActionsMenu/index.jsx
@@ -41,7 +41,6 @@ const MENU_KEYS = {
   EXPORT_TO_CSV: 'export_to_csv',
   EXPORT_TO_CSV_PIVOTED: 'export_to_csv_pivoted',
   EXPORT_TO_JSON: 'export_to_json',
-  EXPORT_TO_XLSX: 'export_to_xlsx',
   DOWNLOAD_AS_IMAGE: 'download_as_image',
   SHARE_SUBMENU: 'share_submenu',
   COPY_PERMALINK: 'copy_permalink',
@@ -166,16 +165,6 @@ export const useExploreAdditionalActionsMenu = (
     [latestQueryFormData],
   );
 
-  const exportExcel = useCallback(
-    () =>
-      exportChart({
-        formData: latestQueryFormData,
-        resultType: 'results',
-        resultFormat: 'xlsx',
-      }),
-    [latestQueryFormData],
-  );
-
   const copyLink = useCallback(async () => {
     try {
       if (!latestQueryFormData) {
@@ -210,11 +199,6 @@ export const useExploreAdditionalActionsMenu = (
           setIsDropdownVisible(false);
           setOpenSubmenus([]);
 
-          break;
-        case MENU_KEYS.EXPORT_TO_XLSX:
-          exportExcel();
-          setIsDropdownVisible(false);
-          setOpenSubmenus([]);
           break;
         case MENU_KEYS.DOWNLOAD_AS_IMAGE:
           downloadAsImage(
@@ -328,12 +312,6 @@ export const useExploreAdditionalActionsMenu = (
           >
             {t('Download as image')}
           </Menu.Item>
-          <Menu.Item
-            key={MENU_KEYS.EXPORT_TO_XLSX}
-            icon={<Icons.FileOutlined css={iconReset} />}
-          >
-            {t('Export to Excel')}
-          </Menu.Item>
         </Menu.SubMenu>
         <Menu.SubMenu title={t('Share')} key={MENU_KEYS.SHARE_SUBMENU}>
           <Menu.Item key={MENU_KEYS.COPY_PERMALINK}>
diff --git a/superset/charts/data/api.py b/superset/charts/data/api.py
index 0d0758819e..152383e0c6 100644
--- a/superset/charts/data/api.py
+++ b/superset/charts/data/api.py
@@ -46,7 +46,7 @@ from superset.exceptions import QueryObjectValidationError
 from superset.extensions import event_logger
 from superset.utils.async_query_manager import AsyncQueryTokenException
 from superset.utils.core import create_zip, get_user_id, json_int_dttm_ser
-from superset.views.base import CsvResponse, generate_download_headers, XlsxResponse
+from superset.views.base import CsvResponse, generate_download_headers
 from superset.views.base_api import statsd_metrics
 
 if TYPE_CHECKING:
@@ -353,34 +353,24 @@ class ChartDataRestApi(ChartRestApi):
         if result_type == ChartDataResultType.POST_PROCESSED:
             result = apply_post_process(result, form_data, datasource)
 
-        if result_format in ChartDataResultFormat.table_like():
-            # Verify user has permission to export file
+        if result_format == ChartDataResultFormat.CSV:
+            # Verify user has permission to export CSV file
             if not security_manager.can_access("can_csv", "Superset"):
                 return self.response_403()
 
             if not result["queries"]:
                 return self.response_400(_("Empty query result"))
 
-            is_csv_format = result_format == ChartDataResultFormat.CSV
-
             if len(result["queries"]) == 1:
-                # return single query results
+                # return single query results csv format
                 data = result["queries"][0]["data"]
-                if is_csv_format:
-                    return CsvResponse(data, headers=generate_download_headers("csv"))
-
-                return XlsxResponse(data, headers=generate_download_headers("xlsx"))
-
-            # return multi-query results bundled as a zip file
-            def _process_data(query_data: Any) -> Any:
-                if result_format == ChartDataResultFormat.CSV:
-                    encoding = current_app.config["CSV_EXPORT"].get("encoding", "utf-8")
-                    return query_data.encode(encoding)
-                return query_data
+                return CsvResponse(data, headers=generate_download_headers("csv"))
 
+            # return multi-query csv results bundled as a zip file
+            encoding = current_app.config["CSV_EXPORT"].get("encoding", "utf-8")
             files = {
-                f"query_{idx + 1}.{result_format}": _process_data(query["data"])
-                for idx, query in enumerate(result["queries"])
+                f"query_{idx + 1}.csv": result["data"].encode(encoding)
+                for idx, result in enumerate(result["queries"])
             }
             return Response(
                 create_zip(files),
diff --git a/superset/common/chart_data.py b/superset/common/chart_data.py
index 659a640159..ea31d4f138 100644
--- a/superset/common/chart_data.py
+++ b/superset/common/chart_data.py
@@ -15,7 +15,6 @@
 # specific language governing permissions and limitations
 # under the License.
 from enum import Enum
-from typing import Set
 
 
 class ChartDataResultFormat(str, Enum):
@@ -25,11 +24,6 @@ class ChartDataResultFormat(str, Enum):
 
     CSV = "csv"
     JSON = "json"
-    XLSX = "xlsx"
-
-    @classmethod
-    def table_like(cls) -> Set["ChartDataResultFormat"]:
-        return {cls.CSV} | {cls.XLSX}
 
 
 class ChartDataResultType(str, Enum):
diff --git a/superset/common/query_context_processor.py b/superset/common/query_context_processor.py
index 703e1d71dd..a2616013c9 100644
--- a/superset/common/query_context_processor.py
+++ b/superset/common/query_context_processor.py
@@ -46,7 +46,7 @@ from superset.exceptions import (
 from superset.extensions import cache_manager, security_manager
 from superset.models.helpers import QueryResult
 from superset.models.sql_lab import Query
-from superset.utils import csv, excel
+from superset.utils import csv
 from superset.utils.cache import generate_cache_key, set_and_log_cache
 from superset.utils.core import (
     DatasourceType,
@@ -448,20 +448,15 @@ class QueryContextProcessor:
         return CachedTimeOffset(df=rv_df, queries=queries, cache_keys=cache_keys)
 
     def get_data(self, df: pd.DataFrame) -> Union[str, List[Dict[str, Any]]]:
-        if self._query_context.result_format in ChartDataResultFormat.table_like():
+        if self._query_context.result_format == ChartDataResultFormat.CSV:
             include_index = not isinstance(df.index, pd.RangeIndex)
             columns = list(df.columns)
             verbose_map = self._qc_datasource.data.get("verbose_map", {})
             if verbose_map:
                 df.columns = [verbose_map.get(column, column) for column in columns]
-
-            result = None
-            if self._query_context.result_format == ChartDataResultFormat.CSV:
-                result = csv.df_to_escaped_csv(
-                    df, index=include_index, **config["CSV_EXPORT"]
-                )
-            elif self._query_context.result_format == ChartDataResultFormat.XLSX:
-                result = excel.df_to_excel(df, **config["EXCEL_EXPORT"])
+            result = csv.df_to_escaped_csv(
+                df, index=include_index, **config["CSV_EXPORT"]
+            )
             return result or ""
 
         return df.to_dict(orient="records")
diff --git a/superset/config.py b/superset/config.py
index c9234e521d..d77dd81507 100644
--- a/superset/config.py
+++ b/superset/config.py
@@ -751,11 +751,6 @@ ALLOWED_EXTENSIONS = {*EXCEL_EXTENSIONS, *CSV_EXTENSIONS, *COLUMNAR_EXTENSIONS}
 # note: index option should not be overridden
 CSV_EXPORT = {"encoding": "utf-8"}
 
-# Excel Options: key/value pairs that will be passed as argument to DataFrame.to_excel
-# method.
-# note: index option should not be overridden
-EXCEL_EXPORT = {"encoding": "utf-8"}
-
 # ---------------------------------------------------
 # Time grain configurations
 # ---------------------------------------------------
diff --git a/superset/utils/excel.py b/superset/utils/excel.py
deleted file mode 100644
index 1f68031b64..0000000000
--- a/superset/utils/excel.py
+++ /dev/null
@@ -1,29 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-import io
-from typing import Any
-
-import pandas as pd
-
-
-def df_to_excel(df: pd.DataFrame, **kwargs: Any) -> Any:
-    output = io.BytesIO()
-    # pylint: disable=abstract-class-instantiated
-    with pd.ExcelWriter(output, engine="xlsxwriter") as writer:
-        df.to_excel(writer, **kwargs)
-
-    return output.getvalue()
diff --git a/superset/views/base.py b/superset/views/base.py
index ec74b8ccdb..487c9c14db 100644
--- a/superset/views/base.py
+++ b/superset/views/base.py
@@ -693,17 +693,6 @@ class CsvResponse(Response):
     default_mimetype = "text/csv"
 
 
-class XlsxResponse(Response):
-    """
-    Override Response to use xlsx mimetype
-    """
-
-    charset = "utf-8"
-    default_mimetype = (
-        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
-    )
-
-
 def bind_field(
     _: Any, form: DynamicForm, unbound_field: UnboundField, options: Dict[Any, Any]
 ) -> Field:
diff --git a/tests/integration_tests/charts/data/api_tests.py b/tests/integration_tests/charts/data/api_tests.py
index 2818793af0..ed8de062d7 100644
--- a/tests/integration_tests/charts/data/api_tests.py
+++ b/tests/integration_tests/charts/data/api_tests.py
@@ -260,16 +260,6 @@ class TestPostChartDataApi(BaseTestChartDataApi):
         rv = self.post_assert_metric(CHART_DATA_URI, self.query_context_payload, "data")
         assert rv.status_code == 400
 
-    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
-    def test_empty_request_with_excel_result_format(self):
-        """
-        Chart data API: Test empty chart data with Excel result format
-        """
-        self.query_context_payload["result_format"] = "xlsx"
-        self.query_context_payload["queries"] = []
-        rv = self.post_assert_metric(CHART_DATA_URI, self.query_context_payload, "data")
-        assert rv.status_code == 400
-
     @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
     def test_with_csv_result_format(self):
         """
@@ -280,17 +270,6 @@ class TestPostChartDataApi(BaseTestChartDataApi):
         assert rv.status_code == 200
         assert rv.mimetype == "text/csv"
 
-    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
-    def test_with_excel_result_format(self):
-        """
-        Chart data API: Test chart data with Excel result format
-        """
-        self.query_context_payload["result_format"] = "xlsx"
-        mimetype = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
-        rv = self.post_assert_metric(CHART_DATA_URI, self.query_context_payload, "data")
-        assert rv.status_code == 200
-        assert rv.mimetype == mimetype
-
     @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
     def test_with_multi_query_csv_result_format(self):
         """
@@ -306,21 +285,6 @@ class TestPostChartDataApi(BaseTestChartDataApi):
         zipfile = ZipFile(BytesIO(rv.data), "r")
         assert zipfile.namelist() == ["query_1.csv", "query_2.csv"]
 
-    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
-    def test_with_multi_query_excel_result_format(self):
-        """
-        Chart data API: Test chart data with multi-query Excel result format
-        """
-        self.query_context_payload["result_format"] = "xlsx"
-        self.query_context_payload["queries"].append(
-            self.query_context_payload["queries"][0]
-        )
-        rv = self.post_assert_metric(CHART_DATA_URI, self.query_context_payload, "data")
-        assert rv.status_code == 200
-        assert rv.mimetype == "application/zip"
-        zipfile = ZipFile(BytesIO(rv.data), "r")
-        assert zipfile.namelist() == ["query_1.xlsx", "query_2.xlsx"]
-
     @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
     def test_with_csv_result_format_when_actor_not_permitted_for_csv__403(self):
         """
@@ -333,18 +297,6 @@ class TestPostChartDataApi(BaseTestChartDataApi):
         rv = self.post_assert_metric(CHART_DATA_URI, self.query_context_payload, "data")
         assert rv.status_code == 403
 
-    @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
-    def test_with_excel_result_format_when_actor_not_permitted_for_excel__403(self):
-        """
-        Chart data API: Test chart data with Excel result format
-        """
-        self.logout()
-        self.login(username="gamma_no_csv")
-        self.query_context_payload["result_format"] = "xlsx"
-
-        rv = self.post_assert_metric(CHART_DATA_URI, self.query_context_payload, "data")
-        assert rv.status_code == 403
-
     @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
     def test_with_row_limit_and_offset__row_limit_and_offset_were_applied(self):
         """


[superset] 03/07: chore: allow logs to show error levels for ssh tunnel (#23536)

Posted by hu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

hugh pushed a commit to branch 2023.13.1
in repository https://gitbox.apache.org/repos/asf/superset.git

commit 7c993ace148c8d5e2215bb7f2ed4431b1760823e
Author: Hugh A. Miles II <hu...@gmail.com>
AuthorDate: Thu Mar 30 18:23:22 2023 -0400

    chore: allow logs to show error levels for ssh tunnel (#23536)
    
    (cherry picked from commit 81b32d104566313ad5bf9cba17b533da0137cffd)
---
 superset/extensions/ssh.py                  | 4 +++-
 tests/integration_tests/csv_upload_tests.py | 2 +-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/superset/extensions/ssh.py b/superset/extensions/ssh.py
index 8fb235618c..6a852ea7cd 100644
--- a/superset/extensions/ssh.py
+++ b/superset/extensions/ssh.py
@@ -16,6 +16,7 @@
 # under the License.
 
 import importlib
+import logging
 from io import StringIO
 from typing import TYPE_CHECKING
 
@@ -53,8 +54,9 @@ class SSHManager:
         params = {
             "ssh_address_or_host": (ssh_tunnel.server_address, ssh_tunnel.server_port),
             "ssh_username": ssh_tunnel.username,
-            "remote_bind_address": (url.host, url.port),  # bind_port, bind_host
+            "remote_bind_address": (url.host, url.port),
             "local_bind_address": (self.local_bind_address,),
+            "debug_level": logging.getLogger("flask_appbuilder").level,
         }
 
         if ssh_tunnel.password:
diff --git a/tests/integration_tests/csv_upload_tests.py b/tests/integration_tests/csv_upload_tests.py
index f8781a3649..59fd3c1ef5 100644
--- a/tests/integration_tests/csv_upload_tests.py
+++ b/tests/integration_tests/csv_upload_tests.py
@@ -513,4 +513,4 @@ def test_import_parquet(mock_event_logger):
 
     with test_db.get_sqla_engine_with_context() as engine:
         data = engine.execute(f"SELECT * from {PARQUET_UPLOAD_TABLE}").fetchall()
-        assert data == [("john", 1), ("paul", 2), ("max", 3), ("bob", 4)]
+        assert data == [("max", 3), ("bob", 4), ("john", 1), ("paul", 2)]
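The superset/extensions/ssh.py change above feeds the Flask-AppBuilder logger's level into sshtunnel's debug_level argument, so tunnel errors surface at whatever verbosity the app is configured for. A minimal sketch of the same pattern, with hypothetical host names and assuming the sshtunnel package:

    import logging

    from sshtunnel import open_tunnel

    # Reuse the level already configured on the app's logger.
    level = logging.getLogger("flask_appbuilder").level

    with open_tunnel(
        ssh_address_or_host=("bastion.example.com", 22),  # hypothetical bastion
        ssh_username="superset",
        remote_bind_address=("db.internal", 5432),  # hypothetical database host
        debug_level=level,  # tunnel log messages use the app's level
    ) as tunnel:
        print("tunnel listening on local port", tunnel.local_bind_port)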


[superset] 04/07: fix: Hide FilterBar for Reports (#23543)

Posted by hu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

hugh pushed a commit to branch 2023.13.1
in repository https://gitbox.apache.org/repos/asf/superset.git

commit 34bfd647f7e65a6d06c044d506d8353b829c7ebd
Author: Geido <60...@users.noreply.github.com>
AuthorDate: Sat Apr 1 10:45:50 2023 +0200

    fix: Hide FilterBar for Reports (#23543)
    
    (cherry picked from commit a18e33b3bc7fbca4c5eb49715ac66ef7641043e9)
---
 .../DashboardBuilder/DashboardBuilder.tsx          | 28 +++++-----
 .../components/nativeFilters/FilterBar/index.tsx   | 65 +++++++++++++---------
 .../components/nativeFilters/FilterBar/types.ts    |  1 +
 3 files changed, 55 insertions(+), 39 deletions(-)

diff --git a/superset-frontend/src/dashboard/components/DashboardBuilder/DashboardBuilder.tsx b/superset-frontend/src/dashboard/components/DashboardBuilder/DashboardBuilder.tsx
index 3c64c2ef1e..4b4047c9d1 100644
--- a/superset-frontend/src/dashboard/components/DashboardBuilder/DashboardBuilder.tsx
+++ b/superset-frontend/src/dashboard/components/DashboardBuilder/DashboardBuilder.tsx
@@ -582,7 +582,10 @@ const DashboardBuilder: FC<DashboardBuilderProps> = () => {
         {!hideDashboardHeader && <DashboardHeader />}
         {showFilterBar &&
           filterBarOrientation === FilterBarOrientation.HORIZONTAL && (
-            <FilterBar orientation={FilterBarOrientation.HORIZONTAL} />
+            <FilterBar
+              orientation={FilterBarOrientation.HORIZONTAL}
+              hidden={isReport}
+            />
           )}
         {dropIndicatorProps && <div {...dropIndicatorProps} />}
         {!isReport && topLevelTabs && !uiConfig.hideNav && (
@@ -654,18 +657,17 @@ const DashboardBuilder: FC<DashboardBuilderProps> = () => {
                 >
                   <StickyPanel ref={containerRef} width={filterBarWidth}>
                     <ErrorBoundary>
-                      {!isReport && (
-                        <FilterBar
-                          orientation={FilterBarOrientation.VERTICAL}
-                          verticalConfig={{
-                            filtersOpen: dashboardFiltersOpen,
-                            toggleFiltersBar: toggleDashboardFiltersOpen,
-                            width: filterBarWidth,
-                            height: filterBarHeight,
-                            offset: filterBarOffset,
-                          }}
-                        />
-                      )}
+                      <FilterBar
+                        orientation={FilterBarOrientation.VERTICAL}
+                        verticalConfig={{
+                          filtersOpen: dashboardFiltersOpen,
+                          toggleFiltersBar: toggleDashboardFiltersOpen,
+                          width: filterBarWidth,
+                          height: filterBarHeight,
+                          offset: filterBarOffset,
+                        }}
+                        hidden={isReport}
+                      />
                     </ErrorBoundary>
                   </StickyPanel>
                 </FiltersPanel>
diff --git a/superset-frontend/src/dashboard/components/nativeFilters/FilterBar/index.tsx b/superset-frontend/src/dashboard/components/nativeFilters/FilterBar/index.tsx
index 5b2ce29326..52d36c254c 100644
--- a/superset-frontend/src/dashboard/components/nativeFilters/FilterBar/index.tsx
+++ b/superset-frontend/src/dashboard/components/nativeFilters/FilterBar/index.tsx
@@ -28,6 +28,7 @@ import {
   SLOW_DEBOUNCE,
   isNativeFilter,
   usePrevious,
+  styled,
 } from '@superset-ui/core';
 import { useHistory } from 'react-router-dom';
 import { updateDataMask, clearDataMask } from 'src/dataMask/actions';
@@ -51,6 +52,10 @@ import ActionButtons from './ActionButtons';
 import Horizontal from './Horizontal';
 import Vertical from './Vertical';
 
+const HiddenFilterBar = styled.div`
+  display: none;
+`;
+
 const EXCLUDED_URL_PARAMS: string[] = [
   URL_PARAMS.nativeFilters.name,
   URL_PARAMS.permalinkKey.name,
@@ -113,6 +118,7 @@ export const FilterBarScrollContext = createContext(false);
 const FilterBar: React.FC<FiltersBarProps> = ({
   orientation = FilterBarOrientation.VERTICAL,
   verticalConfig,
+  hidden = false,
 }) => {
   const history = useHistory();
   const dataMaskApplied: DataMaskStateWithId = useNativeFiltersDataMask();
@@ -247,31 +253,38 @@ const FilterBar: React.FC<FiltersBarProps> = ({
     />
   );
 
-  return orientation === FilterBarOrientation.HORIZONTAL ? (
-    <Horizontal
-      actions={actions}
-      canEdit={canEdit}
-      dashboardId={dashboardId}
-      dataMaskSelected={dataMaskSelected}
-      filterValues={filterValues}
-      isInitialized={isInitialized}
-      onSelectionChange={handleFilterSelectionChange}
-    />
-  ) : verticalConfig ? (
-    <Vertical
-      actions={actions}
-      canEdit={canEdit}
-      dataMaskSelected={dataMaskSelected}
-      filtersOpen={verticalConfig.filtersOpen}
-      filterValues={filterValues}
-      isInitialized={isInitialized}
-      isDisabled={isApplyDisabled}
-      height={verticalConfig.height}
-      offset={verticalConfig.offset}
-      onSelectionChange={handleFilterSelectionChange}
-      toggleFiltersBar={verticalConfig.toggleFiltersBar}
-      width={verticalConfig.width}
-    />
-  ) : null;
+  const filterBarComponent =
+    orientation === FilterBarOrientation.HORIZONTAL ? (
+      <Horizontal
+        actions={actions}
+        canEdit={canEdit}
+        dashboardId={dashboardId}
+        dataMaskSelected={dataMaskSelected}
+        filterValues={filterValues}
+        isInitialized={isInitialized}
+        onSelectionChange={handleFilterSelectionChange}
+      />
+    ) : verticalConfig ? (
+      <Vertical
+        actions={actions}
+        canEdit={canEdit}
+        dataMaskSelected={dataMaskSelected}
+        filtersOpen={verticalConfig.filtersOpen}
+        filterValues={filterValues}
+        isInitialized={isInitialized}
+        isDisabled={isApplyDisabled}
+        height={verticalConfig.height}
+        offset={verticalConfig.offset}
+        onSelectionChange={handleFilterSelectionChange}
+        toggleFiltersBar={verticalConfig.toggleFiltersBar}
+        width={verticalConfig.width}
+      />
+    ) : null;
+
+  return hidden ? (
+    <HiddenFilterBar>{filterBarComponent}</HiddenFilterBar>
+  ) : (
+    filterBarComponent
+  );
 };
 export default React.memo(FilterBar);
diff --git a/superset-frontend/src/dashboard/components/nativeFilters/FilterBar/types.ts b/superset-frontend/src/dashboard/components/nativeFilters/FilterBar/types.ts
index ac7ed70456..e146690a99 100644
--- a/superset-frontend/src/dashboard/components/nativeFilters/FilterBar/types.ts
+++ b/superset-frontend/src/dashboard/components/nativeFilters/FilterBar/types.ts
@@ -47,6 +47,7 @@ interface VerticalBarConfig {
 }
 
 export interface FiltersBarProps {
+  hidden?: boolean;
   orientation: FilterBarOrientation;
   verticalConfig?: VerticalBarConfig;
 }


[superset] 06/07: solve conflict

Posted by hu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

hugh pushed a commit to branch 2023.13.1
in repository https://gitbox.apache.org/repos/asf/superset.git

commit 971c6c3d2f874bd46d6780c2f03e94c23bc20100
Author: Akshay Kumar Sharma <ak...@gmail.com>
AuthorDate: Tue Apr 11 02:07:16 2023 +0530

    solve conflict
---
 superset/models/sql_lab.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/superset/models/sql_lab.py b/superset/models/sql_lab.py
index f12c8d6c45..11ea7ef528 100644
--- a/superset/models/sql_lab.py
+++ b/superset/models/sql_lab.py
@@ -372,7 +372,7 @@ class SavedQuery(Model, AuditMixinNullable, ExtraJSONMixin, ImportExportMixin):
             secondary="tagged_object",
             primaryjoin="and_(SavedQuery.id == TaggedObject.object_id)",
             secondaryjoin="and_(TaggedObject.tag_id == Tag.id, "
-            "TaggedObject.object_type == 'saved_query')",
+            "TaggedObject.object_type == 'query')",
         )
 
     export_parent = "database"


[superset] 05/07: fix(charts): Time range filters are not being applied to charts that were overwritten (#23589)

Posted by hu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

hugh pushed a commit to branch 2023.13.1
in repository https://gitbox.apache.org/repos/asf/superset.git

commit 83bda092b85066a9d710ad9b0fe340a89b5108f0
Author: Antonio Rivero <38...@users.noreply.github.com>
AuthorDate: Fri Apr 7 15:44:47 2023 -0400

    fix(charts): Time range filters are not being applied to charts that were overwritten (#23589)
---
 .../src/explore/actions/saveModalActions.js        |  40 +++++--
 .../src/explore/actions/saveModalActions.test.js   | 122 +++++++++++++++++++++
 2 files changed, 154 insertions(+), 8 deletions(-)

diff --git a/superset-frontend/src/explore/actions/saveModalActions.js b/superset-frontend/src/explore/actions/saveModalActions.js
index 14bbe09995..1c3c3b765f 100644
--- a/superset-frontend/src/explore/actions/saveModalActions.js
+++ b/superset-frontend/src/explore/actions/saveModalActions.js
@@ -19,6 +19,7 @@
 import rison from 'rison';
 import { SupersetClient, t } from '@superset-ui/core';
 import { addSuccessToast } from 'src/components/MessageToasts/actions';
+import { isEmpty } from 'lodash';
 import { buildV1ChartDataPayload } from '../exploreUtils';
 
 const ADHOC_FILTER_REGEX = /^adhoc_filters/;
@@ -76,19 +77,35 @@ export function removeSaveModalAlert() {
   return { type: REMOVE_SAVE_MODAL_ALERT };
 }
 
+const extractAddHocFiltersFromFormData = formDataToHandle =>
+  Object.entries(formDataToHandle).reduce(
+    (acc, [key, value]) =>
+      ADHOC_FILTER_REGEX.test(key)
+        ? { ...acc, [key]: value?.filter(f => !f.isExtra) }
+        : acc,
+    {},
+  );
+
 export const getSlicePayload = (
   sliceName,
   formDataWithNativeFilters,
   dashboards,
   owners,
+  formDataFromSlice = {},
 ) => {
-  const adhocFilters = Object.entries(formDataWithNativeFilters).reduce(
-    (acc, [key, value]) =>
-      ADHOC_FILTER_REGEX.test(key)
-        ? { ...acc, [key]: value?.filter(f => !f.isExtra) }
-        : acc,
-    {},
+  let adhocFilters = extractAddHocFiltersFromFormData(
+    formDataWithNativeFilters,
   );
+
+  // Retain adhoc_filters from the slice if no adhoc_filters are present
+  // after overwriting a chart.  This ensures the dashboard can continue
+  // to filter the chart. Before, any time range filter applied in the dashboard
+  // would end up as an extra filter and when overwriting the chart the original
+  // time range adhoc_filter was lost
+  if (isEmpty(adhocFilters?.adhoc_filters) && !isEmpty(formDataFromSlice)) {
+    adhocFilters = extractAddHocFiltersFromFormData(formDataFromSlice);
+  }
+
   const formData = {
     ...formDataWithNativeFilters,
     ...adhocFilters,
@@ -157,8 +174,9 @@ const addToasts = (isNewSlice, sliceName, addedToDashboard) => {
 
 //  Update existing slice
 export const updateSlice =
-  ({ slice_id: sliceId, owners }, sliceName, dashboards, addedToDashboard) =>
+  (slice, sliceName, dashboards, addedToDashboard) =>
   async (dispatch, getState) => {
+    const { slice_id: sliceId, owners, form_data: formDataFromSlice } = slice;
     const {
       explore: {
         form_data: { url_params: _, ...formData },
@@ -167,7 +185,13 @@ export const updateSlice =
     try {
       const response = await SupersetClient.put({
         endpoint: `/api/v1/chart/${sliceId}`,
-        jsonPayload: getSlicePayload(sliceName, formData, dashboards, owners),
+        jsonPayload: getSlicePayload(
+          sliceName,
+          formData,
+          dashboards,
+          owners,
+          formDataFromSlice,
+        ),
       });
 
       dispatch(saveSliceSuccess());
diff --git a/superset-frontend/src/explore/actions/saveModalActions.test.js b/superset-frontend/src/explore/actions/saveModalActions.test.js
index 8e4a4e5abd..f89729f5ff 100644
--- a/superset-frontend/src/explore/actions/saveModalActions.test.js
+++ b/superset-frontend/src/explore/actions/saveModalActions.test.js
@@ -31,6 +31,7 @@ import {
   SAVE_SLICE_FAILED,
   SAVE_SLICE_SUCCESS,
   updateSlice,
+  getSlicePayload,
 } from './saveModalActions';
 
 /**
@@ -339,3 +340,124 @@ test('getSliceDashboards with slice handles failure', async () => {
   expect(dispatch.callCount).toBe(1);
   expect(dispatch.getCall(0).args[0].type).toBe(SAVE_SLICE_FAILED);
 });
+
+describe('getSlicePayload', () => {
+  const sliceName = 'Test Slice';
+  const formDataWithNativeFilters = {
+    datasource: '22__table',
+    viz_type: 'pie',
+    adhoc_filters: [],
+  };
+  const dashboards = [5];
+  const owners = [1];
+  const formDataFromSlice = {
+    datasource: '22__table',
+    viz_type: 'pie',
+    adhoc_filters: [
+      {
+        clause: 'WHERE',
+        subject: 'year',
+        operator: 'TEMPORAL_RANGE',
+        comparator: 'No filter',
+        expressionType: 'SIMPLE',
+      },
+    ],
+  };
+
+  test('should return the correct payload when no adhoc_filters are present in formDataWithNativeFilters', () => {
+    const result = getSlicePayload(
+      sliceName,
+      formDataWithNativeFilters,
+      dashboards,
+      owners,
+      formDataFromSlice,
+    );
+    expect(result).toHaveProperty('params');
+    expect(result).toHaveProperty('slice_name', sliceName);
+    expect(result).toHaveProperty(
+      'viz_type',
+      formDataWithNativeFilters.viz_type,
+    );
+    expect(result).toHaveProperty('datasource_id', 22);
+    expect(result).toHaveProperty('datasource_type', 'table');
+    expect(result).toHaveProperty('dashboards', dashboards);
+    expect(result).toHaveProperty('owners', owners);
+    expect(result).toHaveProperty('query_context');
+    expect(JSON.parse(result.params).adhoc_filters).toEqual(
+      formDataFromSlice.adhoc_filters,
+    );
+  });
+
+  test('should return the correct payload when adhoc_filters are present in formDataWithNativeFilters', () => {
+    const formDataWithAdhocFilters = {
+      ...formDataWithNativeFilters,
+      adhoc_filters: [
+        {
+          clause: 'WHERE',
+          subject: 'year',
+          operator: 'TEMPORAL_RANGE',
+          comparator: 'No filter',
+          expressionType: 'SIMPLE',
+        },
+      ],
+    };
+    const result = getSlicePayload(
+      sliceName,
+      formDataWithAdhocFilters,
+      dashboards,
+      owners,
+      formDataFromSlice,
+    );
+    expect(result).toHaveProperty('params');
+    expect(result).toHaveProperty('slice_name', sliceName);
+    expect(result).toHaveProperty(
+      'viz_type',
+      formDataWithAdhocFilters.viz_type,
+    );
+    expect(result).toHaveProperty('datasource_id', 22);
+    expect(result).toHaveProperty('datasource_type', 'table');
+    expect(result).toHaveProperty('dashboards', dashboards);
+    expect(result).toHaveProperty('owners', owners);
+    expect(result).toHaveProperty('query_context');
+    expect(JSON.parse(result.params).adhoc_filters).toEqual(
+      formDataWithAdhocFilters.adhoc_filters,
+    );
+  });
+
+  test('should return the correct payload when formDataWithNativeFilters has a filter with isExtra set to true', () => {
+    const formDataWithAdhocFiltersWithExtra = {
+      ...formDataWithNativeFilters,
+      adhoc_filters: [
+        {
+          clause: 'WHERE',
+          subject: 'year',
+          operator: 'TEMPORAL_RANGE',
+          comparator: 'No filter',
+          expressionType: 'SIMPLE',
+          isExtra: true,
+        },
+      ],
+    };
+    const result = getSlicePayload(
+      sliceName,
+      formDataWithAdhocFiltersWithExtra,
+      dashboards,
+      owners,
+      formDataFromSlice,
+    );
+    expect(result).toHaveProperty('params');
+    expect(result).toHaveProperty('slice_name', sliceName);
+    expect(result).toHaveProperty(
+      'viz_type',
+      formDataWithAdhocFiltersWithExtra.viz_type,
+    );
+    expect(result).toHaveProperty('datasource_id', 22);
+    expect(result).toHaveProperty('datasource_type', 'table');
+    expect(result).toHaveProperty('dashboards', dashboards);
+    expect(result).toHaveProperty('owners', owners);
+    expect(result).toHaveProperty('query_context');
+    expect(JSON.parse(result.params).adhoc_filters).toEqual(
+      formDataFromSlice.adhoc_filters,
+    );
+  });
+});
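The retention logic above is compact enough to restate on its own. A minimal sketch of the same fallback in Python (function names hypothetical, mirroring extractAddHocFiltersFromFormData and the getSlicePayload change in the diff):

    def extract_adhoc_filters(form_data: dict) -> dict:
        # Keep only adhoc_filters* keys, dropping filters the dashboard
        # injected (marked isExtra), as the reducer in the diff does.
        return {
            key: [f for f in (value or []) if not f.get("isExtra")]
            for key, value in form_data.items()
            if key.startswith("adhoc_filters")
        }

    def resolve_adhoc_filters(form_data: dict, form_data_from_slice: dict) -> dict:
        adhoc = extract_adhoc_filters(form_data)
        # If overwriting left no adhoc filters, fall back to the filters stored
        # on the slice, so a dashboard time range cannot erase the original one.
        if not adhoc.get("adhoc_filters") and form_data_from_slice:
            adhoc = extract_adhoc_filters(form_data_from_slice)
        return adhoc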


[superset] 07/07: chore: Refactor ExploreMixin to power both Datasets (SqlaTable) and Query models (#22853)

Posted by hu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

hugh pushed a commit to branch 2023.13.1
in repository https://gitbox.apache.org/repos/asf/superset.git

commit ba7b6fd7d9a5eb02ca997965f159e46685f7e2d4
Author: Hugh A. Miles II <hu...@gmail.com>
AuthorDate: Mon Apr 10 16:32:52 2023 -0400

    chore: Refactor ExploreMixin to power both Datasets (SqlaTable) and Query models (#22853)
---
 .../SqlLab/components/SaveDatasetModal/index.tsx   |   6 +-
 superset-frontend/src/SqlLab/fixtures.ts           |   6 +-
 .../controls/MetricControl/AdhocMetricOption.jsx   |   2 +-
 superset/connectors/sqla/models.py                 | 766 ++-------------------
 superset/models/helpers.py                         | 487 ++++++++-----
 superset/models/sql_lab.py                         |  65 +-
 superset/utils/core.py                             |  12 +-
 superset/views/core.py                             |   2 +-
 tests/integration_tests/charts/data/api_tests.py   |   4 +-
 tests/integration_tests/sqllab_tests.py            |  24 +-
 10 files changed, 413 insertions(+), 961 deletions(-)
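The shape of this refactor, reduced to a sketch: query-building behavior that previously lived only on SqlaTable moves into a mixin that both models inherit, so one implementation serves datasets and saved queries alike. A toy illustration of the pattern only, not the actual ExploreMixin API:

    from typing import List

    class ExploreMixin:
        # Shared behavior; subclasses supply the FROM clause.
        def from_clause(self) -> str:
            raise NotImplementedError

        def select_star(self, columns: List[str]) -> str:
            cols = ", ".join(columns) if columns else "*"
            return f"SELECT {cols} FROM {self.from_clause()}"

    class SqlaTable(ExploreMixin):  # dataset: a physical table
        def __init__(self, table_name: str):
            self.table_name = table_name

        def from_clause(self) -> str:
            return self.table_name

    class Query(ExploreMixin):  # saved SQL: a virtual table
        def __init__(self, sql: str):
            self.sql = sql

        def from_clause(self) -> str:
            return f"({self.sql}) AS virtual_table"

    print(SqlaTable("birth_names").select_star(["name"]))
    print(Query("SELECT * FROM birth_names").select_star(["name"]))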

diff --git a/superset-frontend/src/SqlLab/components/SaveDatasetModal/index.tsx b/superset-frontend/src/SqlLab/components/SaveDatasetModal/index.tsx
index 949323b9aa..402e26462e 100644
--- a/superset-frontend/src/SqlLab/components/SaveDatasetModal/index.tsx
+++ b/superset-frontend/src/SqlLab/components/SaveDatasetModal/index.tsx
@@ -61,7 +61,7 @@ export type ExploreQuery = QueryResponse & {
 };
 
 export interface ISimpleColumn {
-  name?: string | null;
+  column_name?: string | null;
   type?: string | null;
   is_dttm?: boolean | null;
 }
@@ -216,7 +216,7 @@ export const SaveDatasetModal = ({
         ...formDataWithDefaults,
         datasource: `${datasetToOverwrite.datasetid}__table`,
         ...(defaultVizType === 'table' && {
-          all_columns: datasource?.columns?.map(column => column.name),
+          all_columns: datasource?.columns?.map(column => column.column_name),
         }),
       }),
     ]);
@@ -301,7 +301,7 @@ export const SaveDatasetModal = ({
           ...formDataWithDefaults,
           datasource: `${data.table_id}__table`,
           ...(defaultVizType === 'table' && {
-            all_columns: selectedColumns.map(column => column.name),
+            all_columns: selectedColumns.map(column => column.column_name),
           }),
         }),
       )
diff --git a/superset-frontend/src/SqlLab/fixtures.ts b/superset-frontend/src/SqlLab/fixtures.ts
index fcb0fff8e3..ba88a41b0a 100644
--- a/superset-frontend/src/SqlLab/fixtures.ts
+++ b/superset-frontend/src/SqlLab/fixtures.ts
@@ -692,17 +692,17 @@ export const testQuery: ISaveableDatasource = {
   sql: 'SELECT *',
   columns: [
     {
-      name: 'Column 1',
+      column_name: 'Column 1',
       type: DatasourceType.Query,
       is_dttm: false,
     },
     {
-      name: 'Column 3',
+      column_name: 'Column 3',
       type: DatasourceType.Query,
       is_dttm: false,
     },
     {
-      name: 'Column 2',
+      column_name: 'Column 2',
       type: DatasourceType.Query,
       is_dttm: true,
     },
diff --git a/superset-frontend/src/explore/components/controls/MetricControl/AdhocMetricOption.jsx b/superset-frontend/src/explore/components/controls/MetricControl/AdhocMetricOption.jsx
index 80cf879f7f..c74212f0ba 100644
--- a/superset-frontend/src/explore/components/controls/MetricControl/AdhocMetricOption.jsx
+++ b/superset-frontend/src/explore/components/controls/MetricControl/AdhocMetricOption.jsx
@@ -48,7 +48,7 @@ class AdhocMetricOption extends React.PureComponent {
   }
 
   onRemoveMetric(e) {
-    e.stopPropagation();
+    e?.stopPropagation();
     this.props.onRemoveMetric(this.props.index);
   }
 
diff --git a/superset/connectors/sqla/models.py b/superset/connectors/sqla/models.py
index b76e423caf..fd1276f592 100644
--- a/superset/connectors/sqla/models.py
+++ b/superset/connectors/sqla/models.py
@@ -31,7 +31,6 @@ from typing import (
     Dict,
     Hashable,
     List,
-    NamedTuple,
     Optional,
     Set,
     Tuple,
@@ -50,11 +49,9 @@ from flask_babel import lazy_gettext as _
 from jinja2.exceptions import TemplateError
 from sqlalchemy import (
     and_,
-    asc,
     Boolean,
     Column,
     DateTime,
-    desc,
     Enum,
     ForeignKey,
     inspect,
@@ -80,13 +77,11 @@ from sqlalchemy.orm.mapper import Mapper
 from sqlalchemy.schema import UniqueConstraint
 from sqlalchemy.sql import column, ColumnElement, literal_column, table
 from sqlalchemy.sql.elements import ColumnClause, TextClause
-from sqlalchemy.sql.expression import Label, Select, TextAsFrom
+from sqlalchemy.sql.expression import Label, TextAsFrom
 from sqlalchemy.sql.selectable import Alias, TableClause
 
 from superset import app, db, is_feature_enabled, security_manager
-from superset.advanced_data_type.types import AdvancedDataTypeResponse
 from superset.common.db_query_status import QueryStatus
-from superset.common.utils.time_range_utils import get_since_until_from_time_range
 from superset.connectors.base.models import BaseColumn, BaseDatasource, BaseMetric
 from superset.connectors.sqla.utils import (
     find_cached_objects_in_session,
@@ -98,7 +93,6 @@ from superset.connectors.sqla.utils import (
 from superset.datasets.models import Dataset as NewDataset
 from superset.db_engine_specs.base import BaseEngineSpec, TimestampExpression
 from superset.exceptions import (
-    AdvancedDataTypeResponseError,
     ColumnNotFoundException,
     DatasetInvalidPermissionEvaluationException,
     QueryClauseValidationException,
@@ -106,7 +100,6 @@ from superset.exceptions import (
     SupersetGenericDBErrorException,
     SupersetSecurityException,
 )
-from superset.extensions import feature_flag_manager
 from superset.jinja_context import (
     BaseTemplateProcessor,
     ExtraCache,
@@ -114,26 +107,17 @@ from superset.jinja_context import (
 )
 from superset.models.annotations import Annotation
 from superset.models.core import Database
-from superset.models.helpers import AuditMixinNullable, CertificationMixin, QueryResult
-from superset.sql_parse import ParsedQuery, sanitize_clause
-from superset.superset_typing import (
-    AdhocColumn,
-    AdhocMetric,
-    Column as ColumnTyping,
-    Metric,
-    OrderBy,
-    QueryObjectDict,
+from superset.models.helpers import (
+    AuditMixinNullable,
+    CertificationMixin,
+    ExploreMixin,
+    QueryResult,
+    QueryStringExtended,
 )
+from superset.sql_parse import ParsedQuery, sanitize_clause
+from superset.superset_typing import AdhocColumn, AdhocMetric, Metric, QueryObjectDict
 from superset.utils import core as utils
-from superset.utils.core import (
-    GenericDataType,
-    get_column_name,
-    get_username,
-    is_adhoc_column,
-    MediumText,
-    QueryObjectFilterClause,
-    remove_duplicates,
-)
+from superset.utils.core import GenericDataType, get_username, MediumText
 
 config = app.config
 metadata = Model.metadata  # pylint: disable=no-member
@@ -150,26 +134,6 @@ ADDITIVE_METRIC_TYPES = {
 ADDITIVE_METRIC_TYPES_LOWER = {op.lower() for op in ADDITIVE_METRIC_TYPES}
 
 
-class SqlaQuery(NamedTuple):
-    applied_template_filters: List[str]
-    applied_filter_columns: List[ColumnTyping]
-    rejected_filter_columns: List[ColumnTyping]
-    cte: Optional[str]
-    extra_cache_keys: List[Any]
-    labels_expected: List[str]
-    prequeries: List[str]
-    sqla_query: Select
-
-
-class QueryStringExtended(NamedTuple):
-    applied_template_filters: Optional[List[str]]
-    applied_filter_columns: List[ColumnTyping]
-    rejected_filter_columns: List[ColumnTyping]
-    labels_expected: List[str]
-    prequeries: List[str]
-    sql: str
-
-
 @dataclass
 class MetadataResult:
     added: List[str] = field(default_factory=list)
@@ -310,6 +274,35 @@ class TableColumn(Model, BaseColumn, CertificationMixin):
     def type_generic(self) -> Optional[utils.GenericDataType]:
         if self.is_dttm:
             return GenericDataType.TEMPORAL
+
+        bool_types = ("BOOL",)
+        num_types = (
+            "DOUBLE",
+            "FLOAT",
+            "INT",
+            "BIGINT",
+            "NUMBER",
+            "LONG",
+            "REAL",
+            "NUMERIC",
+            "DECIMAL",
+            "MONEY",
+        )
+        date_types = ("DATE", "TIME")
+        str_types = ("VARCHAR", "STRING", "CHAR")
+
+        if self.table is None:
+            # Query.TableColumns don't have a reference to a table.db_engine_spec
+            # reference so this logic will manage rendering types
+            if self.type and any(map(lambda t: t in self.type.upper(), str_types)):
+                return GenericDataType.STRING
+            if self.type and any(map(lambda t: t in self.type.upper(), bool_types)):
+                return GenericDataType.BOOLEAN
+            if self.type and any(map(lambda t: t in self.type.upper(), num_types)):
+                return GenericDataType.NUMERIC
+            if self.type and any(map(lambda t: t in self.type.upper(), date_types)):
+                return GenericDataType.TEMPORAL
+
         column_spec = self.db_engine_spec.get_column_spec(
             self.type, db_extra=self.db_extra
         )
@@ -545,8 +538,10 @@ def _process_sql_expression(
     return expression
 
 
-class SqlaTable(Model, BaseDatasource):  # pylint: disable=too-many-public-methods
-    """An ORM object for SqlAlchemy table references."""
+class SqlaTable(
+    Model, BaseDatasource, ExploreMixin
+):  # pylint: disable=too-many-public-methods
+    """An ORM object for SqlAlchemy table references"""
 
     type = "table"
     query_language = "sql"
@@ -626,6 +621,10 @@ class SqlaTable(Model, BaseDatasource):  # pylint: disable=too-many-public-metho
     def __repr__(self) -> str:  # pylint: disable=invalid-repr-returned
         return self.name
 
+    @property
+    def db_extra(self) -> Dict[str, Any]:
+        return self.database.get_extra()
+
     @staticmethod
     def _apply_cte(sql: str, cte: Optional[str]) -> str:
         """
@@ -1009,6 +1008,7 @@ class SqlaTable(Model, BaseDatasource):  # pylint: disable=too-many-public-metho
     def adhoc_column_to_sqla(
         self,
         col: AdhocColumn,
+        force_type_check: bool = False,
         template_processor: Optional[BaseTemplateProcessor] = None,
     ) -> ColumnElement:
         """
@@ -1147,676 +1147,6 @@ class SqlaTable(Model, BaseDatasource):  # pylint: disable=too-many-public-metho
     def text(self, clause: str) -> TextClause:
         return self.db_engine_spec.get_text_clause(clause)
 
-    def get_sqla_query(  # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements
-        self,
-        apply_fetch_values_predicate: bool = False,
-        columns: Optional[List[ColumnTyping]] = None,
-        extras: Optional[Dict[str, Any]] = None,
-        filter: Optional[  # pylint: disable=redefined-builtin
-            List[QueryObjectFilterClause]
-        ] = None,
-        from_dttm: Optional[datetime] = None,
-        granularity: Optional[str] = None,
-        groupby: Optional[List[Column]] = None,
-        inner_from_dttm: Optional[datetime] = None,
-        inner_to_dttm: Optional[datetime] = None,
-        is_rowcount: bool = False,
-        is_timeseries: bool = True,
-        metrics: Optional[List[Metric]] = None,
-        orderby: Optional[List[OrderBy]] = None,
-        order_desc: bool = True,
-        to_dttm: Optional[datetime] = None,
-        series_columns: Optional[List[Column]] = None,
-        series_limit: Optional[int] = None,
-        series_limit_metric: Optional[Metric] = None,
-        row_limit: Optional[int] = None,
-        row_offset: Optional[int] = None,
-        timeseries_limit: Optional[int] = None,
-        timeseries_limit_metric: Optional[Metric] = None,
-        time_shift: Optional[str] = None,
-    ) -> SqlaQuery:
-        """Querying any sqla table from this common interface"""
-        if granularity not in self.dttm_cols and granularity is not None:
-            granularity = self.main_dttm_col
-
-        extras = extras or {}
-        time_grain = extras.get("time_grain_sqla")
-
-        template_kwargs = {
-            "columns": columns,
-            "from_dttm": from_dttm.isoformat() if from_dttm else None,
-            "groupby": groupby,
-            "metrics": metrics,
-            "row_limit": row_limit,
-            "row_offset": row_offset,
-            "time_column": granularity,
-            "time_grain": time_grain,
-            "to_dttm": to_dttm.isoformat() if to_dttm else None,
-            "table_columns": [col.column_name for col in self.columns],
-            "filter": filter,
-        }
-        columns = columns or []
-        groupby = groupby or []
-        rejected_adhoc_filters_columns: List[Union[str, ColumnTyping]] = []
-        applied_adhoc_filters_columns: List[Union[str, ColumnTyping]] = []
-        series_column_names = utils.get_column_names(series_columns or [])
-        # deprecated, to be removed in 2.0
-        if is_timeseries and timeseries_limit:
-            series_limit = timeseries_limit
-        series_limit_metric = series_limit_metric or timeseries_limit_metric
-        template_kwargs.update(self.template_params_dict)
-        extra_cache_keys: List[Any] = []
-        template_kwargs["extra_cache_keys"] = extra_cache_keys
-        removed_filters: List[str] = []
-        applied_template_filters: List[str] = []
-        template_kwargs["removed_filters"] = removed_filters
-        template_kwargs["applied_filters"] = applied_template_filters
-        template_processor = self.get_template_processor(**template_kwargs)
-        db_engine_spec = self.db_engine_spec
-        prequeries: List[str] = []
-        orderby = orderby or []
-        need_groupby = bool(metrics is not None or groupby)
-        metrics = metrics or []
-
-        # For backward compatibility
-        if granularity not in self.dttm_cols and granularity is not None:
-            granularity = self.main_dttm_col
-
-        columns_by_name: Dict[str, TableColumn] = {
-            col.column_name: col for col in self.columns
-        }
-
-        metrics_by_name: Dict[str, SqlMetric] = {m.metric_name: m for m in self.metrics}
-
-        if not granularity and is_timeseries:
-            raise QueryObjectValidationError(
-                _(
-                    "Datetime column not provided as part table configuration "
-                    "and is required by this type of chart"
-                )
-            )
-        if not metrics and not columns and not groupby:
-            raise QueryObjectValidationError(_("Empty query?"))
-
-        metrics_exprs: List[ColumnElement] = []
-        for metric in metrics:
-            if utils.is_adhoc_metric(metric):
-                assert isinstance(metric, dict)
-                metrics_exprs.append(
-                    self.adhoc_metric_to_sqla(
-                        metric=metric,
-                        columns_by_name=columns_by_name,
-                        template_processor=template_processor,
-                    )
-                )
-            elif isinstance(metric, str) and metric in metrics_by_name:
-                metrics_exprs.append(
-                    metrics_by_name[metric].get_sqla_col(
-                        template_processor=template_processor
-                    )
-                )
-            else:
-                raise QueryObjectValidationError(
-                    _("Metric '%(metric)s' does not exist", metric=metric)
-                )
-
-        if metrics_exprs:
-            main_metric_expr = metrics_exprs[0]
-        else:
-            main_metric_expr, label = literal_column("COUNT(*)"), "count"
-            main_metric_expr = self.make_sqla_column_compatible(main_metric_expr, label)
-
-        # To ensure correct handling of the ORDER BY labeling we need to reference the
-        # metric instance if defined in the SELECT clause.
-        # use the key of the ColumnClause for the expected label
-        metrics_exprs_by_label = {m.key: m for m in metrics_exprs}
-        metrics_exprs_by_expr = {str(m): m for m in metrics_exprs}
-
-        # Since orderby may use adhoc metrics, too; we need to process them first
-        orderby_exprs: List[ColumnElement] = []
-        for orig_col, ascending in orderby:
-            col: Union[AdhocMetric, ColumnElement] = orig_col
-            if isinstance(col, dict):
-                col = cast(AdhocMetric, col)
-                if col.get("sqlExpression"):
-                    col["sqlExpression"] = _process_sql_expression(
-                        expression=col["sqlExpression"],
-                        database_id=self.database_id,
-                        schema=self.schema,
-                        template_processor=template_processor,
-                    )
-                if utils.is_adhoc_metric(col):
-                    # add adhoc sort by column to columns_by_name if not exists
-                    col = self.adhoc_metric_to_sqla(col, columns_by_name)
-                    # if the adhoc metric has been defined before
-                    # use the existing instance.
-                    col = metrics_exprs_by_expr.get(str(col), col)
-                    need_groupby = True
-            elif col in columns_by_name:
-                col = columns_by_name[col].get_sqla_col(
-                    template_processor=template_processor
-                )
-            elif col in metrics_exprs_by_label:
-                col = metrics_exprs_by_label[col]
-                need_groupby = True
-            elif col in metrics_by_name:
-                col = metrics_by_name[col].get_sqla_col(
-                    template_processor=template_processor
-                )
-                need_groupby = True
-
-            if isinstance(col, ColumnElement):
-                orderby_exprs.append(col)
-            else:
-                # Could not convert a column reference to valid ColumnElement
-                raise QueryObjectValidationError(
-                    _("Unknown column used in orderby: %(col)s", col=orig_col)
-                )
-
-        select_exprs: List[Union[Column, Label]] = []
-        groupby_all_columns = {}
-        groupby_series_columns = {}
-
-        # filter out the pseudo column  __timestamp from columns
-        columns = [col for col in columns if col != utils.DTTM_ALIAS]
-        dttm_col = columns_by_name.get(granularity) if granularity else None
-
-        if need_groupby:
-            # dedup columns while preserving order
-            columns = groupby or columns
-            for selected in columns:
-                if isinstance(selected, str):
-                    # if groupby field/expr equals granularity field/expr
-                    if selected == granularity:
-                        table_col = columns_by_name[selected]
-                        outer = table_col.get_timestamp_expression(
-                            time_grain=time_grain,
-                            label=selected,
-                            template_processor=template_processor,
-                        )
-                    # if groupby field equals a selected column
-                    elif selected in columns_by_name:
-                        outer = columns_by_name[selected].get_sqla_col(
-                            template_processor=template_processor
-                        )
-                    else:
-                        selected = validate_adhoc_subquery(
-                            selected,
-                            self.database_id,
-                            self.schema,
-                        )
-                        outer = literal_column(f"({selected})")
-                        outer = self.make_sqla_column_compatible(outer, selected)
-                else:
-                    outer = self.adhoc_column_to_sqla(
-                        col=selected, template_processor=template_processor
-                    )
-                groupby_all_columns[outer.name] = outer
-                if (
-                    is_timeseries and not series_column_names
-                ) or outer.name in series_column_names:
-                    groupby_series_columns[outer.name] = outer
-                select_exprs.append(outer)
-        elif columns:
-            for selected in columns:
-                if is_adhoc_column(selected):
-                    _sql = selected["sqlExpression"]
-                    _column_label = selected["label"]
-                elif isinstance(selected, str):
-                    _sql = selected
-                    _column_label = selected
-
-                selected = validate_adhoc_subquery(
-                    _sql,
-                    self.database_id,
-                    self.schema,
-                )
-                select_exprs.append(
-                    columns_by_name[selected].get_sqla_col(
-                        template_processor=template_processor
-                    )
-                    if isinstance(selected, str) and selected in columns_by_name
-                    else self.make_sqla_column_compatible(
-                        literal_column(selected), _column_label
-                    )
-                )
-            metrics_exprs = []
-
-        if granularity:
-            if granularity not in columns_by_name or not dttm_col:
-                raise QueryObjectValidationError(
-                    _(
-                        'Time column "%(col)s" does not exist in dataset',
-                        col=granularity,
-                    )
-                )
-            time_filters = []
-
-            if is_timeseries:
-                timestamp = dttm_col.get_timestamp_expression(
-                    time_grain=time_grain, template_processor=template_processor
-                )
-                # always put timestamp as the first column
-                select_exprs.insert(0, timestamp)
-                groupby_all_columns[timestamp.name] = timestamp
-
-            # Use main dttm column to support index with secondary dttm columns.
-            if (
-                db_engine_spec.time_secondary_columns
-                and self.main_dttm_col in self.dttm_cols
-                and self.main_dttm_col != dttm_col.column_name
-            ):
-                time_filters.append(
-                    columns_by_name[self.main_dttm_col].get_time_filter(
-                        start_dttm=from_dttm,
-                        end_dttm=to_dttm,
-                        template_processor=template_processor,
-                    )
-                )
-            time_filters.append(
-                dttm_col.get_time_filter(
-                    start_dttm=from_dttm,
-                    end_dttm=to_dttm,
-                    template_processor=template_processor,
-                )
-            )
-
-        # Always remove duplicates by column name, as sometimes `metrics_exprs`
-        # can have the same name as a groupby column (e.g. when users use
-        # raw columns as custom SQL adhoc metric).
-        select_exprs = remove_duplicates(
-            select_exprs + metrics_exprs, key=lambda x: x.name
-        )
-
-        # Expected output columns
-        labels_expected = [c.key for c in select_exprs]
-
-        # Order by columns are "hidden" columns, some databases require them
-        # always be present in SELECT if an aggregation function is used
-        if not db_engine_spec.allows_hidden_orderby_agg:
-            select_exprs = remove_duplicates(select_exprs + orderby_exprs)
-
-        qry = sa.select(select_exprs)
-
-        tbl, cte = self.get_from_clause(template_processor)
-
-        if groupby_all_columns:
-            qry = qry.group_by(*groupby_all_columns.values())
-
-        where_clause_and = []
-        having_clause_and = []
-
-        for flt in filter:  # type: ignore
-            if not all(flt.get(s) for s in ["col", "op"]):
-                continue
-            flt_col = flt["col"]
-            val = flt.get("val")
-            op = flt["op"].upper()
-            col_obj: Optional[TableColumn] = None
-            sqla_col: Optional[Column] = None
-            if flt_col == utils.DTTM_ALIAS and is_timeseries and dttm_col:
-                col_obj = dttm_col
-            elif is_adhoc_column(flt_col):
-                try:
-                    sqla_col = self.adhoc_column_to_sqla(flt_col)
-                    applied_adhoc_filters_columns.append(flt_col)
-                except ColumnNotFoundException:
-                    rejected_adhoc_filters_columns.append(flt_col)
-                    continue
-            else:
-                col_obj = columns_by_name.get(cast(str, flt_col))
-            filter_grain = flt.get("grain")
-
-            if is_feature_enabled("ENABLE_TEMPLATE_REMOVE_FILTERS"):
-                if get_column_name(flt_col) in removed_filters:
-                    # Skip generating SQLA filter when the jinja template handles it.
-                    continue
-
-            if col_obj or sqla_col is not None:
-                if sqla_col is not None:
-                    pass
-                elif col_obj and filter_grain:
-                    sqla_col = col_obj.get_timestamp_expression(
-                        time_grain=filter_grain, template_processor=template_processor
-                    )
-                elif col_obj:
-                    sqla_col = col_obj.get_sqla_col(
-                        template_processor=template_processor
-                    )
-                col_type = col_obj.type if col_obj else None
-                col_spec = db_engine_spec.get_column_spec(
-                    native_type=col_type,
-                    db_extra=self.database.get_extra(),
-                )
-                is_list_target = op in (
-                    utils.FilterOperator.IN.value,
-                    utils.FilterOperator.NOT_IN.value,
-                )
-
-                col_advanced_data_type = col_obj.advanced_data_type if col_obj else ""
-
-                if col_spec and not col_advanced_data_type:
-                    target_generic_type = col_spec.generic_type
-                else:
-                    target_generic_type = GenericDataType.STRING
-                eq = self.filter_values_handler(
-                    values=val,
-                    operator=op,
-                    target_generic_type=target_generic_type,
-                    target_native_type=col_type,
-                    is_list_target=is_list_target,
-                    db_engine_spec=db_engine_spec,
-                    db_extra=self.database.get_extra(),
-                )
-                if (
-                    col_advanced_data_type != ""
-                    and feature_flag_manager.is_feature_enabled(
-                        "ENABLE_ADVANCED_DATA_TYPES"
-                    )
-                    and col_advanced_data_type in ADVANCED_DATA_TYPES
-                ):
-                    values = eq if is_list_target else [eq]  # type: ignore
-                    bus_resp: AdvancedDataTypeResponse = ADVANCED_DATA_TYPES[
-                        col_advanced_data_type
-                    ].translate_type(
-                        {
-                            "type": col_advanced_data_type,
-                            "values": values,
-                        }
-                    )
-                    if bus_resp["error_message"]:
-                        raise AdvancedDataTypeResponseError(
-                            _(bus_resp["error_message"])
-                        )
-
-                    where_clause_and.append(
-                        ADVANCED_DATA_TYPES[col_advanced_data_type].translate_filter(
-                            sqla_col, op, bus_resp["values"]
-                        )
-                    )
-                elif is_list_target:
-                    assert isinstance(eq, (tuple, list))
-                    if len(eq) == 0:
-                        raise QueryObjectValidationError(
-                            _("Filter value list cannot be empty")
-                        )
-                    if len(eq) > len(
-                        eq_without_none := [x for x in eq if x is not None]
-                    ):
-                        is_null_cond = sqla_col.is_(None)
-                        if eq:
-                            cond = or_(is_null_cond, sqla_col.in_(eq_without_none))
-                        else:
-                            cond = is_null_cond
-                    else:
-                        cond = sqla_col.in_(eq)
-                    if op == utils.FilterOperator.NOT_IN.value:
-                        cond = ~cond
-                    where_clause_and.append(cond)
-                elif op == utils.FilterOperator.IS_NULL.value:
-                    where_clause_and.append(sqla_col.is_(None))
-                elif op == utils.FilterOperator.IS_NOT_NULL.value:
-                    where_clause_and.append(sqla_col.isnot(None))
-                elif op == utils.FilterOperator.IS_TRUE.value:
-                    where_clause_and.append(sqla_col.is_(True))
-                elif op == utils.FilterOperator.IS_FALSE.value:
-                    where_clause_and.append(sqla_col.is_(False))
-                else:
-                    if (
-                        op
-                        not in {
-                            utils.FilterOperator.EQUALS.value,
-                            utils.FilterOperator.NOT_EQUALS.value,
-                        }
-                        and eq is None
-                    ):
-                        raise QueryObjectValidationError(
-                            _(
-                                "Must specify a value for filters "
-                                "with comparison operators"
-                            )
-                        )
-                    if op == utils.FilterOperator.EQUALS.value:
-                        where_clause_and.append(sqla_col == eq)
-                    elif op == utils.FilterOperator.NOT_EQUALS.value:
-                        where_clause_and.append(sqla_col != eq)
-                    elif op == utils.FilterOperator.GREATER_THAN.value:
-                        where_clause_and.append(sqla_col > eq)
-                    elif op == utils.FilterOperator.LESS_THAN.value:
-                        where_clause_and.append(sqla_col < eq)
-                    elif op == utils.FilterOperator.GREATER_THAN_OR_EQUALS.value:
-                        where_clause_and.append(sqla_col >= eq)
-                    elif op == utils.FilterOperator.LESS_THAN_OR_EQUALS.value:
-                        where_clause_and.append(sqla_col <= eq)
-                    elif op == utils.FilterOperator.LIKE.value:
-                        where_clause_and.append(sqla_col.like(eq))
-                    elif op == utils.FilterOperator.ILIKE.value:
-                        where_clause_and.append(sqla_col.ilike(eq))
-                    elif (
-                        op == utils.FilterOperator.TEMPORAL_RANGE.value
-                        and isinstance(eq, str)
-                        and col_obj is not None
-                    ):
-                        _since, _until = get_since_until_from_time_range(
-                            time_range=eq,
-                            time_shift=time_shift,
-                            extras=extras,
-                        )
-                        where_clause_and.append(
-                            col_obj.get_time_filter(
-                                start_dttm=_since,
-                                end_dttm=_until,
-                                label=sqla_col.key,
-                                template_processor=template_processor,
-                            )
-                        )
-                    else:
-                        raise QueryObjectValidationError(
-                            _("Invalid filter operation type: %(op)s", op=op)
-                        )
-        where_clause_and += self.get_sqla_row_level_filters(template_processor)
-        if extras:
-            where = extras.get("where")
-            if where:
-                try:
-                    where = template_processor.process_template(f"({where})")
-                except TemplateError as ex:
-                    raise QueryObjectValidationError(
-                        _(
-                            "Error in jinja expression in WHERE clause: %(msg)s",
-                            msg=ex.message,
-                        )
-                    ) from ex
-                where = _process_sql_expression(
-                    expression=where,
-                    database_id=self.database_id,
-                    schema=self.schema,
-                )
-                where_clause_and += [self.text(where)]
-            having = extras.get("having")
-            if having:
-                try:
-                    having = template_processor.process_template(f"({having})")
-                except TemplateError as ex:
-                    raise QueryObjectValidationError(
-                        _(
-                            "Error in jinja expression in HAVING clause: %(msg)s",
-                            msg=ex.message,
-                        )
-                    ) from ex
-                having = _process_sql_expression(
-                    expression=having,
-                    database_id=self.database_id,
-                    schema=self.schema,
-                )
-                having_clause_and += [self.text(having)]
-
-        if apply_fetch_values_predicate and self.fetch_values_predicate:
-            qry = qry.where(
-                self.get_fetch_values_predicate(template_processor=template_processor)
-            )
-        if granularity:
-            qry = qry.where(and_(*(time_filters + where_clause_and)))
-        else:
-            qry = qry.where(and_(*where_clause_and))
-        qry = qry.having(and_(*having_clause_and))
-
-        self.make_orderby_compatible(select_exprs, orderby_exprs)
-
-        for col, (orig_col, ascending) in zip(orderby_exprs, orderby):
-            if not db_engine_spec.allows_alias_in_orderby and isinstance(col, Label):
-                # if engine does not allow using SELECT alias in ORDER BY
-                # revert to the underlying column
-                col = col.element
-
-            if (
-                db_engine_spec.allows_alias_in_select
-                and db_engine_spec.allows_hidden_cc_in_orderby
-                and col.name in [select_col.name for select_col in select_exprs]
-            ):
-                col = literal_column(col.name)
-            direction = asc if ascending else desc
-            qry = qry.order_by(direction(col))
-
-        if row_limit:
-            qry = qry.limit(row_limit)
-        if row_offset:
-            qry = qry.offset(row_offset)
-
-        if series_limit and groupby_series_columns:
-            if db_engine_spec.allows_joins and db_engine_spec.allows_subqueries:
-                # some sql dialects require for order by expressions
-                # to also be in the select clause -- others, e.g. vertica,
-                # require a unique inner alias
-                inner_main_metric_expr = self.make_sqla_column_compatible(
-                    main_metric_expr, "mme_inner__"
-                )
-                inner_groupby_exprs = []
-                inner_select_exprs = []
-                for gby_name, gby_obj in groupby_series_columns.items():
-                    label = get_column_name(gby_name)
-                    inner = self.make_sqla_column_compatible(gby_obj, gby_name + "__")
-                    inner_groupby_exprs.append(inner)
-                    inner_select_exprs.append(inner)
-
-                inner_select_exprs += [inner_main_metric_expr]
-                subq = select(inner_select_exprs).select_from(tbl)
-                inner_time_filter = []
-
-                if dttm_col and not db_engine_spec.time_groupby_inline:
-                    inner_time_filter = [
-                        dttm_col.get_time_filter(
-                            start_dttm=inner_from_dttm or from_dttm,
-                            end_dttm=inner_to_dttm or to_dttm,
-                            template_processor=template_processor,
-                        )
-                    ]
-                subq = subq.where(and_(*(where_clause_and + inner_time_filter)))
-                subq = subq.group_by(*inner_groupby_exprs)
-
-                ob = inner_main_metric_expr
-                if series_limit_metric:
-                    ob = self._get_series_orderby(
-                        series_limit_metric=series_limit_metric,
-                        metrics_by_name=metrics_by_name,
-                        columns_by_name=columns_by_name,
-                        template_processor=template_processor,
-                    )
-                direction = desc if order_desc else asc
-                subq = subq.order_by(direction(ob))
-                subq = subq.limit(series_limit)
-
-                on_clause = []
-                for gby_name, gby_obj in groupby_series_columns.items():
-                    # in this case the column name, not the alias, needs to be
-                    # conditionally mutated, as it refers to the column alias in
-                    # the inner query
-                    col_name = db_engine_spec.make_label_compatible(gby_name + "__")
-                    on_clause.append(gby_obj == column(col_name))
-
-                tbl = tbl.join(subq.alias(), and_(*on_clause))
-            else:
-                if series_limit_metric:
-                    orderby = [
-                        (
-                            self._get_series_orderby(
-                                series_limit_metric=series_limit_metric,
-                                metrics_by_name=metrics_by_name,
-                                columns_by_name=columns_by_name,
-                                template_processor=template_processor,
-                            ),
-                            not order_desc,
-                        )
-                    ]
-
-                # run prequery to get top groups
-                prequery_obj = {
-                    "is_timeseries": False,
-                    "row_limit": series_limit,
-                    "metrics": metrics,
-                    "granularity": granularity,
-                    "groupby": groupby,
-                    "from_dttm": inner_from_dttm or from_dttm,
-                    "to_dttm": inner_to_dttm or to_dttm,
-                    "filter": filter,
-                    "orderby": orderby,
-                    "extras": extras,
-                    "columns": columns,
-                    "order_desc": True,
-                }
-
-                result = self.query(prequery_obj)
-                prequeries.append(result.query)
-                dimensions = [
-                    c
-                    for c in result.df.columns
-                    if c not in metrics and c in groupby_series_columns
-                ]
-                top_groups = self._get_top_groups(
-                    result.df, dimensions, groupby_series_columns, columns_by_name
-                )
-                qry = qry.where(top_groups)
-
-        qry = qry.select_from(tbl)
-
-        if is_rowcount:
-            if not db_engine_spec.allows_subqueries:
-                raise QueryObjectValidationError(
-                    _("Database does not support subqueries")
-                )
-            label = "rowcount"
-            col = self.make_sqla_column_compatible(literal_column("COUNT(*)"), label)
-            qry = select([col]).select_from(qry.alias("rowcount_qry"))
-            labels_expected = [label]
-
-        filter_columns = [flt.get("col") for flt in filter] if filter else []
-        rejected_filter_columns = [
-            col
-            for col in filter_columns
-            if col
-            and not is_adhoc_column(col)
-            and col not in self.column_names
-            and col not in applied_template_filters
-        ] + rejected_adhoc_filters_columns
-        applied_filter_columns = [
-            col
-            for col in filter_columns
-            if col
-            and not is_adhoc_column(col)
-            and (col in self.column_names or col in applied_template_filters)
-        ] + applied_adhoc_filters_columns
-
-        return SqlaQuery(
-            applied_template_filters=applied_template_filters,
-            rejected_filter_columns=rejected_filter_columns,
-            applied_filter_columns=applied_filter_columns,
-            cte=cte,
-            extra_cache_keys=extra_cache_keys,
-            labels_expected=labels_expected,
-            sqla_query=qry,
-            prequeries=prequeries,
-        )
-
     def _get_series_orderby(
         self,
         series_limit_metric: Metric,
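
A note on the removed block that ends above: when a series limit is set and the
engine supports joins and subqueries, the builder aggregates the series columns
in an inner query, orders it by the series metric, caps it at the limit, and
joins it back so the outer query only keeps rows from the top series. Below is
a minimal sketch of that pattern; the table, columns, and alias names are
hypothetical, using the SQLAlchemy 1.x select([...]) style the diff itself uses:

    import sqlalchemy as sa

    metadata = sa.MetaData()
    sales = sa.Table(
        "sales",
        metadata,
        sa.Column("region", sa.String),
        sa.Column("ts", sa.DateTime),
        sa.Column("amount", sa.Numeric),
    )

    # Inner query: one row per series, ordered by the metric, capped at N.
    metric = sa.func.sum(sales.c.amount).label("mme_inner__")
    inner = (
        sa.select([sales.c.region.label("region__"), metric])
        .group_by(sales.c.region)
        .order_by(sa.desc(metric))
        .limit(5)
        .alias("top_series")
    )

    # Outer query: join back on the series column so only rows belonging to
    # one of the top five series survive.
    qry = (
        sa.select([sales.c.region, sales.c.ts, sa.func.sum(sales.c.amount)])
        .select_from(sales.join(inner, sales.c.region == inner.c.region__))
        .group_by(sales.c.region, sales.c.ts)
    )
    print(qry)
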
diff --git a/superset/models/helpers.py b/superset/models/helpers.py
index 0c52465caa..ad76e0ed85 100644
--- a/superset/models/helpers.py
+++ b/superset/models/helpers.py
@@ -14,20 +14,22 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
-"""a collection of model-related helper classes and functions"""
 # pylint: disable=too-many-lines
+"""a collection of model-related helper classes and functions"""
+import dataclasses
 import json
 import logging
 import re
 import uuid
+from collections import defaultdict
 from datetime import datetime, timedelta
 from json.decoder import JSONDecodeError
 from typing import (
     Any,
     cast,
     Dict,
+    Hashable,
     List,
-    Mapping,
     NamedTuple,
     Optional,
     Set,
@@ -71,6 +73,7 @@ from superset.db_engine_specs.base import TimestampExpression
 from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
 from superset.exceptions import (
     AdvancedDataTypeResponseError,
+    ColumnNotFoundException,
     QueryClauseValidationException,
     QueryObjectValidationError,
     SupersetSecurityException,
@@ -88,7 +91,14 @@ from superset.superset_typing import (
     QueryObjectDict,
 )
 from superset.utils import core as utils
-from superset.utils.core import get_user_id
+from superset.utils.core import (
+    GenericDataType,
+    get_column_name,
+    get_user_id,
+    is_adhoc_column,
+    remove_duplicates,
+)
+from superset.utils.dates import datetime_to_epoch
 
 if TYPE_CHECKING:
     from superset.connectors.sqla.models import SqlMetric, TableColumn
@@ -657,6 +667,8 @@ def clone_model(
 # todo(hugh): centralize where this code lives
 class QueryStringExtended(NamedTuple):
     applied_template_filters: Optional[List[str]]
+    applied_filter_columns: List[ColumnTyping]
+    rejected_filter_columns: List[ColumnTyping]
     labels_expected: List[str]
     prequeries: List[str]
     sql: str
@@ -664,6 +676,8 @@ class QueryStringExtended(NamedTuple):
 
 class SqlaQuery(NamedTuple):
     applied_template_filters: List[str]
+    applied_filter_columns: List[ColumnTyping]
+    rejected_filter_columns: List[ColumnTyping]
     cte: Optional[str]
     extra_cache_keys: List[Any]
     labels_expected: List[str]
@@ -687,7 +701,18 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
     }
 
     @property
-    def query(self) -> str:
+    def fetch_value_predicate(self) -> str:
+        return "fix this!"
+
+    @property
+    def type(self) -> str:
+        raise NotImplementedError()
+
+    @property
+    def db_extra(self) -> Optional[Dict[str, Any]]:
+        raise NotImplementedError()
+
+    def query(self, query_obj: QueryObjectDict) -> QueryResult:
         raise NotImplementedError()
 
     @property
@@ -700,7 +725,7 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
 
     @property
     def metrics(self) -> List[Any]:
-        raise NotImplementedError()
+        return []
 
     @property
     def uid(self) -> str:
@@ -750,17 +775,59 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
     def columns(self) -> List[Any]:
         raise NotImplementedError()
 
-    @property
-    def get_fetch_values_predicate(self) -> List[Any]:
+    def get_fetch_values_predicate(
+        self, template_processor: Optional[BaseTemplateProcessor] = None
+    ) -> TextClause:
         raise NotImplementedError()
 
-    @staticmethod
-    def get_extra_cache_keys(query_obj: Dict[str, Any]) -> List[str]:
+    def get_extra_cache_keys(self, query_obj: Dict[str, Any]) -> List[Hashable]:
         raise NotImplementedError()
 
     def get_template_processor(self, **kwargs: Any) -> BaseTemplateProcessor:
         raise NotImplementedError()
 
+    def get_sqla_row_level_filters(
+        self,
+        template_processor: BaseTemplateProcessor,
+    ) -> List[TextClause]:
+        """
+        Return the appropriate row level security filters for this table and the
+        current user. Filters sharing a group key are combined with OR; the
+        resulting groups and any ungrouped filters are combined with AND.
+
+        :param template_processor: The template processor to apply to the filters.
+        :returns: A list of SQL clauses to be ANDed together.
+        """
+        all_filters: List[TextClause] = []
+        filter_groups: Dict[Union[int, str], List[TextClause]] = defaultdict(list)
+        try:
+            for filter_ in security_manager.get_rls_filters(self):
+                clause = self.text(
+                    f"({template_processor.process_template(filter_.clause)})"
+                )
+                if filter_.group_key:
+                    filter_groups[filter_.group_key].append(clause)
+                else:
+                    all_filters.append(clause)
+
+            if is_feature_enabled("EMBEDDED_SUPERSET"):
+                for rule in security_manager.get_guest_rls_filters(self):
+                    clause = self.text(
+                        f"({template_processor.process_template(rule['clause'])})"
+                    )
+                    all_filters.append(clause)
+
+            grouped_filters = [or_(*clauses) for clauses in filter_groups.values()]
+            all_filters.extend(grouped_filters)
+            return all_filters
+        except TemplateError as ex:
+            raise QueryObjectValidationError(
+                _(
+                    "Error in jinja expression in RLS filters: %(msg)s",
+                    msg=ex.message,
+                )
+            ) from ex
+
     def _process_sql_expression(  # pylint: disable=no-self-use
         self,
         expression: Optional[str],
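
To make the grouping semantics of get_sqla_row_level_filters above concrete:
clauses sharing a group_key are ORed together, and the caller ANDs the groups
with all ungrouped clauses. A toy, self-contained illustration; the filter
rows below are hypothetical stand-ins for real RLS filter models:

    from collections import defaultdict

    from sqlalchemy import and_, or_, text

    # (group_key, clause) pairs standing in for RLS filter models.
    rls_rows = [
        ("region", "region = 'EMEA'"),
        ("region", "region = 'APAC'"),
        (None, "tenant_id = 42"),
    ]

    all_filters, filter_groups = [], defaultdict(list)
    for group_key, clause in rls_rows:
        if group_key:
            filter_groups[group_key].append(text(f"({clause})"))
        else:
            all_filters.append(text(f"({clause})"))

    all_filters.extend(or_(*clauses) for clauses in filter_groups.values())
    # Renders roughly: (tenant_id = 42) AND ((region = 'EMEA') OR (region = 'APAC'))
    print(and_(*all_filters))
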
@@ -859,14 +926,19 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
 
         return ";\n".join(str(statement) for statement in statements)
 
-    def get_query_str_extended(self, query_obj: QueryObjectDict) -> QueryStringExtended:
+    def get_query_str_extended(
+        self, query_obj: QueryObjectDict, mutate: bool = True
+    ) -> QueryStringExtended:
         sqlaq = self.get_sqla_query(**query_obj)
         sql = self.database.compile_sqla_query(sqlaq.sqla_query)  # type: ignore
         sql = self._apply_cte(sql, sqlaq.cte)
         sql = sqlparse.format(sql, reindent=True)
-        sql = self.mutate_query_from_config(sql)
+        if mutate:
+            sql = self.mutate_query_from_config(sql)
         return QueryStringExtended(
             applied_template_filters=sqlaq.applied_template_filters,
+            applied_filter_columns=sqlaq.applied_filter_columns,
+            rejected_filter_columns=sqlaq.rejected_filter_columns,
             labels_expected=sqlaq.labels_expected,
             prequeries=sqlaq.prequeries,
             sql=sql,
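
The new mutate flag lets callers compile the statement without applying a
configured SQL_QUERY_MUTATOR, e.g. to show users the SQL exactly as generated.
A toy sketch of the gating; the mutator body here is hypothetical:

    def mutate_query_from_config(sql: str) -> str:
        return f"-- added by a hypothetical SQL_QUERY_MUTATOR\n{sql}"

    def get_query_str(sql: str, mutate: bool = True) -> str:
        return mutate_query_from_config(sql) if mutate else sql

    print(get_query_str("SELECT 1"))                # mutator applied
    print(get_query_str("SELECT 1", mutate=False))  # pristine SQL
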
@@ -991,9 +1063,16 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
             logger.warning(
                 "Query %s on schema %s failed", sql, self.schema, exc_info=True
             )
+            db_engine_spec = self.db_engine_spec
+            errors = [
+                dataclasses.asdict(error) for error in db_engine_spec.extract_errors(ex)
+            ]
             error_message = utils.error_msg_from_exception(ex)
 
         return QueryResult(
+            applied_template_filters=query_str_ext.applied_template_filters,
+            applied_filter_columns=query_str_ext.applied_filter_columns,
+            rejected_filter_columns=query_str_ext.rejected_filter_columns,
             status=status,
             df=df,
             duration=datetime.now() - qry_start_dttm,
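
The added error handling turns engine-specific exceptions into structured
errors and then plain dicts for the result payload. A self-contained sketch;
FakeError is a hypothetical stand-in for superset.errors.SupersetError:

    import dataclasses
    from typing import Any, Dict, Optional

    @dataclasses.dataclass
    class FakeError:  # stand-in for superset.errors.SupersetError
        message: str
        error_type: str
        level: str
        extra: Optional[Dict[str, Any]] = None

    errors = [
        dataclasses.asdict(err)
        for err in [FakeError("relation missing", "GENERIC_DB_ENGINE_ERROR", "error")]
    ]
    print(errors)  # [{'message': 'relation missing', ...}]
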
@@ -1063,7 +1142,7 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
     def adhoc_metric_to_sqla(
         self,
         metric: AdhocMetric,
-        columns_by_name: Dict[str, "TableColumn"],  # # pylint: disable=unused-argument
+        columns_by_name: Dict[str, "TableColumn"],  # pylint: disable=unused-argument
         template_processor: Optional[BaseTemplateProcessor] = None,
     ) -> ColumnElement:
         """
@@ -1163,19 +1242,20 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
     def _get_series_orderby(
         self,
         series_limit_metric: Metric,
-        metrics_by_name: Mapping[str, "SqlMetric"],
-        columns_by_name: Mapping[str, "TableColumn"],
+        metrics_by_name: Dict[str, "SqlMetric"],
+        columns_by_name: Dict[str, "TableColumn"],
+        template_processor: Optional[BaseTemplateProcessor] = None,
     ) -> Column:
         if utils.is_adhoc_metric(series_limit_metric):
             assert isinstance(series_limit_metric, dict)
-            ob = self.adhoc_metric_to_sqla(
-                series_limit_metric, columns_by_name  # type: ignore
-            )
+            ob = self.adhoc_metric_to_sqla(series_limit_metric, columns_by_name)
         elif (
             isinstance(series_limit_metric, str)
             and series_limit_metric in metrics_by_name
         ):
-            ob = metrics_by_name[series_limit_metric].get_sqla_col()
+            ob = metrics_by_name[series_limit_metric].get_sqla_col(
+                template_processor=template_processor
+            )
         else:
             raise QueryObjectValidationError(
                 _("Metric '%(metric)s' does not exist", metric=series_limit_metric)
@@ -1184,26 +1264,11 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
 
     def adhoc_column_to_sqla(
         self,
-        col: Type["AdhocColumn"],  # type: ignore
+        col: "AdhocColumn",  # type: ignore
+        force_type_check: bool = False,
         template_processor: Optional[BaseTemplateProcessor] = None,
     ) -> ColumnElement:
-        """
-        Turn an adhoc column into a sqlalchemy column.
-
-        :param col: Adhoc column definition
-        :param template_processor: template_processor instance
-        :returns: The metric defined as a sqlalchemy column
-        :rtype: sqlalchemy.sql.column
-        """
-        label = utils.get_column_name(col)  # type: ignore
-        expression = self._process_sql_expression(
-            expression=col["sqlExpression"],
-            database_id=self.database_id,
-            schema=self.schema,
-            template_processor=template_processor,
-        )
-        sqla_column = literal_column(expression)
-        return self.make_sqla_column_compatible(sqla_column, label)
+        raise NotImplementedError()
 
     def _get_top_groups(
         self,
@@ -1241,29 +1306,30 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
 
         return f'{dttm.strftime("%Y-%m-%d %H:%M:%S.%f")}'
 
-    def get_time_filter(
+    def get_time_filter(  # pylint: disable=too-many-arguments
         self,
-        time_col: Dict[str, Any],
+        time_col: "TableColumn",
         start_dttm: Optional[sa.DateTime],
         end_dttm: Optional[sa.DateTime],
+        label: Optional[str] = "__time",
+        template_processor: Optional[BaseTemplateProcessor] = None,
     ) -> ColumnElement:
-        label = "__time"
-        col = time_col.get("column_name")
-        sqla_col = literal_column(col)
-        my_col = self.make_sqla_column_compatible(sqla_col, label)
+        col = self.convert_tbl_column_to_sqla_col(
+            time_col, label=label, template_processor=template_processor
+        )
         l = []
         if start_dttm:
             l.append(
-                my_col
+                col
                 >= self.db_engine_spec.get_text_clause(
-                    self.dttm_sql_literal(start_dttm, time_col.get("type"))
+                    self.dttm_sql_literal(start_dttm, time_col.type)
                 )
             )
         if end_dttm:
             l.append(
-                my_col
+                col
                 < self.db_engine_spec.get_text_clause(
-                    self.dttm_sql_literal(end_dttm, time_col.get("type"))
+                    self.dttm_sql_literal(end_dttm, time_col.type)
                 )
             )
         return and_(*l)
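
In spirit, the rewritten get_time_filter builds a half-open range on the time
column, inclusive at the start and exclusive at the end, so adjacent windows
never double-count a boundary row. A minimal sketch with illustrative names
and literal timestamps:

    import sqlalchemy as sa

    ts = sa.column("ts")
    start = sa.text("'2023-01-01 00:00:00.000000'")
    end = sa.text("'2023-02-01 00:00:00.000000'")
    time_filter = sa.and_(ts >= start, ts < end)
    print(time_filter)  # ts >= '2023-01-01 ...' AND ts < '2023-02-01 ...'
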
@@ -1327,11 +1393,24 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
         time_expr = self.db_engine_spec.get_timestamp_expr(col, None, time_grain)
         return self.make_sqla_column_compatible(time_expr, label)
 
-    def get_sqla_col(self, col: Dict[str, Any]) -> Column:
-        label = col.get("column_name")
-        col_type = col.get("type")
-        col = sa.column(label, type_=col_type)
-        return self.make_sqla_column_compatible(col, label)
+    def convert_tbl_column_to_sqla_col(
+        self,
+        tbl_column: "TableColumn",
+        label: Optional[str] = None,
+        template_processor: Optional[BaseTemplateProcessor] = None,
+    ) -> Column:
+        label = label or tbl_column.column_name
+        db_engine_spec = self.db_engine_spec
+        column_spec = db_engine_spec.get_column_spec(self.type, db_extra=self.db_extra)
+        type_ = column_spec.sqla_type if column_spec else None
+        if expression := tbl_column.expression:
+            if template_processor:
+                expression = template_processor.process_template(expression)
+            col = literal_column(expression, type_=type_)
+        else:
+            col = sa.column(tbl_column.column_name, type_=type_)
+        col = self.make_sqla_column_compatible(col, label)
+        return col
 
     def get_sqla_query(  # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements
         self,
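
The new convert_tbl_column_to_sqla_col centralizes a distinction the old code
repeated inline: a physical column becomes sa.column(name), while a virtual
column's SQL expression, optionally rendered through the Jinja template
processor first, becomes a literal_column. A simplified sketch; to_sqla_col
and its arguments are illustrative, not the real signature:

    import sqlalchemy as sa

    def to_sqla_col(column_name, expression=None, render=None):
        if expression:
            if render:
                expression = render(expression)  # e.g. Jinja templating
            col = sa.literal_column(expression)
        else:
            col = sa.column(column_name)
        return col.label(column_name)

    print(sa.select([to_sqla_col("revenue", "price * qty")]))
    # SELECT price * qty AS revenue
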
@@ -1378,11 +1457,13 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
             "time_column": granularity,
             "time_grain": time_grain,
             "to_dttm": to_dttm.isoformat() if to_dttm else None,
-            "table_columns": [col.get("column_name") for col in self.columns],
+            "table_columns": [col.column_name for col in self.columns],
             "filter": filter,
         }
         columns = columns or []
         groupby = groupby or []
+        rejected_adhoc_filters_columns: List[Union[str, ColumnTyping]] = []
+        applied_adhoc_filters_columns: List[Union[str, ColumnTyping]] = []
         series_column_names = utils.get_column_names(series_columns or [])
         # deprecated, to be removed in 2.0
         if is_timeseries and timeseries_limit:
@@ -1407,8 +1488,11 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
             granularity = self.main_dttm_col
 
         columns_by_name: Dict[str, "TableColumn"] = {
-            col.get("column_name"): col
-            for col in self.columns  # col.column_name: col for col in self.columns
+            col.column_name: col for col in self.columns
+        }
+
+        metrics_by_name: Dict[str, "SqlMetric"] = {
+            m.metric_name: m for m in self.metrics
         }
 
         if not granularity and is_timeseries:
@@ -1432,6 +1516,12 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
                         template_processor=template_processor,
                     )
                 )
+            elif isinstance(metric, str) and metric in metrics_by_name:
+                metrics_exprs.append(
+                    metrics_by_name[metric].get_sqla_col(
+                        template_processor=template_processor
+                    )
+                )
             else:
                 raise QueryObjectValidationError(
                     _("Metric '%(metric)s' does not exist", metric=metric)
@@ -1470,14 +1560,17 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
                     col = metrics_exprs_by_expr.get(str(col), col)
                     need_groupby = True
             elif col in columns_by_name:
-                gb_column_obj = columns_by_name[col]
-                if isinstance(gb_column_obj, dict):
-                    col = self.get_sqla_col(gb_column_obj)
-                else:
-                    col = gb_column_obj.get_sqla_col()
+                col = self.convert_tbl_column_to_sqla_col(
+                    columns_by_name[col], template_processor=template_processor
+                )
             elif col in metrics_exprs_by_label:
                 col = metrics_exprs_by_label[col]
                 need_groupby = True
+            elif col in metrics_by_name:
+                col = metrics_by_name[col].get_sqla_col(
+                    template_processor=template_processor
+                )
+                need_groupby = True
 
             if isinstance(col, ColumnElement):
                 orderby_exprs.append(col)
@@ -1503,33 +1596,24 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
                     # if groupby field/expr equals granularity field/expr
                     if selected == granularity:
                         table_col = columns_by_name[selected]
-                        if isinstance(table_col, dict):
-                            outer = self.get_timestamp_expression(
-                                column=table_col,
-                                time_grain=time_grain,
-                                label=selected,
-                                template_processor=template_processor,
-                            )
-                        else:
-                            outer = table_col.get_timestamp_expression(
-                                time_grain=time_grain,
-                                label=selected,
-                                template_processor=template_processor,
-                            )
+                        outer = table_col.get_timestamp_expression(
+                            time_grain=time_grain,
+                            label=selected,
+                            template_processor=template_processor,
+                        )
                     # if groupby field equals a selected column
                     elif selected in columns_by_name:
-                        if isinstance(columns_by_name[selected], dict):
-                            outer = sa.column(f"{selected}")
-                            outer = self.make_sqla_column_compatible(outer, selected)
-                        else:
-                            outer = columns_by_name[selected].get_sqla_col()
+                        outer = self.convert_tbl_column_to_sqla_col(
+                            columns_by_name[selected],
+                            template_processor=template_processor,
+                        )
                     else:
-                        selected = self.validate_adhoc_subquery(
+                        selected = validate_adhoc_subquery(
                             selected,
                             self.database_id,
                             self.schema,
                         )
-                        outer = sa.column(f"{selected}")
+                        outer = literal_column(f"({selected})")
                         outer = self.make_sqla_column_compatible(outer, selected)
                 else:
                     outer = self.adhoc_column_to_sqla(
@@ -1543,19 +1627,28 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
                 select_exprs.append(outer)
         elif columns:
             for selected in columns:
-                selected = self.validate_adhoc_subquery(
-                    selected,
+                if is_adhoc_column(selected):
+                    _sql = selected["sqlExpression"]
+                    _column_label = selected["label"]
+                elif isinstance(selected, str):
+                    _sql = selected
+                    _column_label = selected
+
+                selected = validate_adhoc_subquery(
+                    _sql,
                     self.database_id,
                     self.schema,
                 )
-                if isinstance(columns_by_name[selected], dict):
-                    select_exprs.append(sa.column(f"{selected}"))
-                else:
-                    select_exprs.append(
-                        columns_by_name[selected].get_sqla_col()
-                        if selected in columns_by_name
-                        else self.make_sqla_column_compatible(literal_column(selected))
+
+                select_exprs.append(
+                    self.convert_tbl_column_to_sqla_col(
+                        columns_by_name[selected], template_processor=template_processor
                     )
+                    if isinstance(selected, str) and selected in columns_by_name
+                    else self.make_sqla_column_compatible(
+                        literal_column(selected), _column_label
+                    )
+                )
             metrics_exprs = []
 
         if granularity:
@@ -1566,57 +1659,43 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
                         col=granularity,
                     )
                 )
-            time_filters: List[Any] = []
+            time_filters = []
 
             if is_timeseries:
-                if isinstance(dttm_col, dict):
-                    timestamp = self.get_timestamp_expression(
-                        dttm_col, time_grain, template_processor=template_processor
-                    )
-                else:
-                    timestamp = dttm_col.get_timestamp_expression(
-                        time_grain=time_grain, template_processor=template_processor
-                    )
+                timestamp = dttm_col.get_timestamp_expression(
+                    time_grain=time_grain, template_processor=template_processor
+                )
                 # always put timestamp as the first column
                 select_exprs.insert(0, timestamp)
                 groupby_all_columns[timestamp.name] = timestamp
 
             # Use main dttm column to support index with secondary dttm columns.
-            if db_engine_spec.time_secondary_columns:
-                if isinstance(dttm_col, dict):
-                    dttm_col_name = dttm_col.get("column_name")
-                else:
-                    dttm_col_name = dttm_col.column_name
-
-                if (
-                    self.main_dttm_col in self.dttm_cols
-                    and self.main_dttm_col != dttm_col_name
-                ):
-                    if isinstance(self.main_dttm_col, dict):
-                        time_filters.append(
-                            self.get_time_filter(
-                                self.main_dttm_col,
-                                from_dttm,
-                                to_dttm,
-                            )
-                        )
-                    else:
-                        time_filters.append(
-                            columns_by_name[self.main_dttm_col].get_time_filter(
-                                from_dttm,
-                                to_dttm,
-                            )
-                        )
+            if (
+                db_engine_spec.time_secondary_columns
+                and self.main_dttm_col in self.dttm_cols
+                and self.main_dttm_col != dttm_col.column_name
+            ):
+                time_filters.append(
+                    self.get_time_filter(
+                        time_col=columns_by_name[self.main_dttm_col],
+                        start_dttm=from_dttm,
+                        end_dttm=to_dttm,
+                        template_processor=template_processor,
+                    )
+                )
 
-            if isinstance(dttm_col, dict):
-                time_filters.append(self.get_time_filter(dttm_col, from_dttm, to_dttm))
-            else:
-                time_filters.append(dttm_col.get_time_filter(from_dttm, to_dttm))
+            time_filter_column = self.get_time_filter(
+                time_col=dttm_col,
+                start_dttm=from_dttm,
+                end_dttm=to_dttm,
+                template_processor=template_processor,
+            )
+            time_filters.append(time_filter_column)
 
         # Always remove duplicates by column name, as sometimes `metrics_exprs`
         # can have the same name as a groupby column (e.g. when users use
         # raw columns as custom SQL adhoc metric).
-        select_exprs = utils.remove_duplicates(
+        select_exprs = remove_duplicates(
             select_exprs + metrics_exprs, key=lambda x: x.name
         )
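
The dedup step keys on the compiled column name, so a metric and a groupby
column sharing a label collapse to a single SELECT expression. A
behavior-compatible stand-in for superset.utils.core.remove_duplicates,
assuming first occurrence wins and order is preserved:

    from typing import Callable, List, Optional, TypeVar

    T = TypeVar("T")

    def remove_duplicates(
        items: List[T], key: Optional[Callable[[T], object]] = None
    ) -> List[T]:
        seen = set()
        result = []
        for item in items:
            item_key = key(item) if key else item
            if item_key not in seen:
                seen.add(item_key)
                result.append(item)
        return result

    print(remove_duplicates(["ts", "amount", "ts"]))  # ['ts', 'amount']
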
 
@@ -1626,7 +1705,7 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
         # Order by columns are "hidden" columns, some databases require them
         # always be present in SELECT if an aggregation function is used
         if not db_engine_spec.allows_hidden_orderby_agg:
-            select_exprs = utils.remove_duplicates(select_exprs + orderby_exprs)
+            select_exprs = remove_duplicates(select_exprs + orderby_exprs)
 
         qry = sa.select(select_exprs)
 
@@ -1648,14 +1727,19 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
             sqla_col: Optional[Column] = None
             if flt_col == utils.DTTM_ALIAS and is_timeseries and dttm_col:
                 col_obj = dttm_col
-            elif utils.is_adhoc_column(flt_col):
-                sqla_col = self.adhoc_column_to_sqla(flt_col)  # type: ignore
+            elif is_adhoc_column(flt_col):
+                try:
+                    sqla_col = self.adhoc_column_to_sqla(flt_col, force_type_check=True)
+                    applied_adhoc_filters_columns.append(flt_col)
+                except ColumnNotFoundException:
+                    rejected_adhoc_filters_columns.append(flt_col)
+                    continue
             else:
                 col_obj = columns_by_name.get(cast(str, flt_col))
             filter_grain = flt.get("grain")
 
             if is_feature_enabled("ENABLE_TEMPLATE_REMOVE_FILTERS"):
-                if utils.get_column_name(flt_col) in removed_filters:
+                if get_column_name(flt_col) in removed_filters:
                     # Skip generating SQLA filter when the jinja template handles it.
                     continue
 
@@ -1663,44 +1747,29 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
                 if sqla_col is not None:
                     pass
                 elif col_obj and filter_grain:
-                    if isinstance(col_obj, dict):
-                        sqla_col = self.get_timestamp_expression(
-                            col_obj, time_grain, template_processor=template_processor
-                        )
-                    else:
-                        sqla_col = col_obj.get_timestamp_expression(
-                            time_grain=filter_grain,
-                            template_processor=template_processor,
-                        )
-                elif col_obj and isinstance(col_obj, dict):
-                    sqla_col = sa.column(col_obj.get("column_name"))
+                    sqla_col = col_obj.get_timestamp_expression(
+                        time_grain=filter_grain, template_processor=template_processor
+                    )
                 elif col_obj:
-                    sqla_col = col_obj.get_sqla_col()
-
-                if col_obj and isinstance(col_obj, dict):
-                    col_type = col_obj.get("type")
-                else:
-                    col_type = col_obj.type if col_obj else None
+                    sqla_col = self.convert_tbl_column_to_sqla_col(
+                        tbl_column=col_obj, template_processor=template_processor
+                    )
+                col_type = col_obj.type if col_obj else None
                 col_spec = db_engine_spec.get_column_spec(
                     native_type=col_type,
-                    db_extra=self.database.get_extra(),  # type: ignore
+                    #                    db_extra=self.database.get_extra(),
                 )
                 is_list_target = op in (
                     utils.FilterOperator.IN.value,
                     utils.FilterOperator.NOT_IN.value,
                 )
 
-                if col_obj and isinstance(col_obj, dict):
-                    col_advanced_data_type = ""
-                else:
-                    col_advanced_data_type = (
-                        col_obj.advanced_data_type if col_obj else ""
-                    )
+                col_advanced_data_type = col_obj.advanced_data_type if col_obj else ""
 
                 if col_spec and not col_advanced_data_type:
                     target_generic_type = col_spec.generic_type
                 else:
-                    target_generic_type = utils.GenericDataType.STRING
+                    target_generic_type = GenericDataType.STRING
                 eq = self.filter_values_handler(
                     values=val,
                     operator=op,
@@ -1708,7 +1777,7 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
                     target_native_type=col_type,
                     is_list_target=is_list_target,
                     db_engine_spec=db_engine_spec,
-                    db_extra=self.database.get_extra(),  # type: ignore
+                    #                     db_extra=self.database.get_extra(),
                 )
                 if (
                     col_advanced_data_type != ""
@@ -1764,7 +1833,14 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
                 elif op == utils.FilterOperator.IS_FALSE.value:
                     where_clause_and.append(sqla_col.is_(False))
                 else:
-                    if eq is None:
+                    if (
+                        op
+                        not in {
+                            utils.FilterOperator.EQUALS.value,
+                            utils.FilterOperator.NOT_EQUALS.value,
+                        }
+                        and eq is None
+                    ):
                         raise QueryObjectValidationError(
                             _(
                                 "Must specify a value for filters "
@@ -1802,19 +1878,20 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
                                 time_col=col_obj,
                                 start_dttm=_since,
                                 end_dttm=_until,
+                                label=sqla_col.key,
+                                template_processor=template_processor,
                             )
                         )
                     else:
                         raise QueryObjectValidationError(
                             _("Invalid filter operation type: %(op)s", op=op)
                         )
-        # todo(hugh): fix this w/ template_processor
-        # where_clause_and += self.get_sqla_row_level_filters(template_processor)
+        where_clause_and += self.get_sqla_row_level_filters(template_processor)
         if extras:
             where = extras.get("where")
             if where:
                 try:
-                    where = template_processor.process_template(f"{where}")
+                    where = template_processor.process_template(f"({where})")
                 except TemplateError as ex:
                     raise QueryObjectValidationError(
                         _(
@@ -1822,11 +1899,17 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
                             msg=ex.message,
                         )
                     ) from ex
+                where = self._process_sql_expression(
+                    expression=where,
+                    database_id=self.database_id,
+                    schema=self.schema,
+                    template_processor=template_processor,
+                )
                 where_clause_and += [self.text(where)]
             having = extras.get("having")
             if having:
                 try:
-                    having = template_processor.process_template(f"{having}")
+                    having = template_processor.process_template(f"({having})")
                 except TemplateError as ex:
                     raise QueryObjectValidationError(
                         _(
@@ -1834,9 +1917,18 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
                             msg=ex.message,
                         )
                     ) from ex
+                having = self._process_sql_expression(
+                    expression=having,
+                    database_id=self.database_id,
+                    schema=self.schema,
+                    template_processor=template_processor,
+                )
                 having_clause_and += [self.text(having)]
+
         if apply_fetch_values_predicate and self.fetch_values_predicate:  # type: ignore
-            qry = qry.where(self.get_fetch_values_predicate())  # type: ignore
+            qry = qry.where(
+                self.get_fetch_values_predicate(template_processor=template_processor)
+            )
         if granularity:
             qry = qry.where(and_(*(time_filters + where_clause_and)))
         else:
@@ -1876,7 +1968,7 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
                 inner_groupby_exprs = []
                 inner_select_exprs = []
                 for gby_name, gby_obj in groupby_series_columns.items():
-                    label = utils.get_column_name(gby_name)
+                    label = get_column_name(gby_name)
                     inner = self.make_sqla_column_compatible(gby_obj, gby_name + "__")
                     inner_groupby_exprs.append(inner)
                     inner_select_exprs.append(inner)
@@ -1886,26 +1978,25 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
                 inner_time_filter = []
 
                 if dttm_col and not db_engine_spec.time_groupby_inline:
-                    if isinstance(dttm_col, dict):
-                        inner_time_filter = [
-                            self.get_time_filter(
-                                dttm_col,
-                                inner_from_dttm or from_dttm,
-                                inner_to_dttm or to_dttm,
-                            )
-                        ]
-                    else:
-                        inner_time_filter = [
-                            dttm_col.get_time_filter(
-                                inner_from_dttm or from_dttm,
-                                inner_to_dttm or to_dttm,
-                            )
-                        ]
-
+                    inner_time_filter = [
+                        self.get_time_filter(
+                            time_col=dttm_col,
+                            start_dttm=inner_from_dttm or from_dttm,
+                            end_dttm=inner_to_dttm or to_dttm,
+                            template_processor=template_processor,
+                        )
+                    ]
                 subq = subq.where(and_(*(where_clause_and + inner_time_filter)))
                 subq = subq.group_by(*inner_groupby_exprs)
 
                 ob = inner_main_metric_expr
+                if series_limit_metric:
+                    ob = self._get_series_orderby(
+                        series_limit_metric=series_limit_metric,
+                        metrics_by_name=metrics_by_name,
+                        columns_by_name=columns_by_name,
+                        template_processor=template_processor,
+                    )
                 direction = sa.desc if order_desc else sa.asc
                 subq = subq.order_by(direction(ob))
                 subq = subq.limit(series_limit)
@@ -1919,6 +2010,19 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
                     on_clause.append(gby_obj == sa.column(col_name))
 
                 tbl = tbl.join(subq.alias(), and_(*on_clause))
+            else:
+                if series_limit_metric:
+                    orderby = [
+                        (
+                            self._get_series_orderby(
+                                series_limit_metric=series_limit_metric,
+                                metrics_by_name=metrics_by_name,
+                                columns_by_name=columns_by_name,
+                                template_processor=template_processor,
+                            ),
+                            not order_desc,
+                        )
+                    ]
 
                 # run prequery to get top groups
                 prequery_obj = {
@@ -1935,7 +2039,8 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
                     "columns": columns,
                     "order_desc": True,
                 }
-                result = self.exc_query(prequery_obj)
+
+                result = self.query(prequery_obj)
                 prequeries.append(result.query)
                 dimensions = [
                     c
@@ -1959,9 +2064,29 @@ class ExploreMixin:  # pylint: disable=too-many-public-methods
             qry = sa.select([col]).select_from(qry.alias("rowcount_qry"))
             labels_expected = [label]
 
+        filter_columns = [flt.get("col") for flt in filter] if filter else []
+        rejected_filter_columns = [
+            col
+            for col in filter_columns
+            if col
+            and not is_adhoc_column(col)
+            and col not in self.column_names
+            and col not in applied_template_filters
+        ] + rejected_adhoc_filters_columns
+
+        applied_filter_columns = [
+            col
+            for col in filter_columns
+            if col
+            and not is_adhoc_column(col)
+            and (col in self.column_names or col in applied_template_filters)
+        ] + applied_adhoc_filters_columns
+
         return SqlaQuery(
             applied_template_filters=applied_template_filters,
             cte=cte,
+            applied_filter_columns=applied_filter_columns,
+            rejected_filter_columns=rejected_filter_columns,
             extra_cache_keys=extra_cache_keys,
             labels_expected=labels_expected,
             sqla_query=qry,
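
For orientation, the applied/rejected bookkeeping introduced above partitions
the requested filter columns into those the datasource can actually serve
(physical columns, or filters already applied via templates) and those it
must report as rejected. A minimal standalone sketch of that partition, with
is_adhoc_column stubbed and the adhoc applied/rejected lists assumed empty
(both assumptions are for illustration only, not part of the patch):

    from typing import Any, List, Tuple

    def partition_filter_columns(
        filter_columns: List[Any],
        column_names: List[str],
        applied_template_filters: List[str],
    ) -> Tuple[List[Any], List[Any]]:
        """Mirror the applied/rejected split from the hunk above."""

        def is_adhoc_column(col: Any) -> bool:
            # Stub: adhoc columns arrive as dict payloads carrying a SQL expression.
            return isinstance(col, dict) and "sqlExpression" in col

        applied = [
            col
            for col in filter_columns
            if col
            and not is_adhoc_column(col)
            and (col in column_names or col in applied_template_filters)
        ]
        rejected = [
            col
            for col in filter_columns
            if col
            and not is_adhoc_column(col)
            and col not in column_names
            and col not in applied_template_filters
        ]
        return applied, rejected

    # partition_filter_columns(["gender", "bogus"], ["gender"], [])
    # -> (["gender"], ["bogus"])
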
diff --git a/superset/models/sql_lab.py b/superset/models/sql_lab.py
index 11ea7ef528..e7d9f672e0 100644
--- a/superset/models/sql_lab.py
+++ b/superset/models/sql_lab.py
@@ -19,7 +19,7 @@ import inspect
 import logging
 import re
 from datetime import datetime
-from typing import Any, Dict, List, Optional, Type, TYPE_CHECKING
+from typing import Any, Dict, Hashable, List, Optional, Type, TYPE_CHECKING
 
 import simplejson as json
 import sqlalchemy as sqla
@@ -52,9 +52,10 @@ from superset.models.helpers import (
 )
 from superset.sql_parse import CtasMethod, ParsedQuery, Table
 from superset.sqllab.limiting_factor import LimitingFactor
-from superset.utils.core import GenericDataType, QueryStatus, user_label
+from superset.utils.core import QueryStatus, user_label
 
 if TYPE_CHECKING:
+    from superset.connectors.sqla.models import TableColumn
     from superset.db_engine_specs import BaseEngineSpec
 
 
@@ -183,47 +184,33 @@ class Query(
         return list(ParsedQuery(self.sql).tables)
 
     @property
-    def columns(self) -> List[Dict[str, Any]]:
-        bool_types = ("BOOL",)
-        num_types = (
-            "DOUBLE",
-            "FLOAT",
-            "INT",
-            "BIGINT",
-            "NUMBER",
-            "LONG",
-            "REAL",
-            "NUMERIC",
-            "DECIMAL",
-            "MONEY",
+    def columns(self) -> List["TableColumn"]:
+        from superset.connectors.sqla.models import (  # pylint: disable=import-outside-toplevel
+            TableColumn,
         )
-        date_types = ("DATE", "TIME")
-        str_types = ("VARCHAR", "STRING", "CHAR")
+
         columns = []
-        col_type = ""
         for col in self.extra.get("columns", []):
-            computed_column = {**col}
-            col_type = col.get("type")
-
-            if col_type and any(map(lambda t: t in col_type.upper(), str_types)):
-                computed_column["type_generic"] = GenericDataType.STRING
-            if col_type and any(map(lambda t: t in col_type.upper(), bool_types)):
-                computed_column["type_generic"] = GenericDataType.BOOLEAN
-            if col_type and any(map(lambda t: t in col_type.upper(), num_types)):
-                computed_column["type_generic"] = GenericDataType.NUMERIC
-            if col_type and any(map(lambda t: t in col_type.upper(), date_types)):
-                computed_column["type_generic"] = GenericDataType.TEMPORAL
-
-            computed_column["column_name"] = col.get("name")
-            computed_column["groupby"] = True
-            columns.append(computed_column)
+            columns.append(
+                TableColumn(
+                    column_name=col["name"],
+                    type=col["type"],
+                    is_dttm=col["is_dttm"],
+                    groupby=True,
+                    filterable=True,
+                )
+            )
         return columns
 
+    @property
+    def db_extra(self) -> Optional[Dict[str, Any]]:
+        return None
+
     @property
     def data(self) -> Dict[str, Any]:
         order_by_choices = []
         for col in self.columns:
-            column_name = str(col.get("column_name") or "")
+            column_name = str(col.column_name or "")
             order_by_choices.append(
                 (json.dumps([column_name, True]), f"{column_name} " + __("[asc]"))
             )
@@ -237,7 +224,7 @@ class Query(
             ],
             "filter_select": True,
             "name": self.tab_name,
-            "columns": self.columns,
+            "columns": [o.data for o in self.columns],
             "metrics": [],
             "id": self.id,
             "type": self.type,
@@ -279,7 +266,7 @@ class Query(
 
     @property
     def column_names(self) -> List[Any]:
-        return [col.get("column_name") for col in self.columns]
+        return [col.column_name for col in self.columns]
 
     @property
     def offset(self) -> int:
@@ -294,7 +281,7 @@ class Query(
 
     @property
     def dttm_cols(self) -> List[Any]:
-        return [col.get("column_name") for col in self.columns if col.get("is_dttm")]
+        return [col.column_name for col in self.columns if col.is_dttm]
 
     @property
     def schema_perm(self) -> str:
@@ -309,7 +296,7 @@ class Query(
         return ""
 
     @staticmethod
-    def get_extra_cache_keys(query_obj: Dict[str, Any]) -> List[str]:
+    def get_extra_cache_keys(query_obj: Dict[str, Any]) -> List[Hashable]:
         return []
 
     @property
@@ -337,7 +324,7 @@ class Query(
         if not column_name:
             return None
         for col in self.columns:
-            if col.get("column_name") == column_name:
+            if col.column_name == column_name:
                 return col
         return None
 
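
Net effect of the sql_lab.py hunks above: Query.columns now yields
TableColumn objects instead of ad-hoc dicts, so every downstream accessor
moves from col.get("column_name") to plain attribute access. A rough sketch
of the new access pattern, using a dataclass as a hypothetical stand-in for
TableColumn:

    from dataclasses import dataclass
    from typing import List

    @dataclass
    class Col:
        # Hypothetical stand-in for superset.connectors.sqla.models.TableColumn.
        column_name: str
        type: str
        is_dttm: bool = False

    def dttm_cols(columns: List[Col]) -> List[str]:
        # Post-refactor style: attribute access, no dict fallbacks.
        return [col.column_name for col in columns if col.is_dttm]

    assert dttm_cols([Col("ds", "TIMESTAMP", True), Col("name", "VARCHAR")]) == ["ds"]
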
diff --git a/superset/utils/core.py b/superset/utils/core.py
index d229942834..ee84d52eb9 100644
--- a/superset/utils/core.py
+++ b/superset/utils/core.py
@@ -1785,15 +1785,9 @@ def get_time_filter_status(
     datasource: "BaseDatasource",
     applied_time_extras: Dict[str, str],
 ) -> Tuple[List[Dict[str, str]], List[Dict[str, str]]]:
-    temporal_columns: Set[Any]
-    if datasource.type == "query":
-        temporal_columns = {
-            col.get("column_name") for col in datasource.columns if col.get("is_dttm")
-        }
-    else:
-        temporal_columns = {
-            col.column_name for col in datasource.columns if col.is_dttm
-        }
+    temporal_columns: Set[Any] = {
+        col.column_name for col in datasource.columns if col.is_dttm
+    }
     applied: List[Dict[str, str]] = []
     rejected: List[Dict[str, str]] = []
     time_column = applied_time_extras.get(ExtraFiltersTimeColumnType.TIME_COL)
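
Because Query.columns now returns the same kind of column objects as
SqlaTable.columns, the datasource.type == "query" special case in
get_time_filter_status collapses into a single comprehension. A sketch of
the unified accessor, assuming only that each column exposes column_name
and is_dttm:

    from typing import Any, Set

    def temporal_column_names(datasource: Any) -> Set[str]:
        # Works for SqlaTable and, after this refactor, Query as well.
        return {col.column_name for col in datasource.columns if col.is_dttm}
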
diff --git a/superset/views/core.py b/superset/views/core.py
index 44f1b78af0..7db64279a3 100755
--- a/superset/views/core.py
+++ b/superset/views/core.py
@@ -2021,7 +2021,7 @@ class Superset(BaseSupersetView):  # pylint: disable=too-many-public-methods
         db.session.add(table)
         cols = []
         for config_ in data.get("columns"):
-            column_name = config_.get("name")
+            column_name = config_.get("column_name") or config_.get("name")
             col = TableColumn(
                 column_name=column_name,
                 filterable=True,
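
The one-line views/core.py change is a compatibility shim: newer SQL Lab
payloads carry "column_name" while older ones carry "name", and the or
fallback accepts either. For example (payloads are illustrative):

    for config_ in ({"column_name": "ds"}, {"name": "ds"}):
        assert (config_.get("column_name") or config_.get("name")) == "ds"
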
diff --git a/tests/integration_tests/charts/data/api_tests.py b/tests/integration_tests/charts/data/api_tests.py
index ed8de062d7..231f06598e 100644
--- a/tests/integration_tests/charts/data/api_tests.py
+++ b/tests/integration_tests/charts/data/api_tests.py
@@ -1186,8 +1186,8 @@ def test_chart_cache_timeout_chart_not_found(
     [
         (200, {"where": "1 = 1"}),
         (200, {"having": "count(*) > 0"}),
-        (400, {"where": "col1 in (select distinct col1 from physical_dataset)"}),
-        (400, {"having": "count(*) > (select count(*) from physical_dataset)"}),
+        (403, {"where": "col1 in (select distinct col1 from physical_dataset)"}),
+        (403, {"having": "count(*) > (select count(*) from physical_dataset)"}),
     ],
 )
 @with_feature_flags(ALLOW_ADHOC_SUBQUERY=False)
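
The api_tests change only moves an expected status code: with
ALLOW_ADHOC_SUBQUERY disabled, a subquery embedded in an adhoc where/having
clause is now rejected as a permissions failure (403) rather than a
malformed request (400). A toy sketch of the semantics the updated
parametrization pins down (the helper is invented for illustration; the
real check lives in Superset's security layer):

    from http import HTTPStatus

    def adhoc_subquery_status(allow_adhoc_subquery: bool) -> int:
        # Flag off -> forbidden, matching the new 403 expectations above.
        return int(HTTPStatus.OK if allow_adhoc_subquery else HTTPStatus.FORBIDDEN)

    assert adhoc_subquery_status(False) == 403  # previously asserted as 400
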
diff --git a/tests/integration_tests/sqllab_tests.py b/tests/integration_tests/sqllab_tests.py
index d9f26239d1..27ccdde96b 100644
--- a/tests/integration_tests/sqllab_tests.py
+++ b/tests/integration_tests/sqllab_tests.py
@@ -493,8 +493,16 @@ class TestSqlLab(SupersetTestCase):
             "datasourceName": f"test_viz_flow_table_{random()}",
             "schema": "superset",
             "columns": [
-                {"is_dttm": False, "type": "STRING", "name": f"viz_type_{random()}"},
-                {"is_dttm": False, "type": "OBJECT", "name": f"ccount_{random()}"},
+                {
+                    "is_dttm": False,
+                    "type": "STRING",
+                    "column_name": f"viz_type_{random()}",
+                },
+                {
+                    "is_dttm": False,
+                    "type": "OBJECT",
+                    "column_name": f"ccount_{random()}",
+                },
             ],
             "sql": """\
                 SELECT *
@@ -523,8 +531,16 @@ class TestSqlLab(SupersetTestCase):
             "chartType": "dist_bar",
             "schema": "superset",
             "columns": [
-                {"is_dttm": False, "type": "STRING", "name": f"viz_type_{random()}"},
-                {"is_dttm": False, "type": "OBJECT", "name": f"ccount_{random()}"},
+                {
+                    "is_dttm": False,
+                    "type": "STRING",
+                    "column_name": f"viz_type_{random()}",
+                },
+                {
+                    "is_dttm": False,
+                    "type": "OBJECT",
+                    "column_name": f"ccount_{random()}",
+                },
             ],
             "sql": """\
                 SELECT *


[superset] 02/07: fix: Dashboard not loading with default first value in filter (#23512)

Posted by hu...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

hugh pushed a commit to branch 2023.13.1
in repository https://gitbox.apache.org/repos/asf/superset.git

commit 73ad7c3cc0ec99b81c5ca68c1d796f3ef821c8ef
Author: Geido <60...@users.noreply.github.com>
AuthorDate: Wed Mar 29 18:18:08 2023 +0200

    fix: Dashboard not loading with default first value in filter (#23512)
    
    (cherry picked from commit 4220d32f3d5994f8459dc421d6d2396e7446fdfa)
---
 .../src/filters/components/Select/SelectFilterPlugin.tsx           | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/superset-frontend/src/filters/components/Select/SelectFilterPlugin.tsx b/superset-frontend/src/filters/components/Select/SelectFilterPlugin.tsx
index d3cb2b4dea..a04606212c 100644
--- a/superset-frontend/src/filters/components/Select/SelectFilterPlugin.tsx
+++ b/superset-frontend/src/filters/components/Select/SelectFilterPlugin.tsx
@@ -129,7 +129,6 @@ export default function PluginFilterSelect(props: PluginFilterSelectProps) {
         enableEmptyFilter && !inverseSelection && !values?.length;
 
       const suffix = inverseSelection && values?.length ? t(' (excluded)') : '';
-
       dispatchDataMask({
         type: 'filterState',
         __cache: filterState,
@@ -290,13 +289,9 @@ export default function PluginFilterSelect(props: PluginFilterSelectProps) {
     updateDataMask,
     data,
     groupby,
-    JSON.stringify(filterState),
+    JSON.stringify(filterState.value),
   ]);
 
-  useEffect(() => {
-    updateDataMask(filterState.value);
-  }, [JSON.stringify(filterState.value)]);
-
   useEffect(() => {
     setDataMask(dataMask);
   }, [JSON.stringify(dataMask)]);
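
Reading the hunk as a whole: the effect now depends on
JSON.stringify(filterState.value) rather than the entire serialized
filterState, and the separate effect that re-dispatched
updateDataMask(filterState.value) is gone. Given the commit title, the
plausible mechanism is that the old wiring could keep re-firing the data
mask while the dashboard applied a default first value, so the filter never
settled and the dashboard stalled.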