Posted to notifications@superset.apache.org by GitBox <gi...@apache.org> on 2019/01/03 22:07:08 UTC

[GitHub] williaster closed pull request #6524: [SIP 6] Querycontext improvement

URL: https://github.com/apache/incubator-superset/pull/6524

This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:


diff --git a/superset/assets/cypress/fixtures/example.json b/superset/assets/cypress/fixtures/example.json
new file mode 100644
index 0000000000..da18d9352a
--- /dev/null
+++ b/superset/assets/cypress/fixtures/example.json
@@ -0,0 +1,5 @@
+{
+  "name": "Using fixtures to represent data",
+  "email": "hello@cypress.io",
+  "body": "Fixtures are a great way to mock data for responses to routes"
+}
\ No newline at end of file
diff --git a/superset/assets/spec/javascripts/explore/utils_spec.jsx b/superset/assets/spec/javascripts/explore/utils_spec.jsx
index 323655dc06..863f5f7e8d 100644
--- a/superset/assets/spec/javascripts/explore/utils_spec.jsx
+++ b/superset/assets/spec/javascripts/explore/utils_spec.jsx
@@ -14,6 +14,10 @@ describe('exploreUtils', () => {
     expect(uri1.toString()).toBe(uri2.toString());
   }
 
+  const expectedPayload = {
+    form_data: formData,
+  };
+
   describe('getExploreUrlAndPayload', () => {
     it('generates proper base url', () => {
       // This assertion is to show clearly the value of location.href
@@ -30,7 +34,7 @@ describe('exploreUtils', () => {
         URI(url),
         URI('/superset/explore/'),
       );
-      expect(payload).toEqual(formData);
+      expect(payload).toEqual(expectedPayload);
     });
     it('generates proper json url', () => {
       const { url, payload } = getExploreUrlAndPayload({
@@ -43,7 +47,7 @@ describe('exploreUtils', () => {
         URI(url),
         URI('/superset/explore_json/'),
       );
-      expect(payload).toEqual(formData);
+      expect(payload).toEqual(expectedPayload);
     });
     it('generates proper json forced url', () => {
       const { url, payload } = getExploreUrlAndPayload({
@@ -57,7 +61,7 @@ describe('exploreUtils', () => {
         URI('/superset/explore_json/')
           .search({ force: 'true' }),
       );
-      expect(payload).toEqual(formData);
+      expect(payload).toEqual(expectedPayload);
     });
     it('generates proper csv URL', () => {
       const { url, payload } = getExploreUrlAndPayload({
@@ -71,7 +75,7 @@ describe('exploreUtils', () => {
         URI('/superset/explore_json/')
           .search({ csv: 'true' }),
       );
-      expect(payload).toEqual(formData);
+      expect(payload).toEqual(expectedPayload);
     });
     it('generates proper standalone URL', () => {
       const { url, payload } = getExploreUrlAndPayload({
@@ -85,7 +89,7 @@ describe('exploreUtils', () => {
         URI('/superset/explore/')
           .search({ standalone: 'true' }),
       );
-      expect(payload).toEqual(formData);
+      expect(payload).toEqual(expectedPayload);
     });
     it('preserves main URLs params', () => {
       const { url, payload } = getExploreUrlAndPayload({
@@ -99,7 +103,7 @@ describe('exploreUtils', () => {
         URI('/superset/explore_json/')
           .search({ foo: 'bar' }),
       );
-      expect(payload).toEqual(formData);
+      expect(payload).toEqual(expectedPayload);
     });
     it('generate proper save slice url', () => {
       const { url, payload } = getExploreUrlAndPayload({
@@ -113,7 +117,7 @@ describe('exploreUtils', () => {
         URI('/superset/explore_json/')
           .search({ foo: 'bar' }),
       );
-      expect(payload).toEqual(formData);
+      expect(payload).toEqual(expectedPayload);
     });
     it('generate proper saveas slice url', () => {
       const { url, payload } = getExploreUrlAndPayload({
@@ -127,7 +131,7 @@ describe('exploreUtils', () => {
         URI('/superset/explore_json/')
           .search({ foo: 'bar' }),
       );
-      expect(payload).toEqual(formData);
+      expect(payload).toEqual(expectedPayload);
     });
   });
 
@@ -199,7 +203,7 @@ describe('exploreUtils', () => {
   describe('getExploreLongUrl', () => {
     it('generates proper base url with form_data', () => {
       compareURI(
-        URI(getExploreLongUrl(formData, 'base')),
+        URI(getExploreLongUrl({ formData, endpointType: 'base' })),
         URI('/superset/explore/').search({ form_data: sFormData }),
       );
     });
diff --git a/superset/assets/spec/javascripts/superset-ui/WordCloudBuildQuery.test.ts b/superset/assets/spec/javascripts/superset-ui/WordCloudBuildQuery.test.ts
index a6edfbc31d..be789920e6 100644
--- a/superset/assets/spec/javascripts/superset-ui/WordCloudBuildQuery.test.ts
+++ b/superset/assets/spec/javascripts/superset-ui/WordCloudBuildQuery.test.ts
@@ -3,7 +3,8 @@ import buildQuery from 'src/visualizations/wordcloud/buildQuery';
 describe('WordCloud buildQuery', () => {
   const formData = {
     datasource: '5__table',
-    granularity_sqla: 'ds',
+    granularity: 'ds',
+    metric: 'simpleMetric',
     series: 'foo',
   };
 
diff --git a/superset/assets/src/chart/chartAction.js b/superset/assets/src/chart/chartAction.js
index 6c6ddab2fb..0b1eaa77c4 100644
--- a/superset/assets/src/chart/chartAction.js
+++ b/superset/assets/src/chart/chartAction.js
@@ -156,7 +156,7 @@ export function runQuery(formData, force = false, timeout = 60, key) {
 
     let querySettings = {
       url,
-      postPayload: { form_data: payload },
+      postPayload: { ...payload },
       signal,
       timeout: timeout * 1000,
     };
@@ -212,7 +212,7 @@ export function runQuery(formData, force = false, timeout = 60, key) {
     return Promise.all([
       queryPromise,
       dispatch(triggerQuery(false, key)),
-      dispatch(updateQueryFormData(payload, key)),
+      dispatch(updateQueryFormData(payload.form_data, key)),
       ...annotationLayers.map(x => dispatch(runAnnotationQuery(x, timeout, formData, key))),
     ]);
   };
diff --git a/superset/assets/src/explore/actions/saveModalActions.js b/superset/assets/src/explore/actions/saveModalActions.js
index 1993a382a9..ac9c1be076 100644
--- a/superset/assets/src/explore/actions/saveModalActions.js
+++ b/superset/assets/src/explore/actions/saveModalActions.js
@@ -52,7 +52,7 @@ export function saveSlice(formData, requestParams) {
       requestParams,
     });
 
-    return SupersetClient.post({ url, postPayload: { form_data: payload } })
+    return SupersetClient.post({ url, postPayload: { ...payload } })
       .then(({ json }) => dispatch(saveSliceSuccess(json)))
       .catch(() => dispatch(saveSliceFailed()));
   };
diff --git a/superset/assets/src/explore/components/DisplayQueryButton.jsx b/superset/assets/src/explore/components/DisplayQueryButton.jsx
index 042ca24d83..9601b2ca2b 100644
--- a/superset/assets/src/explore/components/DisplayQueryButton.jsx
+++ b/superset/assets/src/explore/components/DisplayQueryButton.jsx
@@ -60,7 +60,7 @@ export default class DisplayQueryButton extends React.PureComponent {
     });
     SupersetClient.post({
       url,
-      postPayload: { form_data: payload },
+      postPayload: { ...payload },
     })
       .then(({ json }) => {
         this.setState({
diff --git a/superset/assets/src/explore/components/EmbedCodeButton.jsx b/superset/assets/src/explore/components/EmbedCodeButton.jsx
index ff28fbbfeb..23529dfd16 100644
--- a/superset/assets/src/explore/components/EmbedCodeButton.jsx
+++ b/superset/assets/src/explore/components/EmbedCodeButton.jsx
@@ -31,7 +31,10 @@ export default class EmbedCodeButton extends React.Component {
   generateEmbedHTML() {
     const srcLink = (
       window.location.origin +
-      getExploreLongUrl(this.props.latestQueryFormData, 'standalone') +
+      getExploreLongUrl({
+        formData: this.props.latestQueryFormData,
+        endpointType: 'standalone',
+      }) +
       `&height=${this.state.height}`
     );
     return (
diff --git a/superset/assets/src/explore/components/ExploreActionButtons.jsx b/superset/assets/src/explore/components/ExploreActionButtons.jsx
index 6bb059c66b..7e413cdb9b 100644
--- a/superset/assets/src/explore/components/ExploreActionButtons.jsx
+++ b/superset/assets/src/explore/components/ExploreActionButtons.jsx
@@ -28,7 +28,7 @@ export default function ExploreActionButtons({
     <div className="btn-group results" role="group">
       {latestQueryFormData &&
         <URLShortLinkButton
-          url={getExploreLongUrl(latestQueryFormData)}
+          url={getExploreLongUrl({ formData: latestQueryFormData })}
           emailSubject="Superset Chart"
           emailContent="Check out this chart: "
         />
diff --git a/superset/assets/src/explore/components/ExploreViewContainer.jsx b/superset/assets/src/explore/components/ExploreViewContainer.jsx
index 162cb16333..b6f22f9670 100644
--- a/superset/assets/src/explore/components/ExploreViewContainer.jsx
+++ b/superset/assets/src/explore/components/ExploreViewContainer.jsx
@@ -171,7 +171,7 @@ class ExploreViewContainer extends React.Component {
 
   addHistory({ isReplace = false, title }) {
     const { payload } = getExploreUrlAndPayload({ formData: this.props.form_data });
-    const longUrl = getExploreLongUrl(this.props.form_data);
+    const longUrl = getExploreLongUrl({ formData: this.props.form_data, forceExplore: true });
     try {
       if (isReplace) {
         history.replaceState(payload, title, longUrl);
diff --git a/superset/assets/src/explore/exploreUtils.js b/superset/assets/src/explore/exploreUtils.js
index 4eac3abce4..658eddf3be 100644
--- a/superset/assets/src/explore/exploreUtils.js
+++ b/superset/assets/src/explore/exploreUtils.js
@@ -1,5 +1,6 @@
 /* eslint camelcase: 0 */
 import URI from 'urijs';
+import { getChartBuildQueryRegistry } from '@superset-ui/chart';
 import { availableDomains } from '../utils/hostNamesConfig';
 
 export function getChartKey(explore) {
@@ -31,22 +32,34 @@ export function getAnnotationJsonUrl(slice_id, form_data, isNative) {
     }).toString();
 }
 
-export function getURIDirectory(formData, endpointType = 'base') {
+export function getURIDirectory({
+  formData,
+  endpointType = 'base',
+  forceExplore,
+}) {
   // Building the directory part of the URI
   let directory = '/superset/explore/';
   if (['json', 'csv', 'query', 'results', 'samples'].indexOf(endpointType) >= 0) {
     directory = '/superset/explore_json/';
   }
+  // const buildQueryRegistry = getChartBuildQueryRegistry();
+  if (formData.viz_type === 'word_cloud' && !forceExplore) {
+    directory = '/api/v1/query/';
+  }
   return directory;
 }
 
-export function getExploreLongUrl(formData, endpointType) {
+export function getExploreLongUrl({
+  formData,
+  endpointType,
+  forceExplore = false,
+}) {
   if (!formData.datasource) {
     return null;
   }
 
   const uri = new URI('/');
-  const directory = getURIDirectory(formData, endpointType);
+  const directory = getURIDirectory({ formData, endpointType, forceExplore });
   const search = uri.search(true);
   search.form_data = JSON.stringify(formData);
   if (endpointType === 'standalone') {
@@ -81,7 +94,7 @@ export function getExploreUrlAndPayload({
     uri = URI(URI(curUrl).search());
   }
 
-  const directory = getURIDirectory(formData, endpointType);
+  const directory = getURIDirectory({ formData, endpointType });
 
   // Building the querystring (search) part of the URI
   const search = uri.search(true);
@@ -115,7 +128,12 @@ export function getExploreUrlAndPayload({
     });
   }
   uri = uri.search(search).directory(directory);
-  const payload = { ...formData };
+  let payload = { form_data: { ...formData } };
+
+  const buildQuery = getChartBuildQueryRegistry().get(formData.viz_type);
+  if (buildQuery) {
+    payload = { query_context: buildQuery(formData) };
+  }
 
   return {
     url: uri.toString(),
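
For reference, the object returned by `getExploreUrlAndPayload` now carries an
explicit top-level key (`form_data` for legacy charts, `query_context` for
charts with a registered `buildQuery`), which is why callers such as
`runQuery` can spread it straight into the POST body. A rough sketch of the
two shapes, with illustrative values:

    # Illustrative payload shapes; the real query_context is produced by the
    # chart's buildQuery function and these field values are placeholders.
    legacy_payload = {
        'form_data': {
            'datasource': '5__table',
            'viz_type': 'table',
            'metrics': ['count'],
        },
    }

    query_context_payload = {
        'query_context': {
            'datasource': {'id': 5, 'type': 'table'},
            'queries': [{
                'granularity': 'ds',
                'groupby': ['foo'],
                'metrics': [{'label': 'simpleMetric'}],
            }],
        },
    }
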
diff --git a/superset/assets/src/visualizations/deckgl/Multi/Multi.jsx b/superset/assets/src/visualizations/deckgl/Multi/Multi.jsx
index 50dfa4bbab..0669e0b9b3 100644
--- a/superset/assets/src/visualizations/deckgl/Multi/Multi.jsx
+++ b/superset/assets/src/visualizations/deckgl/Multi/Multi.jsx
@@ -49,7 +49,7 @@ class DeckMulti extends React.PureComponent {
       };
 
       SupersetClient.get({
-          endpoint: getExploreLongUrl(subsliceCopy.form_data, 'json'),
+          endpoint: getExploreLongUrl({ formData: subsliceCopy.form_data, endpointType: 'json' }),
         })
         .then(({ json }) => {
           const layer = layerGenerators[subsliceCopy.form_data.viz_type](
diff --git a/superset/assets/src/visualizations/nvd3/LineMulti/LineMulti.jsx b/superset/assets/src/visualizations/nvd3/LineMulti/LineMulti.jsx
index 525a0f0944..3f5aff4350 100644
--- a/superset/assets/src/visualizations/nvd3/LineMulti/LineMulti.jsx
+++ b/superset/assets/src/visualizations/nvd3/LineMulti/LineMulti.jsx
@@ -81,7 +81,7 @@ class LineMulti extends React.Component {
         time_range: timeRange,
       };
       const addPrefix = prefixMetricWithSliceName;
-      return getJson(getExploreLongUrl(combinedFormData, 'json'))
+      return getJson(getExploreLongUrl({ formData: combinedFormData, endpointType: 'json' }))
         .then(data => data.map(({ key, values }) => ({
           key: addPrefix ? `${subslice.slice_name}: ${key}` : key,
           type: combinedFormData.viz_type,
diff --git a/superset/assets/src/visualizations/wordcloud/FormData.ts b/superset/assets/src/visualizations/wordcloud/FormData.ts
index 55f81313af..dafabc652c 100644
--- a/superset/assets/src/visualizations/wordcloud/FormData.ts
+++ b/superset/assets/src/visualizations/wordcloud/FormData.ts
@@ -3,6 +3,8 @@ import { FormData as GenericFormData } from 'src/query';
 // FormData specific to the wordcloud viz
 interface WordCloudFormData {
   series: string;
+  metric: string;
+  time_range?: string;
 }
 
 // FormData for wordcloud contains both common properties of all form data
diff --git a/superset/assets/src/visualizations/wordcloud/buildQuery.ts b/superset/assets/src/visualizations/wordcloud/buildQuery.ts
index 2aa0f2cd75..feee0dee94 100644
--- a/superset/assets/src/visualizations/wordcloud/buildQuery.ts
+++ b/superset/assets/src/visualizations/wordcloud/buildQuery.ts
@@ -6,5 +6,6 @@ export default function buildQuery(formData: FormData) {
   return buildQueryContext(formData, (baseQueryObject) => [{
     ...baseQueryObject,
     groupby: [formData.series],
+    time_range: formData.time_range,
   }]);
 }
diff --git a/superset/assets/src/visualizations/wordcloud/transformProps.js b/superset/assets/src/visualizations/wordcloud/transformProps.js
index ef824e9f6b..183c63c373 100644
--- a/superset/assets/src/visualizations/wordcloud/transformProps.js
+++ b/superset/assets/src/visualizations/wordcloud/transformProps.js
@@ -1,12 +1,14 @@
 function transformData(data, formData) {
   const { metric, series } = formData;
+  if (metric && series) {
+    const transformedData = data.map(datum => ({
+      text: datum[series],
+      size: datum[metric.label || metric],
+    }));
 
-  const transformedData = data.map(datum => ({
-    text: datum[series],
-    size: datum[metric.label || metric],
-  }));
-
-  return transformedData;
+    return transformedData;
+  }
+  return [];
 }
 
 export default function transformProps(chartProps) {
@@ -21,7 +23,7 @@ export default function transformProps(chartProps) {
   return {
     width,
     height,
-    data: transformData(payload.data, formData),
+    data: transformData(payload[0].data, formData),
     colorScheme,
     rotation,
     sizeRange: [sizeFrom, sizeTo],
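
The `payload[0].data` change above reflects that the new `/api/v1/query`
endpoint returns a list of payloads, one per query object in the query
context, rather than a single object. A sketch of the assumed response shape:

    # Assumed shape of the response consumed by transformProps; each entry
    # corresponds to one QueryObject in the posted QueryContext.
    response = [
        {
            'status': 'success',
            'query': 'SELECT ...',
            'data': [
                {'foo': 'hello', 'simpleMetric': 10},
                {'foo': 'world', 'simpleMetric': 3},
            ],
        },
    ]
    rows = response[0]['data']  # i.e. payload[0].data in the JS above
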
diff --git a/superset/common/query_context.py b/superset/common/query_context.py
index 21b0dac368..f0005c3701 100644
--- a/superset/common/query_context.py
+++ b/superset/common/query_context.py
@@ -1,27 +1,244 @@
-# pylint: disable=R
+# pylint: disable=C,R,W
+from datetime import datetime, timedelta
+import logging
+import pickle as pkl
+import traceback
 from typing import Dict, List
 
+import numpy as np
+import pandas as pd
+
+from superset import app, cache
 from superset import db
 from superset.connectors.connector_registry import ConnectorRegistry
+from superset.utils import core as utils
+from superset.utils.core import DTTM_ALIAS
 from .query_object import QueryObject
 
+config = app.config
+stats_logger = config.get('STATS_LOGGER')
+
 
 class QueryContext:
     """
     The query context contains the query object and additional fields necessary
     to retrieve the data payload for a given viz.
     """
+
+    default_fillna = 0
+    cache_type = 'df'
+    enforce_numerical_metrics = True
+
     # TODO: Type datasource and query_object dictionary with TypedDict when it becomes
     # a vanilla python type https://github.com/python/mypy/issues/5288
     def __init__(
             self,
             datasource: Dict,
             queries: List[Dict],
+            force: bool = False,
+            custom_cache_timeout: int = None,
     ):
         self.datasource = ConnectorRegistry.get_datasource(datasource.get('type'),
                                                            int(datasource.get('id')),
                                                            db.session)
         self.queries = list(map(lambda query_obj: QueryObject(**query_obj), queries))
 
-    def get_data(self):
-        raise NotImplementedError()
+        self.force = force
+
+        self.custom_cache_timeout = custom_cache_timeout
+
+        self.enforce_numerical_metrics = True
+
+    def get_query_result(self, query_object):
+        """Returns a pandas dataframe based on the query object"""
+
+        # Here we assume that all the queries will use the same datasource,
+        # which is a valid assumption for the current setting. In the long
+        # term, we may or may not support queries from different datasources.
+
+        timestamp_format = None
+        if self.datasource.type == 'table':
+            dttm_col = self.datasource.get_col(query_object.granularity)
+            if dttm_col:
+                timestamp_format = dttm_col.python_date_format
+
+        # The datasource here can be different backend but the interface is common
+        result = self.datasource.query(query_object.to_dict())
+
+        df = result.df
+        # Transform the timestamp we received from database to pandas supported
+        # datetime format. If no python_date_format is specified, the pattern will
+        # be considered as the default ISO date format
+        # If the datetime format is unix, the parse will use the corresponding
+        # parsing logic
+        if df is not None and not df.empty:
+            if DTTM_ALIAS in df.columns:
+                if timestamp_format in ('epoch_s', 'epoch_ms'):
+                    # Column has already been formatted as a timestamp.
+                    df[DTTM_ALIAS] = df[DTTM_ALIAS].apply(pd.Timestamp)
+                else:
+                    df[DTTM_ALIAS] = pd.to_datetime(
+                        df[DTTM_ALIAS], utc=False, format=timestamp_format)
+                if self.datasource.offset:
+                    df[DTTM_ALIAS] += timedelta(hours=self.datasource.offset)
+                df[DTTM_ALIAS] += query_object.time_shift
+
+            if self.enforce_numerical_metrics:
+                self.df_metrics_to_num(df, query_object)
+
+            df = df.replace([np.inf, -np.inf], np.nan)
+            df = self.handle_nulls(df)
+        return {
+            'query': result.query,
+            'status': result.status,
+            'error_message': result.error_message,
+            'df': df,
+        }
+
+    def df_metrics_to_num(self, df, query_object):
+        """Converting metrics to numeric when pandas.read_sql cannot"""
+        metrics = [metric['label'] for metric in query_object.metrics]
+        for col, dtype in df.dtypes.items():
+            if dtype.type == np.object_ and col in metrics:
+                df[col] = pd.to_numeric(df[col], errors='coerce')
+
+    def handle_nulls(self, df):
+        fillna = self.get_fillna_for_columns(df.columns)
+        return df.fillna(fillna)
+
+    def get_fillna_for_col(self, col):
+        """Returns the value to use as filler for a specific Column.type"""
+        if col and col.is_string:
+            return ' NULL'
+        return self.default_fillna
+
+    def get_fillna_for_columns(self, columns=None):
+        """Returns a dict or scalar that can be passed to DataFrame.fillna"""
+        if columns is None:
+            return self.default_fillna
+        columns_dict = {col.column_name: col for col in self.datasource.columns}
+        fillna = {
+            c: self.get_fillna_for_col(columns_dict.get(c))
+            for c in columns
+        }
+        return fillna
+
+    def get_data(self, df):
+        return df.to_dict(orient='records')
+
+    def get_single_payload(self, query_obj):
+        """Returns a payload of metadata and data"""
+        payload = self.get_df_payload(query_obj)
+        df = payload.get('df')
+        status = payload.get('status')
+        if status != utils.QueryStatus.FAILED:
+            if df is not None and df.empty:
+                payload['error'] = 'No data'
+            else:
+                payload['data'] = self.get_data(df)
+        if 'df' in payload:
+            del payload['df']
+        return payload
+
+    def get_payload(self):
+        """Get all the paylaods from the arrays"""
+        return [self.get_single_payload(query_ojbect) for query_ojbect in self.queries]
+
+    @property
+    def cache_timeout(self):
+        if self.custom_cache_timeout is not None:
+            return self.custom_cache_timeout
+        if self.datasource.cache_timeout is not None:
+            return self.datasource.cache_timeout
+        if (
+                hasattr(self.datasource, 'database') and
+                self.datasource.database.cache_timeout is not None):
+            return self.datasource.database.cache_timeout
+        return config.get('CACHE_DEFAULT_TIMEOUT')
+
+    def get_df_payload(self, query_obj):
+        """Handles caching around the df paylod retrieval"""
+        cache_key = query_obj.cache_key(
+            datasource=self.datasource.uid) if query_obj else None
+        logging.info('Cache key: {}'.format(cache_key))
+        is_loaded = False
+        stacktrace = None
+        df = None
+        cached_dttm = datetime.utcnow().isoformat().split('.')[0]
+        cache_value = None
+        status = None
+        query = ''
+        error_message = None
+        if cache_key and cache and not self.force:
+            cache_value = cache.get(cache_key)
+            if cache_value:
+                stats_logger.incr('loaded_from_cache')
+                try:
+                    cache_value = pkl.loads(cache_value)
+                    df = cache_value['df']
+                    query = cache_value['query']
+                    status = utils.QueryStatus.SUCCESS
+                    is_loaded = True
+                except Exception as e:
+                    logging.exception(e)
+                    logging.error('Error reading cache: ' +
+                                  utils.error_msg_from_exception(e))
+                logging.info('Serving from cache')
+
+        if query_obj and not is_loaded:
+            try:
+                query_result = self.get_query_result(query_obj)
+                status = query_result['status']
+                query = query_result['query']
+                error_message = query_result['error_message']
+                df = query_result['df']
+                if status != utils.QueryStatus.FAILED:
+                    stats_logger.incr('loaded_from_source')
+                    is_loaded = True
+            except Exception as e:
+                logging.exception(e)
+                if not error_message:
+                    error_message = '{}'.format(e)
+                status = utils.QueryStatus.FAILED
+                stacktrace = traceback.format_exc()
+
+            if (
+                    is_loaded and
+                    cache_key and
+                    cache and
+                    status != utils.QueryStatus.FAILED):
+                try:
+                    cache_value = dict(
+                        dttm=cached_dttm,
+                        df=df if df is not None else None,
+                        query=query,
+                    )
+                    cache_binary = pkl.dumps(
+                        cache_value, protocol=pkl.HIGHEST_PROTOCOL)
+
+                    logging.info('Caching {} chars at key {}'.format(
+                        len(cache_binary), cache_key))
+
+                    stats_logger.incr('set_cache_key')
+                    cache.set(
+                        cache_key,
+                        cache_binary,
+                        timeout=self.cache_timeout)
+                except Exception as e:
+                    # cache.set call can fail if the backend is down or if
+                    # the key is too large or whatever other reasons
+                    logging.warning('Could not cache key {}'.format(cache_key))
+                    logging.exception(e)
+                    cache.delete(cache_key)
+        return {
+            'cache_key': cache_key,
+            'cached_dttm': cache_value['dttm'] if cache_value is not None else None,
+            'cache_timeout': self.cache_timeout,
+            'df': df,
+            'error': error_message,
+            'is_cached': cache_key is not None,
+            'query': query,
+            'status': status,
+            'stacktrace': stacktrace,
+            'rowcount': len(df.index) if df is not None else 0,
+        }
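
Putting the new class together, a minimal sketch of driving `QueryContext`
directly from Python; the datasource id and query fields are illustrative,
and this assumes a running Superset app context:

    # Minimal usage sketch, assuming a table datasource with id 5 exists.
    from superset.common.query_context import QueryContext

    query_context = QueryContext(
        datasource={'id': 5, 'type': 'table'},
        queries=[{
            'granularity': 'ds',
            'groupby': ['foo'],
            'metrics': [{'label': 'simpleMetric', 'expressionType': 'BUILTIN'}],
            'time_range': 'Last week',
        }],
    )
    payloads = query_context.get_payload()  # one payload dict per query object
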
diff --git a/superset/common/query_object.py b/superset/common/query_object.py
index 8116d269c4..edbc460f0b 100644
--- a/superset/common/query_object.py
+++ b/superset/common/query_object.py
@@ -1,13 +1,15 @@
 # pylint: disable=R
+import hashlib
 from typing import Dict, List, Optional
 
+import simplejson as json
+
 from superset import app
 from superset.utils import core as utils
 
+
 # TODO: Type Metrics dictionary with TypedDict when it becomes a vanilla python type
 # https://github.com/python/mypy/issues/5288
-Metric = Dict
-
 
 class QueryObject:
     """
@@ -17,31 +19,76 @@ class QueryObject:
     def __init__(
             self,
             granularity: str,
-            groupby: List[str] = None,
-            metrics: List[Metric] = None,
+            groupby: List[str],
+            metrics: List[Dict],
             filters: List[str] = None,
             time_range: Optional[str] = None,
             time_shift: Optional[str] = None,
             is_timeseries: bool = False,
             row_limit: int = app.config.get('ROW_LIMIT'),
             limit: int = 0,
-            timeseries_limit_metric: Optional[Metric] = None,
+            timeseries_limit_metric: Optional[Dict] = None,
             order_desc: bool = True,
             extras: Optional[Dict] = None,
     ):
         self.granularity = granularity
         self.from_dttm, self.to_dttm = utils.get_since_until(time_range, time_shift)
         self.is_timeseries = is_timeseries
-        self.groupby = groupby or []
-        self.metrics = metrics or []
-        self.filter = filters or []
+        self.time_range = time_range
+        self.time_shift = time_shift
+        self.groupby = groupby
+        self.metrics = metrics
         self.row_limit = row_limit
+        self.filter = filters if filters is not None else []
         self.timeseries_limit = int(limit)
         self.timeseries_limit_metric = timeseries_limit_metric
         self.order_desc = order_desc
         self.prequeries = []
         self.is_prequery = False
-        self.extras = extras
+        self.extras = extras if extras is not None else {}
 
     def to_dict(self):
-        raise NotImplementedError()
+        query_object_dict = {
+            'granularity': self.granularity,
+            'from_dttm': self.from_dttm,
+            'to_dttm': self.to_dttm,
+            'is_timeseries': self.is_timeseries,
+            'groupby': self.groupby,
+            'metrics': self.metrics,
+            'row_limit': self.row_limit,
+            'filter': self.filter,
+            'timeseries_limit': self.timeseries_limit,
+            'timeseries_limit_metric': self.timeseries_limit_metric,
+            'order_desc': self.order_desc,
+            'prequeries': self.prequeries,
+            'is_prequery': self.is_prequery,
+            'extras': self.extras,
+        }
+        query_object_dict.update(self.extras)
+        return query_object_dict
+
+    def cache_key(self, **extra):
+        """
+        The cache key is made out of the key/values in `query_obj`, plus any
+        other key/values in `extra`.
+        We remove datetime bounds that are hard values, and replace them with
+        the user-provided inputs to bounds, which may be time-relative (as in
+        "5 days ago" or "now").
+        """
+        cache_dict = self.to_dict()
+        cache_dict.update(extra)
+
+        for k in ['from_dttm', 'to_dttm']:
+            del cache_dict[k]
+
+        cache_dict['time_range'] = self.time_range
+        json_data = self.json_dumps(cache_dict, sort_keys=True)
+        return hashlib.md5(json_data.encode('utf-8')).hexdigest()
+
+    def json_dumps(self, obj, sort_keys=False):
+        return json.dumps(
+            obj,
+            default=utils.json_int_dttm_ser,
+            ignore_nan=True,
+            sort_keys=sort_keys,
+        )
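
The cache key is thus a deterministic digest of the query dictionary with the
resolved datetime bounds stripped and the relative `time_range` kept. A
standalone sketch of the same scheme (the helper name is hypothetical):

    # Standalone sketch of the cache-key scheme above: drop hard datetime
    # bounds, keep the relative time range, hash a stable JSON dump.
    import hashlib
    import simplejson as json

    def compute_cache_key(query_dict, **extra):
        cache_dict = dict(query_dict, **extra)
        cache_dict.pop('from_dttm', None)
        cache_dict.pop('to_dttm', None)
        json_data = json.dumps(cache_dict, sort_keys=True, ignore_nan=True)
        return hashlib.md5(json_data.encode('utf-8')).hexdigest()

    key = compute_cache_key(
        {'granularity': 'ds', 'time_range': 'Last week'},
        datasource='5__table',
    )
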
diff --git a/superset/connectors/druid/models.py b/superset/connectors/druid/models.py
index 937c8d854c..7f367b418c 100644
--- a/superset/connectors/druid/models.py
+++ b/superset/connectors/druid/models.py
@@ -886,10 +886,10 @@ def metrics_and_post_aggs(metrics, metrics_dict, druid_version=None):
         for metric in metrics:
             if utils.is_adhoc_metric(metric):
                 adhoc_agg_configs.append(metric)
-            elif metrics_dict[metric].metric_type != POST_AGG_TYPE:
-                saved_agg_names.add(metric)
+            elif metrics_dict[metric['label']].metric_type != POST_AGG_TYPE:
+                saved_agg_names.add(metric['label'])
             else:
-                postagg_names.append(metric)
+                postagg_names.append(metric['label'])
         # Create the post aggregations, maintain order since postaggs
         # may depend on previous ones
         post_aggs = OrderedDict()
@@ -1364,7 +1364,8 @@ def query(self, query_obj):
             cols += [DTTM_ALIAS]
         cols += query_obj.get('groupby') or []
         cols += query_obj.get('columns') or []
-        cols += query_obj.get('metrics') or []
+        metrics = query_obj.get('metrics') or []
+        cols += [metric['label'] for metric in metrics]
 
         cols = utils.get_metric_names(cols)
         cols = [col for col in cols if col in df.columns]
diff --git a/superset/connectors/sqla/models.py b/superset/connectors/sqla/models.py
index cf22add628..66ff1e1962 100644
--- a/superset/connectors/sqla/models.py
+++ b/superset/connectors/sqla/models.py
@@ -566,7 +566,6 @@ def get_sqla_query(  # sqla
 
         cols = {col.column_name: col for col in self.columns}
         metrics_dict = {m.metric_name: m for m in self.metrics}
-
         if not granularity and is_timeseries:
             raise Exception(_(
                 'Datetime column not provided as part table configuration '
@@ -577,10 +576,10 @@ def get_sqla_query(  # sqla
         for m in metrics:
             if utils.is_adhoc_metric(m):
                 metrics_exprs.append(self.adhoc_metric_to_sqla(m, cols))
-            elif m in metrics_dict:
-                metrics_exprs.append(metrics_dict.get(m).get_sqla_col())
+            elif m['label'] in metrics_dict:
+                metrics_exprs.append(metrics_dict.get(m['label']).get_sqla_col())
             else:
-                raise Exception(_("Metric '{}' is not valid".format(m)))
+                raise Exception(_("Metric '{}' is not valid".format(m['label'])))
         if metrics_exprs:
             main_metric_expr = metrics_exprs[0]
         else:
diff --git a/superset/utils/core.py b/superset/utils/core.py
index bd23d0776c..1d6b18f6ba 100644
--- a/superset/utils/core.py
+++ b/superset/utils/core.py
@@ -856,6 +856,7 @@ def get_main_database(session):
 def is_adhoc_metric(metric):
     return (
         isinstance(metric, dict) and
+        'expressionType' in metric and
         (
             (
                 metric['expressionType'] == ADHOC_METRIC_EXPRESSION_TYPES['SIMPLE'] and
diff --git a/superset/views/api.py b/superset/views/api.py
index 5f684f3cf9..e42bd202ca 100644
--- a/superset/views/api.py
+++ b/superset/views/api.py
@@ -1,14 +1,16 @@
 # pylint: disable=R
 import json
 
-from flask import g, request
+from flask import request
 from flask_appbuilder import expose
 from flask_appbuilder.security.decorators import has_access_api
 
-from superset import appbuilder, security_manager
+from superset import appbuilder, db, security_manager
 from superset.common.query_context import QueryContext
+from superset.legacy import update_time_range
+import superset.models.core as models
 from superset.models.core import Log
-from .base import api, BaseSupersetView, data_payload_response, handle_api_exception
+from .base import api, BaseSupersetView, handle_api_exception
 
 
 class Api(BaseSupersetView):
@@ -16,16 +18,37 @@ class Api(BaseSupersetView):
     @api
     @handle_api_exception
     @has_access_api
-    @expose('/v1/query/', methods=['POST'])
+    @expose('/v1/query', methods=['POST'])
     def query(self):
         """
         Takes a query_obj constructed in the client and returns payload data response
         for the given query_obj.
         """
         query_context = QueryContext(**json.loads(request.form.get('query_context')))
-        security_manager.assert_datasource_permission(query_context.datasource, g.user)
-        payload_json = query_context.get_data()
-        return data_payload_response(payload_json)
+        security_manager.assert_datasource_permission(query_context.datasource)
+        payload_json = query_context.get_payload()
+        return json.dumps(payload_json)
+
+    @Log.log_this
+    @api
+    @handle_api_exception
+    @has_access_api
+    @expose('/v1/form_data', methods=['GET'])
+    def query_form_data(self):
+        """
+        Takes a query_obj constructed in the client and returns payload data response
+        for the given query_obj.
+        """
+        form_data = {}
+        slice_id = request.args.get('slice_id')
+        if slice_id:
+            slc = db.session.query(models.Slice).filter_by(id=slice_id).one_or_none()
+            if slc:
+                form_data = slc.form_data.copy()
+
+        update_time_range(form_data)
+
+        return json.dumps(form_data)
 
 
 appbuilder.add_view_no_menu(Api)
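
From a client's point of view, the new endpoint takes a JSON-encoded
`query_context` form field and returns the list of payloads. A hedged
example; the host, authentication handling, and field values are assumptions,
since Superset normally requires an authenticated session:

    # Assumed client call; host, auth and query values are placeholders.
    import json
    import requests

    resp = requests.post(
        'http://localhost:8088/api/v1/query',
        data={'query_context': json.dumps({
            'datasource': {'id': 5, 'type': 'table'},
            'queries': [{
                'granularity': 'ds',
                'groupby': ['foo'],
                'metrics': [{'label': 'simpleMetric',
                             'expressionType': 'BUILTIN'}],
            }],
        })},
    )
    payloads = resp.json()  # one payload per query object
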
diff --git a/superset/viz.py b/superset/viz.py
index a3c5f06b94..578474a767 100644
--- a/superset/viz.py
+++ b/superset/viz.py
@@ -252,6 +252,23 @@ def process_query_filters(self):
         utils.split_adhoc_filters_into_base_filters(self.form_data)
 
     def query_obj(self):
+        query_obj = self._query_obj()
+        if query_obj:
+            metrics = query_obj.get('metrics', [])
+            formatted_metrics = []
+            for metric in metrics:
+                if not isinstance(metric, dict):
+                    metric = {
+                        'label': metric,
+                        'expressionType': 'BUILTIN',
+                    }
+                formatted_metrics.append(metric)
+            query_obj.update({
+                'metrics': formatted_metrics,
+            })
+        return query_obj
+
+    def _query_obj(self):
         """Building a query object"""
         form_data = self.form_data
         self.process_query_filters()
@@ -519,8 +536,8 @@ def should_be_timeseries(self):
                 "uncheck 'Include Time'"))
         return fd.get('include_time')
 
-    def query_obj(self):
-        d = super(TableViz, self).query_obj()
+    def _query_obj(self):
+        d = super(TableViz, self)._query_obj()
         fd = self.form_data
 
         if fd.get('all_columns') and (fd.get('groupby') or fd.get('metrics')):
@@ -614,8 +631,8 @@ class TimeTableViz(BaseViz):
     credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
     is_timeseries = True
 
-    def query_obj(self):
-        d = super(TimeTableViz, self).query_obj()
+    def _query_obj(self):
+        d = super(TimeTableViz, self)._query_obj()
         fd = self.form_data
 
         if not fd.get('metrics'):
@@ -655,8 +672,8 @@ class PivotTableViz(BaseViz):
     credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
     is_timeseries = False
 
-    def query_obj(self):
-        d = super(PivotTableViz, self).query_obj()
+    def _query_obj(self):
+        d = super(PivotTableViz, self)._query_obj()
         groupby = self.form_data.get('groupby')
         columns = self.form_data.get('columns')
         metrics = self.form_data.get('metrics')
@@ -707,7 +724,7 @@ class MarkupViz(BaseViz):
     verbose_name = _('Markup')
     is_timeseries = False
 
-    def query_obj(self):
+    def _query_obj(self):
         return None
 
     def get_df(self, query_obj=None):
@@ -741,8 +758,8 @@ class WordCloudViz(BaseViz):
     verbose_name = _('Word Cloud')
     is_timeseries = False
 
-    def query_obj(self):
-        d = super(WordCloudViz, self).query_obj()
+    def _query_obj(self):
+        d = super(WordCloudViz, self)._query_obj()
         d['groupby'] = [self.form_data.get('series')]
         return d
 
@@ -789,13 +806,10 @@ def get_data(self, df):
         data = {}
         records = df.to_dict('records')
         for metric in self.metric_labels:
-            values = {}
-            for obj in records:
-                v = obj[DTTM_ALIAS]
-                if hasattr(v, 'value'):
-                    v = v.value
-                values[str(v / 10**9)] = obj.get(metric)
-            data[metric] = values
+            data[metric] = {
+                str(obj[DTTM_ALIAS] / 10**9): obj.get(metric)
+                for obj in records
+            }
 
         start, end = utils.get_since_until(form_data.get('time_range'),
                                            form_data.get('since'),
@@ -825,8 +839,8 @@ def get_data(self, df):
             'range': range_,
         }
 
-    def query_obj(self):
-        d = super(CalHeatmapViz, self).query_obj()
+    def _query_obj(self):
+        d = super(CalHeatmapViz, self)._query_obj()
         fd = self.form_data
         d['metrics'] = fd.get('metrics')
         return d
@@ -939,9 +953,9 @@ class BubbleViz(NVD3Viz):
     verbose_name = _('Bubble Chart')
     is_timeseries = False
 
-    def query_obj(self):
+    def _query_obj(self):
         form_data = self.form_data
-        d = super(BubbleViz, self).query_obj()
+        d = super(BubbleViz, self)._query_obj()
         d['groupby'] = [
             form_data.get('entity'),
         ]
@@ -989,9 +1003,9 @@ class BulletViz(NVD3Viz):
     verbose_name = _('Bullet Chart')
     is_timeseries = False
 
-    def query_obj(self):
+    def _query_obj(self):
         form_data = self.form_data
-        d = super(BulletViz, self).query_obj()
+        d = super(BulletViz, self)._query_obj()
         self.metric = form_data.get('metric')
 
         def as_strings(field):
@@ -1039,8 +1053,8 @@ class BigNumberViz(BaseViz):
     credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
     is_timeseries = True
 
-    def query_obj(self):
-        d = super(BigNumberViz, self).query_obj()
+    def _query_obj(self):
+        d = super(BigNumberViz, self)._query_obj()
         metric = self.form_data.get('metric')
         if not metric:
             raise Exception(_('Pick a metric!'))
@@ -1058,8 +1072,8 @@ class BigNumberTotalViz(BaseViz):
     credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
     is_timeseries = False
 
-    def query_obj(self):
-        d = super(BigNumberTotalViz, self).query_obj()
+    def _query_obj(self):
+        d = super(BigNumberTotalViz, self)._query_obj()
         metric = self.form_data.get('metric')
         if not metric:
             raise Exception(_('Pick a metric!'))
@@ -1267,7 +1281,7 @@ class MultiLineViz(NVD3Viz):
 
     is_timeseries = True
 
-    def query_obj(self):
+    def _query_obj(self):
         return None
 
     def get_data(self, df):
@@ -1296,8 +1310,8 @@ class NVD3DualLineViz(NVD3Viz):
     sort_series = False
     is_timeseries = True
 
-    def query_obj(self):
-        d = super(NVD3DualLineViz, self).query_obj()
+    def _query_obj(self):
+        d = super(NVD3DualLineViz, self)._query_obj()
         m1 = self.form_data.get('metric')
         m2 = self.form_data.get('metric_2')
         d['metrics'] = [m1, m2]
@@ -1379,8 +1393,8 @@ class NVD3TimePivotViz(NVD3TimeSeriesViz):
     sort_series = True
     verbose_name = _('Time Series - Period Pivot')
 
-    def query_obj(self):
-        d = super(NVD3TimePivotViz, self).query_obj()
+    def _query_obj(self):
+        d = super(NVD3TimePivotViz, self)._query_obj()
         d['metrics'] = [self.form_data.get('metric')]
         return d
 
@@ -1456,9 +1470,9 @@ class HistogramViz(BaseViz):
     verbose_name = _('Histogram')
     is_timeseries = False
 
-    def query_obj(self):
+    def _query_obj(self):
         """Returns the query object for this visualization"""
-        d = super(HistogramViz, self).query_obj()
+        d = super(HistogramViz, self)._query_obj()
         d['row_limit'] = self.form_data.get(
             'row_limit', int(config.get('VIZ_ROW_LIMIT')))
         numeric_columns = self.form_data.get('all_columns_x')
@@ -1470,16 +1484,6 @@ def query_obj(self):
         d['groupby'] = []
         return d
 
-    def labelify(self, keys, column):
-        if isinstance(keys, str):
-            keys = (keys,)
-        # removing undesirable characters
-        labels = [re.sub(r'\W+', r'_', k) for k in keys]
-        if len(self.columns) > 1 or not self.groupby:
-            # Only show numeric column in label if there are many
-            labels = [column] + labels
-        return '__'.join(labels)
-
     def get_data(self, df):
         """Returns the chart data"""
         chart_data = []
@@ -1488,10 +1492,14 @@ def get_data(self, df):
         else:
             groups = [((), df)]
         for keys, data in groups:
+            if isinstance(keys, str):
+                keys = (keys,)
+            # removing undesirable characters
+            keys = [re.sub(r'\W+', r'_', k) for k in keys]
             chart_data.extend([{
-                'key': self.labelify(keys, column),
-                'values': data[column].tolist()}
-                for column in self.columns])
+                'key': '__'.join([c] + keys),
+                'values': data[c].tolist()}
+                for c in self.columns])
         return chart_data
 
 
@@ -1503,8 +1511,8 @@ class DistributionBarViz(DistributionPieViz):
     verbose_name = _('Distribution - Bar Chart')
     is_timeseries = False
 
-    def query_obj(self):
-        d = super(DistributionBarViz, self).query_obj()  # noqa
+    def _query_obj(self):
+        d = super(DistributionBarViz, self)._query_obj()  # noqa
         fd = self.form_data
         if (
             len(d['groupby']) <
@@ -1583,8 +1591,8 @@ def get_data(self, df):
             df['m2'] = df['m1']
         return json.loads(df.to_json(orient='values'))
 
-    def query_obj(self):
-        qry = super(SunburstViz, self).query_obj()
+    def _query_obj(self):
+        qry = super(SunburstViz, self)._query_obj()
         fd = self.form_data
         qry['metrics'] = [fd['metric']]
         secondary_metric = fd.get('secondary_metric')
@@ -1602,8 +1610,8 @@ class SankeyViz(BaseViz):
     is_timeseries = False
     credits = '<a href="https://www.npmjs.com/package/d3-sankey">d3-sankey on npm</a>'
 
-    def query_obj(self):
-        qry = super(SankeyViz, self).query_obj()
+    def _query_obj(self):
+        qry = super(SankeyViz, self)._query_obj()
         if len(qry['groupby']) != 2:
             raise Exception(_('Pick exactly 2 columns as [Source / Target]'))
         qry['metrics'] = [
@@ -1653,8 +1661,8 @@ class DirectedForceViz(BaseViz):
     credits = 'd3noob @<a href="http://bl.ocks.org/d3noob/5141278">bl.ocks.org</a>'
     is_timeseries = False
 
-    def query_obj(self):
-        qry = super(DirectedForceViz, self).query_obj()
+    def _query_obj(self):
+        qry = super(DirectedForceViz, self)._query_obj()
         if len(self.form_data['groupby']) != 2:
             raise Exception(_("Pick exactly 2 columns to 'Group By'"))
         qry['metrics'] = [self.form_data['metric']]
@@ -1674,8 +1682,8 @@ class ChordViz(BaseViz):
     credits = '<a href="https://github.com/d3/d3-chord">Bostock</a>'
     is_timeseries = False
 
-    def query_obj(self):
-        qry = super(ChordViz, self).query_obj()
+    def _query_obj(self):
+        qry = super(ChordViz, self)._query_obj()
         fd = self.form_data
         qry['groupby'] = [fd.get('groupby'), fd.get('columns')]
         qry['metrics'] = [self.get_metric_label(fd.get('metric'))]
@@ -1707,8 +1715,8 @@ class CountryMapViz(BaseViz):
     is_timeseries = False
     credits = 'From bl.ocks.org By john-guerra'
 
-    def query_obj(self):
-        qry = super(CountryMapViz, self).query_obj()
+    def _query_obj(self):
+        qry = super(CountryMapViz, self)._query_obj()
         qry['metrics'] = [
             self.form_data['metric']]
         qry['groupby'] = [self.form_data['entity']]
@@ -1735,8 +1743,8 @@ class WorldMapViz(BaseViz):
     is_timeseries = False
     credits = 'datamaps on <a href="https://www.npmjs.com/package/datamaps">npm</a>'
 
-    def query_obj(self):
-        qry = super(WorldMapViz, self).query_obj()
+    def _query_obj(self):
+        qry = super(WorldMapViz, self)._query_obj()
         qry['groupby'] = [self.form_data['entity']]
         return qry
 
@@ -1789,7 +1797,7 @@ class FilterBoxViz(BaseViz):
     credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
     cache_type = 'get_data'
 
-    def query_obj(self):
+    def _query_obj(self):
         return None
 
     def run_extra_queries(self):
@@ -1802,12 +1810,15 @@ def run_extra_queries(self):
             self.dataframes[flt] = df
 
     def filter_query_obj(self):
-        qry = super(FilterBoxViz, self).query_obj()
+        qry = super(FilterBoxViz, self)._query_obj()
         groupby = self.form_data.get('groupby')
         if len(groupby) < 1 and not self.form_data.get('date_filter'):
             raise Exception(_('Pick at least one filter field'))
-        qry['metrics'] = [
-            self.form_data['metric']]
+        metric = {
+            'label': self.form_data['metric'],
+            'expressionType': 'BUILTIN',
+        }
+        qry['metrics'] = [metric]
         return qry
 
     def get_data(self, df):
@@ -1834,7 +1845,7 @@ class IFrameViz(BaseViz):
     credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
     is_timeseries = False
 
-    def query_obj(self):
+    def _query_obj(self):
         return None
 
     def get_df(self, query_obj=None):
@@ -1859,8 +1870,8 @@ class ParallelCoordinatesViz(BaseViz):
         "Syntagmatic's library</a>")
     is_timeseries = False
 
-    def query_obj(self):
-        d = super(ParallelCoordinatesViz, self).query_obj()
+    def _query_obj(self):
+        d = super(ParallelCoordinatesViz, self)._query_obj()
         fd = self.form_data
         d['groupby'] = [fd.get('series')]
         return d
@@ -1880,8 +1891,8 @@ class HeatmapViz(BaseViz):
         'inspired from mbostock @<a href="http://bl.ocks.org/mbostock/3074470">'
         'bl.ocks.org</a>')
 
-    def query_obj(self):
-        d = super(HeatmapViz, self).query_obj()
+    def _query_obj(self):
+        d = super(HeatmapViz, self)._query_obj()
         fd = self.form_data
         d['metrics'] = [fd.get('metric')]
         d['groupby'] = [fd.get('all_columns_x'), fd.get('all_columns_y')]
@@ -1946,8 +1957,8 @@ class MapboxViz(BaseViz):
     credits = (
         '<a href=https://www.mapbox.com/mapbox-gl-js/api/>Mapbox GL JS</a>')
 
-    def query_obj(self):
-        d = super(MapboxViz, self).query_obj()
+    def _query_obj(self):
+        d = super(MapboxViz, self)._query_obj()
         fd = self.form_data
         label_col = fd.get('mapbox_label')
 
@@ -2055,7 +2066,7 @@ class DeckGLMultiLayer(BaseViz):
     is_timeseries = False
     credits = '<a href="https://uber.github.io/deck.gl/">deck.gl</a>'
 
-    def query_obj(self):
+    def _query_obj(self):
         return None
 
     def get_data(self, df):
@@ -2173,14 +2184,14 @@ def add_null_filters(self):
             })
             fd['adhoc_filters'].append(filter_)
 
-    def query_obj(self):
+    def _query_obj(self):
         fd = self.form_data
 
         # add NULL filters
         if fd.get('filter_nulls', True):
             self.add_null_filters()
 
-        d = super(BaseDeckGLViz, self).query_obj()
+        d = super(BaseDeckGLViz, self)._query_obj()
         gb = []
 
         for key in self.spatial_control_keys:
@@ -2240,13 +2251,13 @@ class DeckScatterViz(BaseDeckGLViz):
     spatial_control_keys = ['spatial']
     is_timeseries = True
 
-    def query_obj(self):
+    def _query_obj(self):
         fd = self.form_data
         self.is_timeseries = bool(
             fd.get('time_grain_sqla') or fd.get('granularity'))
         self.point_radius_fixed = (
             fd.get('point_radius_fixed') or {'type': 'fix', 'value': 500})
-        return super(DeckScatterViz, self).query_obj()
+        return super(DeckScatterViz, self)._query_obj()
 
     def get_metrics(self):
         self.metric = None
@@ -2285,10 +2296,10 @@ class DeckScreengrid(BaseDeckGLViz):
     spatial_control_keys = ['spatial']
     is_timeseries = True
 
-    def query_obj(self):
+    def _query_obj(self):
         fd = self.form_data
         self.is_timeseries = fd.get('time_grain_sqla') or fd.get('granularity')
-        return super(DeckScreengrid, self).query_obj()
+        return super(DeckScreengrid, self)._query_obj()
 
     def get_properties(self, d):
         return {
@@ -2346,10 +2357,10 @@ class DeckPathViz(BaseDeckGLViz):
         'geohash': geohash_to_json,
     }
 
-    def query_obj(self):
+    def _query_obj(self):
         fd = self.form_data
         self.is_timeseries = fd.get('time_grain_sqla') or fd.get('granularity')
-        d = super(DeckPathViz, self).query_obj()
+        d = super(DeckPathViz, self)._query_obj()
         self.metric = fd.get('metric')
         line_col = fd.get('line_column')
         if d['metrics']:
@@ -2387,11 +2398,11 @@ class DeckPolygon(DeckPathViz):
     deck_viz_key = 'polygon'
     verbose_name = _('Deck.gl - Polygon')
 
-    def query_obj(self):
+    def _query_obj(self):
         fd = self.form_data
         self.elevation = (
             fd.get('point_radius_fixed') or {'type': 'fix', 'value': 500})
-        return super(DeckPolygon, self).query_obj()
+        return super(DeckPolygon, self)._query_obj()
 
     def get_metrics(self):
         metrics = [self.form_data.get('metric')]
@@ -2434,8 +2445,8 @@ class DeckGeoJson(BaseDeckGLViz):
     viz_type = 'deck_geojson'
     verbose_name = _('Deck.gl - GeoJSON')
 
-    def query_obj(self):
-        d = super(DeckGeoJson, self).query_obj()
+    def _query_obj(self):
+        d = super(DeckGeoJson, self)._query_obj()
         d['columns'] += [self.form_data.get('geojson')]
         d['metrics'] = []
         d['groupby'] = []
@@ -2455,11 +2466,11 @@ class DeckArc(BaseDeckGLViz):
     spatial_control_keys = ['start_spatial', 'end_spatial']
     is_timeseries = True
 
-    def query_obj(self):
+    def _query_obj(self):
         fd = self.form_data
         self.is_timeseries = bool(
             fd.get('time_grain_sqla') or fd.get('granularity'))
-        return super(DeckArc, self).query_obj()
+        return super(DeckArc, self)._query_obj()
 
     def get_properties(self, d):
         dim = self.form_data.get('dimension')
@@ -2488,8 +2499,8 @@ class EventFlowViz(BaseViz):
     credits = 'from <a href="https://github.com/williaster/data-ui">@data-ui</a>'
     is_timeseries = True
 
-    def query_obj(self):
-        query = super(EventFlowViz, self).query_obj()
+    def _query_obj(self):
+        query = super(EventFlowViz, self)._query_obj()
         form_data = self.form_data
 
         event_key = form_data.get('all_columns_x')
@@ -2605,8 +2616,8 @@ class PartitionViz(NVD3TimeSeriesViz):
     viz_type = 'partition'
     verbose_name = _('Partition Diagram')
 
-    def query_obj(self):
-        query_obj = super(PartitionViz, self).query_obj()
+    def _query_obj(self):
+        query_obj = super(PartitionViz, self)._query_obj()
         time_op = self.form_data.get('time_series_option', 'not_time')
         # Return time series data if the user specifies so
         query_obj['is_timeseries'] = time_op != 'not_time'
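
All of the `query_obj` → `_query_obj` renames above feed a single
normalization step in `BaseViz.query_obj`, which wraps plain metric names
into dicts so the connectors can rely on a `label` key. A sketch of that
normalization in isolation:

    # Sketch of the metric normalization introduced in BaseViz.query_obj:
    # plain names become {'label', 'expressionType'} dicts, while adhoc
    # metric dicts pass through unchanged.
    def format_metrics(metrics):
        formatted = []
        for metric in metrics:
            if not isinstance(metric, dict):
                metric = {'label': metric, 'expressionType': 'BUILTIN'}
            formatted.append(metric)
        return formatted

    assert format_metrics(['count']) == [
        {'label': 'count', 'expressionType': 'BUILTIN'},
    ]
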
diff --git a/tests/druid_func_tests.py b/tests/druid_func_tests.py
index 905994551f..adc2a49b76 100644
--- a/tests/druid_func_tests.py
+++ b/tests/druid_func_tests.py
@@ -255,7 +255,10 @@ def test_run_query_no_groupby(self):
         post_aggs = ['some_agg']
         ds._metrics_and_post_aggs = Mock(return_value=(aggs, post_aggs))
         groupby = []
-        metrics = ['metric1']
+        metrics = [{
+            'label': 'metric1',
+            'expressionType': 'BUILTIN',
+        }]
         ds.get_having_filters = Mock(return_value=[])
         client.query_builder = Mock()
         client.query_builder.last_query = Mock()
@@ -341,7 +344,10 @@ def test_run_query_single_groupby(self):
         post_aggs = ['some_agg']
         ds._metrics_and_post_aggs = Mock(return_value=(aggs, post_aggs))
         groupby = ['col1']
-        metrics = ['metric1']
+        metrics = [{
+            'label': 'metric1',
+            'expressionType': 'BUILTIN',
+        }]
         ds.get_having_filters = Mock(return_value=[])
         client.query_builder.last_query.query_dict = {'mock': 0}
         # client.topn is called twice
@@ -415,7 +421,10 @@ def test_run_query_multiple_groupby(self):
         post_aggs = ['some_agg']
         ds._metrics_and_post_aggs = Mock(return_value=(aggs, post_aggs))
         groupby = ['col1', 'col2']
-        metrics = ['metric1']
+        metrics = [{
+            'label': 'metric1',
+            'expressionType': 'BUILTIN',
+        }]
         ds.get_having_filters = Mock(return_value=[])
         client.query_builder = Mock()
         client.query_builder.last_query = Mock()
@@ -586,6 +595,10 @@ def test_recursive_get_fields(self):
 
     def test_metrics_and_post_aggs_tree(self):
         metrics = ['A', 'B', 'm1', 'm2']
+        metrics = [{
+            'label': metric,
+            'expressionType': 'BUILTIN',
+        } for metric in metrics]
         metrics_dict = {}
         for i in range(ord('A'), ord('K') + 1):
             emplace(metrics_dict, chr(i), True)
@@ -688,11 +701,17 @@ def test_metrics_and_post_aggs(self):
             'label': 'My Adhoc Metric',
         }
 
-        metrics = ['some_sum']
+        some_sum = {
+            'label': 'some_sum',
+            'expressionType': 'BUILTIN',
+        }
+
+        metrics = [some_sum]
+
         saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
             metrics, metrics_dict)
 
-        assert set(saved_metrics.keys()) == {'some_sum'}
+        assert set(saved_metrics.keys()) == {some_sum['label']}
         assert post_aggs == {}
 
         metrics = [adhoc_metric]
@@ -702,26 +721,36 @@ def test_metrics_and_post_aggs(self):
         assert set(saved_metrics.keys()) == set([adhoc_metric['label']])
         assert post_aggs == {}
 
-        metrics = ['some_sum', adhoc_metric]
+        metrics = [some_sum, adhoc_metric]
         saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
             metrics, metrics_dict)
 
-        assert set(saved_metrics.keys()) == {'some_sum', adhoc_metric['label']}
+        assert set(saved_metrics.keys()) == {some_sum['label'], adhoc_metric['label']}
         assert post_aggs == {}
 
-        metrics = ['quantile_p95']
+        quantile_p95 = {
+            'label': 'quantile_p95',
+            'expressionType': 'BUILTIN',
+        }
+
+        metrics = [quantile_p95]
         saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
             metrics, metrics_dict)
 
-        result_postaggs = set(['quantile_p95'])
+        result_postaggs = set([quantile_p95['label']])
         assert set(saved_metrics.keys()) == {'a_histogram'}
         assert set(post_aggs.keys()) == result_postaggs
 
-        metrics = ['aCustomPostAgg']
+        aCustomPostAgg = {
+            'label': 'aCustomPostAgg',
+            'expressionType': 'BUILTIN',
+        }
+        metrics = [aCustomPostAgg]
+
         saved_metrics, post_aggs = DruidDatasource.metrics_and_post_aggs(
             metrics, metrics_dict)
 
-        result_postaggs = set(['aCustomPostAgg'])
+        result_postaggs = set([aCustomPostAgg['label']])
         assert set(saved_metrics.keys()) == {'aCustomMetric'}
         assert set(post_aggs.keys()) == result_postaggs
 
@@ -804,31 +833,43 @@ def test_run_query_order_by_metrics(self):
         ds.metrics = list(metrics_dict.values())
 
         groupby = ['dim1']
-        metrics = ['count1']
+        metrics = [{
+            'label': 'count1',
+            'expressionType': 'BUILTIN',
+        }]
+
+        timeseries_limit_metric = {
+            'label': 'sum1',
+            'expressionType': 'BUILTIN',
+        }
         granularity = 'all'
         # get the counts of the top 5 'dim1's, order by 'sum1'
         ds.run_query(
             groupby, metrics, granularity, from_dttm, to_dttm,
-            timeseries_limit=5, timeseries_limit_metric='sum1',
+            timeseries_limit=5, timeseries_limit_metric=timeseries_limit_metric,
             client=client, order_desc=True, filter=[],
         )
         qry_obj = client.topn.call_args_list[0][1]
         self.assertEqual('dim1', qry_obj['dimension'])
-        self.assertEqual('sum1', qry_obj['metric'])
+        self.assertEqual('sum1', qry_obj['metric']['label'])
         aggregations = qry_obj['aggregations']
         post_aggregations = qry_obj['post_aggregations']
         self.assertEqual({'count1', 'sum1'}, set(aggregations.keys()))
         self.assertEqual(set(), set(post_aggregations.keys()))
 
         # get the counts of the top 5 'dim1's, order by 'div1'
+        timeseries_limit_metric = {
+            'label': 'div1',
+            'expressionType': 'BUILTIN',
+        }
         ds.run_query(
             groupby, metrics, granularity, from_dttm, to_dttm,
-            timeseries_limit=5, timeseries_limit_metric='div1',
+            timeseries_limit=5, timeseries_limit_metric=timeseries_limit_metric,
             client=client, order_desc=True, filter=[],
         )
         qry_obj = client.topn.call_args_list[1][1]
         self.assertEqual('dim1', qry_obj['dimension'])
-        self.assertEqual('div1', qry_obj['metric'])
+        self.assertEqual('div1', qry_obj['metric']['label'])
         aggregations = qry_obj['aggregations']
         post_aggregations = qry_obj['post_aggregations']
         self.assertEqual({'count1', 'sum1', 'sum2'}, set(aggregations.keys()))
@@ -836,28 +877,42 @@ def test_run_query_order_by_metrics(self):
 
         groupby = ['dim1', 'dim2']
         # get the counts of the top 5 ['dim1', 'dim2']s, order by 'sum1'
+        timeseries_limit_metric = {
+            'label': 'sum1',
+            'expressionType': 'BUILTIN',
+        }
         ds.run_query(
             groupby, metrics, granularity, from_dttm, to_dttm,
-            timeseries_limit=5, timeseries_limit_metric='sum1',
+            timeseries_limit=5, timeseries_limit_metric=timeseries_limit_metric,
             client=client, order_desc=True, filter=[],
         )
         qry_obj = client.groupby.call_args_list[0][1]
         self.assertEqual({'dim1', 'dim2'}, set(qry_obj['dimensions']))
-        self.assertEqual('sum1', qry_obj['limit_spec']['columns'][0]['dimension'])
+        self.assertEqual(
+            'sum1',
+            qry_obj['limit_spec']['columns'][0]['dimension']['label'],
+        )
         aggregations = qry_obj['aggregations']
         post_aggregations = qry_obj['post_aggregations']
         self.assertEqual({'count1', 'sum1'}, set(aggregations.keys()))
         self.assertEqual(set(), set(post_aggregations.keys()))
 
         # get the counts of the top 5 ['dim1', 'dim2']s, order by 'div1'
+        timeseries_limit_metric = {
+            'label': 'div1',
+            'expressionType': 'BUILTIN',
+        }
         ds.run_query(
             groupby, metrics, granularity, from_dttm, to_dttm,
-            timeseries_limit=5, timeseries_limit_metric='div1',
+            timeseries_limit=5, timeseries_limit_metric=timeseries_limit_metric,
             client=client, order_desc=True, filter=[],
         )
         qry_obj = client.groupby.call_args_list[1][1]
         self.assertEqual({'dim1', 'dim2'}, set(qry_obj['dimensions']))
-        self.assertEqual('div1', qry_obj['limit_spec']['columns'][0]['dimension'])
+        self.assertEqual(
+            'div1',
+            qry_obj['limit_spec']['columns'][0]['dimension']['label'],
+        )
         aggregations = qry_obj['aggregations']
         post_aggregations = qry_obj['post_aggregations']
         self.assertEqual({'count1', 'sum1', 'sum2'}, set(aggregations.keys()))
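
The druid_func_tests.py hunks above all exercise one change: metrics that were previously passed around as bare label strings are now dicts carrying a 'label' and an 'expressionType', and assertions read the label back out of that dict. Below is a minimal sketch of that normalization, assuming bare strings map to BUILTIN saved metrics; the helper names as_metric_dict and metric_labels are hypothetical illustrations, not part of Superset's API.

from typing import Dict, List, Union

# A metric reference is either a legacy label string or the new dict
# shape used throughout the updated tests.
Metric = Union[str, Dict[str, str]]


def as_metric_dict(metric):
    """Normalize a metric reference into the dict shape the tests assert on."""
    if isinstance(metric, str):
        # Assumption: a bare string denotes a built-in saved metric.
        return {'label': metric, 'expressionType': 'BUILTIN'}
    return metric


def metric_labels(metrics):
    """Extract labels the way the updated assertions do."""
    return [as_metric_dict(m)['label'] for m in metrics]


if __name__ == '__main__':
    mixed = ['count1', {'label': 'sum1', 'expressionType': 'BUILTIN'}]
    assert metric_labels(mixed) == ['count1', 'sum1']
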
diff --git a/tests/viz_tests.py b/tests/viz_tests.py
index e4232c4214..c6f890bcae 100644
--- a/tests/viz_tests.py
+++ b/tests/viz_tests.py
@@ -310,7 +310,7 @@ def test_adhoc_filters_overwrite_legacy_filters(self):
         self.assertEqual('(value3 in (\'North America\'))', query_obj['extras']['where'])
         self.assertEqual('', query_obj['extras']['having'])
 
-    @patch('superset.viz.BaseViz.query_obj')
+    @patch('superset.viz.BaseViz._query_obj')
     def test_query_obj_merges_percent_metrics(self, super_query_obj):
         datasource = self.get_datasource_mock()
         form_data = {
@@ -326,9 +326,9 @@ def test_query_obj_merges_percent_metrics(self, super_query_obj):
         self.assertEqual([
             'sum__A', 'count', 'avg__C',
             'avg__B', 'max__Y',
-        ], query_obj['metrics'])
+        ], [metric['label'] for metric in query_obj['metrics']])
 
-    @patch('superset.viz.BaseViz.query_obj')
+    @patch('superset.viz.BaseViz._query_obj')
     def test_query_obj_throws_columns_and_metrics(self, super_query_obj):
         datasource = self.get_datasource_mock()
         form_data = {
@@ -345,7 +345,7 @@ def test_query_obj_throws_columns_and_metrics(self, super_query_obj):
         with self.assertRaises(Exception):
             test_viz.query_obj()
 
-    @patch('superset.viz.BaseViz.query_obj')
+    @patch('superset.viz.BaseViz._query_obj')
     def test_query_obj_merges_all_columns(self, super_query_obj):
         datasource = self.get_datasource_mock()
         form_data = {
@@ -362,7 +362,7 @@ def test_query_obj_merges_all_columns(self, super_query_obj):
         self.assertEqual([], query_obj['groupby'])
         self.assertEqual([['colA', 'colB'], ['colC']], query_obj['orderby'])
 
-    @patch('superset.viz.BaseViz.query_obj')
+    @patch('superset.viz.BaseViz._query_obj')
     def test_query_obj_uses_sortby(self, super_query_obj):
         datasource = self.get_datasource_mock()
         form_data = {
@@ -376,7 +376,7 @@ def test_query_obj_uses_sortby(self, super_query_obj):
         query_obj = test_viz.query_obj()
         self.assertEqual([
             'colA', 'colB', '__time__',
-        ], query_obj['metrics'])
+        ], [metric['label'] for metric in query_obj['metrics']])
         self.assertEqual([(
             '__time__', True,
         )], query_obj['orderby'])
@@ -523,7 +523,7 @@ def test_get_data_empty_null_keys(self):
 
 class PartitionVizTestCase(SupersetTestCase):
 
-    @patch('superset.viz.BaseViz.query_obj')
+    @patch('superset.viz.BaseViz._query_obj')
     def test_query_obj_time_series_option(self, super_query_obj):
         datasource = self.get_datasource_mock()
         form_data = {}
@@ -850,7 +850,7 @@ def test_get_data_group_by(self):
         }
         self.assertEqual(expected, data['records'])
 
-    @patch('superset.viz.BaseViz.query_obj')
+    @patch('superset.viz.BaseViz._query_obj')
     def test_query_obj_throws_metrics_and_groupby(self, super_query_obj):
         datasource = self.get_datasource_mock()
         form_data = {

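
Every @patch in the viz_tests.py hunks above is retargeted from superset.viz.BaseViz.query_obj to superset.viz.BaseViz._query_obj, and the viz.py hunk at the top of this diff shows PartitionViz calling super()._query_obj(). Taken together, this suggests query_obj became a thin public wrapper around a protected _query_obj hook that subclasses extend. A hedged sketch of that shape follows; the class bodies are illustrative stand-ins, not the real superset.viz code.

class BaseViz(object):
    def __init__(self, form_data):
        self.form_data = form_data

    def _query_obj(self):
        # Protected hook that subclasses override or extend; the tests
        # patch this symbol so the public entry point stays exercised.
        return {'metrics': [], 'is_timeseries': False}

    def query_obj(self):
        # Public entry point; a natural seam for validation or shared
        # post-processing around the protected builder.
        return self._query_obj()


class PartitionViz(BaseViz):
    def _query_obj(self):
        query_obj = super(PartitionViz, self)._query_obj()
        time_op = self.form_data.get('time_series_option', 'not_time')
        # Only request time series data when the form asks for it.
        query_obj['is_timeseries'] = time_op != 'not_time'
        return query_obj


if __name__ == '__main__':
    viz = PartitionViz({'time_series_option': 'not_time'})
    assert viz.query_obj()['is_timeseries'] is False
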

 
