You are viewing a plain text version of this content. The canonical link for it is here.
Posted to issues@drill.apache.org by "ASF GitHub Bot (JIRA)" <ji...@apache.org> on 2016/05/18 17:21:12 UTC

[jira] [Commented] (DRILL-1328) Support table statistics

    [ https://issues.apache.org/jira/browse/DRILL-1328?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15289370#comment-15289370 ] 

ASF GitHub Bot commented on DRILL-1328:
---------------------------------------

Github user vkorukanti commented on a diff in the pull request:

    https://github.com/apache/drill/pull/425#discussion_r63745018
  
    --- Diff: exec/java-exec/src/main/java/org/apache/drill/exec/expr/fn/impl/StatisticsAggrFunctions.java ---
    @@ -0,0 +1,295 @@
    +/*******************************************************************************
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
    + * <p/>
    + * http://www.apache.org/licenses/LICENSE-2.0
    + * <p/>
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + ******************************************************************************/
    +
    +/*
    + * This class is automatically generated from AggrTypeFunctions2.tdd using FreeMarker.
    + */
    +
    +package org.apache.drill.exec.expr.fn.impl;
    +
    +import io.netty.buffer.DrillBuf;
    +import org.apache.drill.exec.expr.DrillAggFunc;
    +import org.apache.drill.exec.expr.DrillSimpleFunc;
    +import org.apache.drill.exec.expr.annotations.FunctionTemplate;
    +import org.apache.drill.exec.expr.annotations.FunctionTemplate.NullHandling;
    +import org.apache.drill.exec.expr.annotations.FunctionTemplate.FunctionScope;
    +import org.apache.drill.exec.expr.annotations.Output;
    +import org.apache.drill.exec.expr.annotations.Param;
    +import org.apache.drill.exec.expr.annotations.Workspace;
    +import org.apache.drill.exec.expr.holders.BigIntHolder;
    +import org.apache.drill.exec.expr.holders.NullableBigIntHolder;
    +import org.apache.drill.exec.expr.holders.NullableVarBinaryHolder;
    +import org.apache.drill.exec.expr.holders.ObjectHolder;
    +import org.apache.drill.exec.vector.complex.reader.FieldReader;
    +
    +import javax.inject.Inject;
    +
    +@SuppressWarnings("unused")
    +public class StatisticsAggrFunctions {
    +  static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(StatisticsAggrFunctions.class);
    +
    +  @FunctionTemplate(name = "statcount", scope = FunctionTemplate.FunctionScope.POINT_AGGREGATE)
    +  public static class StatCount implements DrillAggFunc {
    +    @Param
    +    FieldReader in;
    +    @Workspace
    +    BigIntHolder count;
    +    @Output
    +    NullableBigIntHolder out;
    +
    +    @Override
    +    public void setup() {
    +      count = new BigIntHolder();
    +    }
    +
    +    @Override
    +    public void add() {
    +      count.value++;
    +    }
    +
    +    @Override
    +    public void output() {
    +      out.isSet = 1;
    +      out.value = count.value;
    +    }
    +
    +    @Override
    +    public void reset() {
    +      count.value = 0;
    +    }
    +  }
    +
    +  @FunctionTemplate(name = "nonnullstatcount", scope = FunctionTemplate.FunctionScope.POINT_AGGREGATE)
    +  public static class NonNullStatCount implements DrillAggFunc {
    +    @Param
    +    FieldReader in;
    +    @Workspace
    +    BigIntHolder count;
    +    @Output
    +    NullableBigIntHolder out;
    +
    +    @Override
    +    public void setup() {
    +      count = new BigIntHolder();
    +    }
    +
    +    @Override
    +    public void add() {
    +      if (in.isSet()) {
    +        count.value++;
    +      }
    +    }
    +
    +    @Override
    +    public void output() {
    +      out.isSet = 1;
    +      out.value = count.value;
    +    }
    +
    +    @Override
    +    public void reset() {
    +      count.value = 0;
    +    }
    +  }
    +
    +  @FunctionTemplate(name = "hll", scope = FunctionTemplate.FunctionScope.POINT_AGGREGATE)
    +  public static class HllFieldReader implements DrillAggFunc {
    +    @Param
    +    FieldReader in;
    +    @Workspace
    +    ObjectHolder work;
    +    @Output
    +    NullableVarBinaryHolder out;
    +    @Inject
    +    DrillBuf buffer;
    +
    +    @Override
    +    public void setup() {
    +      work = new ObjectHolder();
    +      work.obj = new com.clearspring.analytics.stream.cardinality.HyperLogLog(10);
    +    }
    +
    +    @Override
    +    public void add() {
    +      if (work.obj != null) {
    +        com.clearspring.analytics.stream.cardinality.HyperLogLog hll =
    +            (com.clearspring.analytics.stream.cardinality.HyperLogLog) work.obj;
    +        int mode = in.getType().getMode().getNumber();
    +        int type = in.getType().getMinorType().getNumber();
    +
    +        switch (mode) {
    +          case org.apache.drill.common.types.TypeProtos.DataMode.OPTIONAL_VALUE:
    +            if (!in.isSet()) {
    +              hll.offer(null);
    +              break;
    +            }
    +            // fall through //
    +          case org.apache.drill.common.types.TypeProtos.DataMode.REQUIRED_VALUE:
    +            switch (type) {
    +              case org.apache.drill.common.types.TypeProtos.MinorType.VARCHAR_VALUE:
    +                hll.offer(in.readText().toString());
    +                break;
    +              default:
    +                work.obj = null;
    +            }
    +            break;
    +          default:
    +            work.obj = null;
    +        }
    +      }
    +    }
    +
    +    @Override
    +    public void output() {
    +      if (work.obj != null) {
    +        com.clearspring.analytics.stream.cardinality.HyperLogLog hll =
    +            (com.clearspring.analytics.stream.cardinality.HyperLogLog) work.obj;
    +
    +        try {
    +          byte[] ba = hll.getBytes();
    +          out.buffer = buffer.reallocIfNeeded(ba.length);
    +          out.start = 0;
    +          out.end = ba.length;
    +          out.buffer.setBytes(0, ba);
    +          out.isSet = 1;
    +        } catch (java.io.IOException e) {
    +          throw new org.apache.drill.common.exceptions.DrillRuntimeException("Failed to get HyperLogLog output", e);
    --- End diff --
    
    Currently we have no mechanism to handle exceptions in UDFs. We have generally tried to avoid `try...catch` blocks in UDFs, but in this case we need a `try...catch` block because `HyperLogLog.getBytes()` throws a checked exception. Once [DRILL-3764](https://issues.apache.org/jira/browse/DRILL-3764) is fixed, we can change this accordingly.


> Support table statistics
> ------------------------
>
>                 Key: DRILL-1328
>                 URL: https://issues.apache.org/jira/browse/DRILL-1328
>             Project: Apache Drill
>          Issue Type: Improvement
>            Reporter: Cliff Buchanan
>             Fix For: Future
>
>         Attachments: 0001-PRE-Set-value-count-in-splitAndTransfer.patch
>
>
> This consists of several subtasks
> * implement operators to generate statistics
> * add "analyze table" support to parser/planner
> * create a metadata provider to allow statistics to be used by optiq in planning optimization
> * implement statistics functions
> Right now, the bulk of this functionality is implemented, but it hasn't been rigorously tested and needs to have some definite answers for some of the parts "around the edges" (how analyze table figures out where the table statistics are located, how a table "append" should work in a read only file system)
> Also, here are a few known caveats:
> * table statistics are collected by creating a sql query based on the string path of the table. This should probably be done with a Table reference.
> * Case sensitivity for column statistics is probably iffy
> * Math for combining two column NDVs into a joint NDV should be checked.
> * Schema changes aren't really being considered yet.
> * adding getDrillTable is probably unnecessary; it might be better to do getTable().unwrap(DrillTable.class)



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)