You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by mm...@apache.org on 2016/12/22 08:32:33 UTC

[04/10] hive git commit: HIVE-15335: Fast Decimal (Matt McCline, reviewed by Sergey Shelukhin, Prasanth Jayachandran, Owen O'Malley)

http://git-wip-us.apache.org/repos/asf/hive/blob/4ba713cc/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java b/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
index 674400c..bf954a8 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java
@@ -17,322 +17,1485 @@
  */
 package org.apache.hadoop.hive.common.type;
 
+import java.util.Arrays;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.math.BigDecimal;
 import java.math.BigInteger;
-import java.math.RoundingMode;
 
 /**
- *
- * HiveDecimal. Simple wrapper for BigDecimal. Adds fixed max precision and non scientific string
- * representation
+ * HiveDecimal is a decimal data type with a maximum precision and scale.
+ * <p>
+ * It is the Hive DECIMAL data type.
+ * <p>
+ * The scale is the number of fractional decimal digits.  The digits after the dot.  It is limited
+ * to 38 (MAX_SCALE).
+ * <p>
+ * The precision is the integer (or whole-number) decimal digits plus fractional decimal digits.
+ * It is limited to a total of 38 digits (MAX_PRECISION).
+ * <p>
+ * Hive syntax for declaring DECIMAL has 3 forms:
+ * <p>
+ * {@code
+ *       DECIMAL                            // Use the default precision/scale.}
+ * <p>
+ * {@code
+ *       DECIMAL(precision)                 // Use the default scale.}
+ * <p>
+ * {@code
+ *       DECIMAL(precision, scale)}
+ * <p>
+ * The declared scale must be less than or equal to the precision.
+ * <p>
+ * Use DECIMAL instead of DOUBLE when exact numeric accuracy is required.  Not all decimal numbers
+ * (radix 10) are exactly representable in the binary (radix 2 based) floating point type DOUBLE and
+ * cause accuracy anomalies (i.e. wrong results).  See the Internet for more details.
+ * <p>
+ * HiveDecimal is implemented as a classic Java immutable object.  All operations on HiveDecimal
+ * that produce a different value will create a new HiveDecimal object.
+ * <p>
+ * Decimals are physically stored without any extra leading or trailing zeroes.  The scale of
+ * a decimal is the number of non-trailing zero fractional digits.
+ * <p>
+ * Math operations on decimals typically cause the scale to change as a result of the math and
+ * from trailing fractional digit elimination.
+ * <p>
+ * Typically, Hive, when it wants to make sure a result decimal fits in the column decimal's
+ * precision/scale it calls enforcePrecisionScale.  That method will scale down or trim off
+ * result fractional digits if necessary with rounding when the column has a smaller scale.
+ * And, it will also indicate overflow when the decimal has exceeded the column's maximum precision.
+ * <p>
+ * NOTE: When Hive gets ready to serialize a decimal into text or binary, it sometimes
+ * wants trailing fractional zeroes.  See the special notes for toFormatString and
+ * bigIntegerBytesScaled for details.
+ * <p>
+ * ------------------------------------- Version 2 ------------------------------------------------
+ * <p>
+ * This is the 2nd major version of HiveDecimal called V2.  The previous version has been
+ * renamed to HiveDecimalV1 and is kept as a test and behavior reference.
+ * <p>
+ * For good performance we do not represent the decimal using a BigDecimal object like the previous
+ * version V1 did.  Using Java objects to represent our decimal incurs too high a penalty
+ * for memory allocations and general logic.
+ * <p>
+ * The original V1 public methods and fields are annotated with @HiveDecimalVersionV1; new public
+ * methods and fields are annotated with @HiveDecimalVersionV2.
  *
  */
-public class HiveDecimal implements Comparable<HiveDecimal> {
+public final class HiveDecimal extends FastHiveDecimal implements Comparable<HiveDecimal> {
+
+  /*
+   * IMPLEMENTATION NOTE:
+   *    We implement HiveDecimal with the mutable FastHiveDecimal class.  That class uses
+   *    protected on all its methods so they will not be visible in the HiveDecimal class.
+   *
+   *    So even if one casts to FastHiveDecimal, you shouldn't be able to violate the immutability
+   *    of a HiveDecimal class.
+   */
+
+  @HiveDecimalVersionV1
   public static final int MAX_PRECISION = 38;
+  @HiveDecimalVersionV1
   public static final int MAX_SCALE = 38;
 
   /**
    * Default precision/scale when user doesn't specify in the column metadata, such as
    * decimal and decimal(8).
    */
+  @HiveDecimalVersionV1
   public static final int USER_DEFAULT_PRECISION = 10;
+  @HiveDecimalVersionV1
   public static final int USER_DEFAULT_SCALE = 0;
 
   /**
    *  Default precision/scale when system is not able to determine them, such as in case
    *  of a non-generic udf.
    */
+  @HiveDecimalVersionV1
   public static final int SYSTEM_DEFAULT_PRECISION = 38;
+  @HiveDecimalVersionV1
   public static final int SYSTEM_DEFAULT_SCALE = 18;
 
-  public static final HiveDecimal ZERO = new HiveDecimal(BigDecimal.ZERO);
-  public static final HiveDecimal ONE = new HiveDecimal(BigDecimal.ONE);
+  /**
+   * Common values.
+   */
+  @HiveDecimalVersionV1
+  public static final HiveDecimal ZERO = HiveDecimal.create(0);
+  @HiveDecimalVersionV1
+  public static final HiveDecimal ONE = HiveDecimal.create(1);
 
+  /**
+   * ROUND_FLOOR:
+   * <p>
+   *   Round towards negative infinity.
+   * <p>
+   *   The Hive function is FLOOR.
+   * <p>
+   *   Positive numbers: The round fraction is thrown away.
+   * <p>
+   *       (Example here rounds at scale 0)
+   *       Value        FLOOR
+   *        0.3           0
+   *        2             2
+   *        2.1           2
+   * <p>
+   *   Negative numbers: If there is a round fraction, throw it away and subtract 1.
+   * <p>
+   *       (Example here rounds at scale 0)
+   *       Value        FLOOR
+   *       -0.3           -1
+   *       -2             -2
+   *       -2.1           -3
+   */
+  @HiveDecimalVersionV1
   public static final int ROUND_FLOOR = BigDecimal.ROUND_FLOOR;
+
+  /**
+   * ROUND_CEILING:
+   * <p>
+   *   Round towards positive infinity.
+   * <p>
+   *   The Hive function is CEILING.
+   * <p>
+   *   Positive numbers: If there is a round fraction, throw it away and add 1
+   * <p>
+   *       (Example here rounds at scale 0)
+   *       Value        CEILING
+   *        0.3           1
+   *        2             2
+   *        2.1           3
+   * <p>
+   *   Negative numbers: The round fraction is thrown away.
+   * <p>
+   *       (Example here rounds at scale 0)
+   *       Value        CEILING
+   *       -0.3           0
+   *       -2             -2
+   *       -2.1           -2
+   */
+  @HiveDecimalVersionV1
   public static final int ROUND_CEILING = BigDecimal.ROUND_CEILING;
+
+  /**
+   * ROUND_HALF_UP:
+   * <p>
+   *   Round towards "nearest neighbor" unless both neighbors are equidistant then round up.
+   * <p>
+   *   The Hive function is ROUND.
+   * <p>
+   *   For result, throw away round fraction.  If the round fraction is >= 0.5, then add 1 when
+   *   positive and subtract 1 when negative.  So, the sign is irrelevant.
+   * <p>
+   *      (Example here rounds at scale 0)
+   *       Value        ROUND                  Value        ROUND
+   *       0.3           0                     -0.3           0
+   *       2             2                     -2            -2
+   *       2.1           2                     -2.1          -2
+   *       2.49          2                     -2.49         -2
+   *       2.5           3                     -2.5          -3
+   *
+   */
+  @HiveDecimalVersionV1
   public static final int ROUND_HALF_UP = BigDecimal.ROUND_HALF_UP;
+
+  /**
+   * ROUND_HALF_EVEN:
+   *   Round towards the "nearest neighbor" unless both neighbors are equidistant, then round
+   *   towards the even neighbor.
+   * <p>
+   *   The Hive function is BROUND.
+   * <p>
+   *   Known as Banker's Rounding.
+   * <p>
+   *   When you add values rounded with ROUND_HALF_UP you have a bias that grows as you add more
+   *   numbers.  Banker's Rounding is a way to minimize that bias.  It rounds toward the nearest
+   *   even number when the fraction is 0.5 exactly.  In table below, notice that 2.5 goes DOWN to
+   *   2 (even) but 3.5 goes UP to 4 (even), etc.
+   * <p>
+   *   So, the sign is irrelevant.
+   * <p>
+   *       (Example here rounds at scale 0)
+   *       Value        BROUND                  Value        BROUND
+   *        0.49          0                     -0.49          0
+   *        0.5           0                     -0.5           0
+   *        0.51          1                     -0.51         -1
+   *        1.5           2                     -1.5          -2
+   *        2.5           2                     -2.5          -2
+   *        2.51          3                     -2.51         -3
+   *        3.5           4                     -3.5          -4
+   *        4.5           4                     -4.5          -4
+   *        4.51          5                     -4.51         -5
+   *
+   */
+  @HiveDecimalVersionV1
   public static final int ROUND_HALF_EVEN = BigDecimal.ROUND_HALF_EVEN;
 
-  private BigDecimal bd = BigDecimal.ZERO;
+  //-----------------------------------------------------------------------------------------------
+  // Constructors are marked private; use create methods.
+  //-----------------------------------------------------------------------------------------------
+
+  private HiveDecimal() {
+    super();
+  }
+
+  private HiveDecimal(HiveDecimal dec) {
+    super(dec);
+  }
+
+  private HiveDecimal(FastHiveDecimal fastDec) {
+    super(fastDec);
+  }
+
+  private HiveDecimal(int fastSignum, FastHiveDecimal fastDec) {
+    super(fastSignum, fastDec);
+  }
+
+  private HiveDecimal(
+      int fastSignum, long fast0, long fast1, long fast2,
+      int fastIntegerDigitCount, int fastScale) {
+    super(fastSignum, fast0, fast1, fast2, fastIntegerDigitCount, fastScale);
+  }
+
+  //-----------------------------------------------------------------------------------------------
+  // Create methods.
+  //-----------------------------------------------------------------------------------------------
+
+  /**
+   * Create a HiveDecimal from a FastHiveDecimal object. Used by HiveDecimalWritable.
+   */
+  @HiveDecimalVersionV2
+  public static HiveDecimal createFromFast(FastHiveDecimal fastDec) {
+    return new HiveDecimal(fastDec);
+  }
+
+  /**
+   * Create a HiveDecimal from BigDecimal object.
+   *
+   * A BigDecimal object has a decimal scale.
+   *
+   * We will have overflow if BigDecimal's integer part exceed MAX_PRECISION digits or
+   * 99,999,999,999,999,999,999,999,999,999,999,999,999 or 10^38 - 1.
+   *
+   * When the BigDecimal value's precision exceeds MAX_PRECISION and there are fractional digits
+   * because of scale > 0, then lower digits are trimmed off with rounding to meet the
+   * MAX_PRECISION requirement.
+   *
+   * Also, BigDecimal supports negative scale -- which means multiplying the value by 10^abs(scale).
+   * And, BigDecimal allows for a non-zero scale for zero.  We normalize that so zero always has
+   * scale 0.
+   *
+   * @param bigDecimal
+   * @return  The HiveDecimal with the BigDecimal's value adjusted down to a maximum precision.
+   *          Otherwise, null is returned for overflow.
+   */
+  @HiveDecimalVersionV1
+  public static HiveDecimal create(BigDecimal bigDecimal) {
+    return create(bigDecimal, true);
+  }
+
+  /**
+   * Same as the above create method, except fractional digit rounding can be turned off.
+   * @param bigDecimal
+   * @param allowRounding  True requires all of the bigDecimal value be converted to the decimal
+   *                       without loss of precision.
+   * @return
+   */
+  @HiveDecimalVersionV1
+  public static HiveDecimal create(BigDecimal bigDecimal, boolean allowRounding) {
+    HiveDecimal result = new HiveDecimal();
+    if (!result.fastSetFromBigDecimal(
+        bigDecimal, allowRounding)) {
+      return null;
+    }
+    return result;
+  }
+
+  /**
+   * Creates a HiveDecimal from a BigInteger's value with a scale of 0.
+   *
+   * We will have overflow if BigInteger exceed MAX_PRECISION digits or
+   * 99,999,999,999,999,999,999,999,999,999,999,999,999 or 10^38 - 1.
+   *
+   * @param bigInteger
+   * @return  A HiveDecimal object with the exact BigInteger's value.
+   *          Otherwise, null is returned on overflow.
+   */
+  @HiveDecimalVersionV1
+  public static HiveDecimal create(BigInteger bigInteger) {
+    HiveDecimal result = new HiveDecimal();
+    if (!result.fastSetFromBigInteger(
+        bigInteger)) {
+      return null;
+    }
+    return result;
+  }
+
+  /**
+   * Creates a HiveDecimal from a BigInteger's value with a specified scale.
+   *
+   * We will have overflow if BigInteger exceed MAX_PRECISION digits or
+   * 99,999,999,999,999,999,999,999,999,999,999,999,999 or 10^38 - 1.
+   *
+   * The resulting decimal will have fractional digits when the specified scale is greater than 0.
+   *
+   * When the BigInteger's value's precision exceeds MAX_PRECISION and there are fractional digits
+   * because of scale > 0, then lower digits are trimmed off with rounding to meet the
+   * MAX_PRECISION requirement.
+   *
+   * @param bigInteger
+   * @param scale
+   * @return  A HiveDecimal object with the BigInteger's value adjusted for scale.
+   *          Otherwise, null is returned on overflow.
+   */
+  @HiveDecimalVersionV1
+  public static HiveDecimal create(BigInteger bigInteger, int scale) {
+    HiveDecimal result = new HiveDecimal();
+    if (!result.fastSetFromBigIntegerAndScale(
+        bigInteger, scale)) {
+      return null;
+    }
+    return result;
+  }
+
+  /**
+   * Create a HiveDecimal by parsing a whole string.
+   *
+   * We support parsing a decimal with an exponent because the previous version
+   * (i.e. OldHiveDecimal) uses the BigDecimal parser and was able to.
+   *
+   */
+  @HiveDecimalVersionV1
+  public static HiveDecimal create(String string) {
+    HiveDecimal result = new HiveDecimal();
+    if (!result.fastSetFromString(
+        string, true)) {
+      return null;
+    }
+    return result;
+  }
+
+  /**
+   * Same as the method above, except blanks before and after are tolerated.
+   * @param string
+   * @param trimBlanks  True specifies leading and trailing blanks are to be ignored.
+   * @return
+   */
+  @HiveDecimalVersionV2
+  public static HiveDecimal create(String string, boolean trimBlanks) {
+    HiveDecimal result = new HiveDecimal();
+    if (!result.fastSetFromString(
+        string, trimBlanks)) {
+      return null;
+    }
+    return result;
+  }
+
+  /**
+   * Create a HiveDecimal by parsing the characters in a whole byte array.
+   *
+   * Same rules as create(String string) above.
+   *
+   */
+  @HiveDecimalVersionV2
+  public static HiveDecimal create(byte[] bytes) {
+    HiveDecimal result = new HiveDecimal();
+    if (!result.fastSetFromBytes(
+        bytes, 0, bytes.length, false)) {
+      return null;
+    }
+    return result;
+  }
+
+  /**
+   * Same as the method above, except blanks before and after are tolerated.
+   *
+   */
+  @HiveDecimalVersionV2
+  public static HiveDecimal create(byte[] bytes, boolean trimBlanks) {
+    HiveDecimal result = new HiveDecimal();
+    if (!result.fastSetFromBytes(
+        bytes, 0, bytes.length, trimBlanks)) {
+      return null;
+    }
+    return result;
+  }
+
+  /**
+   * This method takes in digits only UTF-8 characters, a sign flag, and a scale and returns
+   * a decimal.
+   */
+  @HiveDecimalVersionV2
+  public static HiveDecimal create(boolean isNegative, byte[] bytes, int scale) {
+    HiveDecimal result = new HiveDecimal();
+    if (!result.fastSetFromDigitsOnlyBytesAndScale(
+        isNegative, bytes, 0, bytes.length, scale)) {
+      return null;
+    }
+    if (isNegative) {
+      result.fastNegate();
+    }
+    return result;
+  }
+
+  @HiveDecimalVersionV2
+  public static HiveDecimal create(
+      boolean isNegative, byte[] bytes, int offset, int length, int scale) {
+    HiveDecimal result = new HiveDecimal();
+    if (!result.fastSetFromDigitsOnlyBytesAndScale(
+        isNegative, bytes, offset, length, scale)) {
+      return null;
+    }
+    return result;
+  }
 
-  private HiveDecimal(BigDecimal bd) {
-    this.bd = bd;
+  /**
+   * Create a HiveDecimal by parsing the characters in a slice of a byte array.
+   *
+   * Same rules as create(String string) above.
+   *
+   */
+  @HiveDecimalVersionV2
+  public static HiveDecimal create(byte[] bytes, int offset, int length) {
+    HiveDecimal result = new HiveDecimal();
+    if (!result.fastSetFromBytes(
+        bytes, offset, length, false)) {
+      return null;
+    }
+    return result;
   }
 
-  public static HiveDecimal create(BigDecimal b) {
-    return create(b, true);
+  /**
+   * Same as the method above, except blanks before and after are tolerated.
+   *
+   */
+  @HiveDecimalVersionV2
+  public static HiveDecimal create(
+      byte[] bytes, int offset, int length, boolean trimBlanks) {
+    HiveDecimal result = new HiveDecimal();
+    if (!result.fastSetFromBytes(
+        bytes, offset, length, trimBlanks)) {
+      return null;
+    }
+    return result;
   }
 
-  public static HiveDecimal create(BigDecimal b, boolean allowRounding) {
-    BigDecimal bd = normalize(b, allowRounding);
-    return bd == null ? null : new HiveDecimal(bd);
+  /**
+   * Create a HiveDecimal object from an int.
+   *
+   */
+  @HiveDecimalVersionV1
+  public static HiveDecimal create(int intValue) {
+    HiveDecimal result = new HiveDecimal();
+    result.fastSetFromInt(intValue);
+    return result;
   }
 
-  public static HiveDecimal create(BigInteger unscaled, int scale) {
-    BigDecimal bd = normalize(new BigDecimal(unscaled, scale), true);
-    return bd == null ? null : new HiveDecimal(bd);
+  /**
+   * Create a HiveDecimal object from a long.
+   *
+   */
+  @HiveDecimalVersionV1
+  public static HiveDecimal create(long longValue) {
+    HiveDecimal result = new HiveDecimal();
+    result.fastSetFromLong(longValue);
+    return result;
   }
 
-  public static HiveDecimal create(String dec) {
-    BigDecimal bd;
-    try {
-      bd = new BigDecimal(dec.trim());
-    } catch (NumberFormatException ex) {
+  /**
+   * Create a HiveDecimal object from a long with a specified scale.
+   *
+   */
+  @HiveDecimalVersionV2
+  public static HiveDecimal create(long longValue, int scale) {
+    HiveDecimal result = new HiveDecimal();
+    if (!result.fastSetFromLongAndScale(
+        longValue, scale)) {
       return null;
     }
-    bd = normalize(bd, true);
-    return bd == null ? null : new HiveDecimal(bd);
+    return result;
   }
 
-  public static HiveDecimal create(BigInteger bi) {
-    BigDecimal bd = normalize(new BigDecimal(bi), true);
-    return bd == null ? null : new HiveDecimal(bd);
+  /**
+   * Create a HiveDecimal object from a float.
+   * <p>
+   * This method is equivalent to HiveDecimal.create(Float.toString(floatValue))
+   */
+  @HiveDecimalVersionV2
+  public static HiveDecimal create(float floatValue) {
+    HiveDecimal result = new HiveDecimal();
+    if (!result.fastSetFromFloat(floatValue)) {
+      return null;
+    }
+    return result;
   }
 
-  public static HiveDecimal create(int i) {
-    return new HiveDecimal(new BigDecimal(i));
+  /**
+   * Create a HiveDecimal object from a double.
+   * <p>
+   * This method is equivalent to HiveDecimal.create(Double.toString(doubleValue))
+   */
+  @HiveDecimalVersionV2
+  public static HiveDecimal create(double doubleValue) {
+    HiveDecimal result = new HiveDecimal();
+    if (!result.fastSetFromDouble(doubleValue)) {
+      return null;
+    }
+    return result;
   }
 
-  public static HiveDecimal create(long l) {
-    return new HiveDecimal(new BigDecimal(l));
+  //-----------------------------------------------------------------------------------------------
+  // Serialization methods.
+  //-----------------------------------------------------------------------------------------------
+
+  // The byte length of the scratch byte array that needs to be passed to serializationUtilsRead.
+  @HiveDecimalVersionV2
+  public static final int SCRATCH_BUFFER_LEN_SERIALIZATION_UTILS_READ =
+      FAST_SCRATCH_BUFFER_LEN_SERIALIZATION_UTILS_READ;
+
+  /**
+   * Deserialize data written in the format used by the SerializationUtils methods
+   * readBigInteger/writeBigInteger and create a decimal using the supplied scale.
+   * <p>
+   * ORC uses those SerializationUtils methods for its serialization.
+   * <p>
+   * A scratch bytes array is necessary to do the binary to decimal conversion for better
+   * performance.  Pass a SCRATCH_BUFFER_LEN_SERIALIZATION_UTILS_READ byte array for scratchBytes.
+   * <p>
+   * @return The deserialized decimal or null if the conversion failed.
+   */
+  @HiveDecimalVersionV2
+  public static HiveDecimal serializationUtilsRead(
+      InputStream inputStream, int scale,
+      byte[] scratchBytes)
+      throws IOException {
+    HiveDecimal result = new HiveDecimal();
+    if (!result.fastSerializationUtilsRead(
+        inputStream, scale,
+        scratchBytes)) {
+      return null;
+    }
+    return result;
   }
 
+  /**
+   * Convert bytes in the format used by BigInteger's toByteArray format (and accepted by its
+   * constructor) into a decimal using the specified scale.
+   * <p>
+   * Our bigIntegerBytes methods create bytes in this format, too.
+   * <p>
+   * This method is designed for high performance and does not create an actual BigInteger during
+   * binary to decimal conversion.
+   *
+   */
+  @HiveDecimalVersionV2
+  public static HiveDecimal createFromBigIntegerBytesAndScale(
+      byte[] bytes, int scale) {
+    HiveDecimal result = new HiveDecimal();
+    if (!result.fastSetFromBigIntegerBytesAndScale(
+        bytes, 0, bytes.length, scale)) {
+      return null;
+    }
+    return result;
+  }
+
+  @HiveDecimalVersionV2
+  public static HiveDecimal createFromBigIntegerBytesAndScale(
+      byte[] bytes, int offset, int length, int scale) {
+    HiveDecimal result = new HiveDecimal();
+    if (!result.fastSetFromBigIntegerBytesAndScale(
+        bytes, offset, length, scale)) {
+      return null;
+    }
+    return result;
+  }
+
+  // The length of the long array that needs to be passed to serializationUtilsWrite.
+  @HiveDecimalVersionV2
+  public static final int SCRATCH_LONGS_LEN = FAST_SCRATCH_LONGS_LEN;
+
+  /**
+   * Serialize this decimal's BigInteger equivalent unscaled value using the format that the
+   * SerializationUtils methods readBigInteger/writeBigInteger use.
+   * <p>
+   * ORC uses those SerializationUtils methods for its serialization.
+   * <p>
+   * Scratch objects necessary to do the decimal to binary conversion without actually creating a
+   * BigInteger object are passed for better performance.
+   * <p>
+   * Allocate scratchLongs with SCRATCH_LONGS_LEN longs.
+   *
+   */
+  @HiveDecimalVersionV2
+  public boolean serializationUtilsWrite(
+      OutputStream outputStream,
+      long[] scratchLongs)
+          throws IOException {
+    return
+        fastSerializationUtilsWrite(
+            outputStream,
+            scratchLongs);
+  }
+
+  // The length of the scratch byte array that needs to be passed to bigIntegerBytes, etc.
+  @HiveDecimalVersionV2
+  public static final int SCRATCH_BUFFER_LEN_BIG_INTEGER_BYTES =
+      FAST_SCRATCH_BUFFER_LEN_BIG_INTEGER_BYTES;
+
+  /**
+   * Return binary representation of this decimal's BigInteger equivalent unscaled value using
+   * the format that the BigInteger's toByteArray method returns (and the BigInteger constructor
+   * accepts).
+   * <p>
+   * Used by LazyBinary, Avro, and Parquet serialization.
+   * <p>
+   * Scratch objects necessary to do the decimal to binary conversion without actually creating a
+   * BigInteger object are passed for better performance.
+   * <p>
+   * Allocate scratchLongs with SCRATCH_LONGS_LEN longs.
+   * And, allocate buffer with SCRATCH_BUFFER_LEN_BIG_INTEGER_BYTES bytes.
+   * <p>
+   * @param scratchLongs
+   * @param buffer
+   * @return The number of bytes used for the binary result in buffer.  Otherwise, 0 if the
+   *         conversion failed.
+   */
+  @HiveDecimalVersionV2
+  public int bigIntegerBytes(
+      long[] scratchLongs, byte[] buffer) {
+    return
+        fastBigIntegerBytes(
+            scratchLongs, buffer);
+  }
+
+  @HiveDecimalVersionV2
+  public byte[] bigIntegerBytes() {
+    long[] scratchLongs = new long[SCRATCH_LONGS_LEN];
+    byte[] buffer = new byte[SCRATCH_BUFFER_LEN_BIG_INTEGER_BYTES];
+    final int byteLength =
+        fastBigIntegerBytes(
+            scratchLongs, buffer);
+    return Arrays.copyOfRange(buffer, 0, byteLength);
+  }
+
+  /**
+   * Convert decimal to BigInteger binary bytes with a serialize scale, similar to the formatScale
+   * for toFormatString.  It adds trailing zeroes the (emulated) BigInteger toByteArray result
+   * when a serializeScale is greater than current scale.  Or, rounds if scale is less than
+   * current scale.
+   * <p>
+   * Used by Avro and Parquet serialization.
+   * <p>
+   * This emulates the OldHiveDecimal setScale AND THEN OldHiveDecimal getInternalStorage() behavior.
+   *
+   */
+  @HiveDecimalVersionV2
+  public int bigIntegerBytesScaled(
+      int serializeScale,
+      long[] scratchLongs, byte[] buffer) {
+    return
+        fastBigIntegerBytesScaled(
+            serializeScale,
+            scratchLongs, buffer);
+  }
+
+  @HiveDecimalVersionV2
+  public byte[] bigIntegerBytesScaled(int serializeScale) {
+    long[] scratchLongs = new long[SCRATCH_LONGS_LEN];
+    byte[] buffer = new byte[SCRATCH_BUFFER_LEN_BIG_INTEGER_BYTES];
+    int byteLength =
+        fastBigIntegerBytesScaled(
+            serializeScale,
+            scratchLongs, buffer);
+    return Arrays.copyOfRange(buffer, 0, byteLength);
+  }
+
+  //-----------------------------------------------------------------------------------------------
+  // Convert to string/UTF-8 ASCII bytes methods.
+  //-----------------------------------------------------------------------------------------------
+
+  /**
+   * Return a string representation of the decimal.
+   * <p>
+   * It is the equivalent of calling bigDecimalValue().toPlainString -- it does not add exponent
+   * notation -- but is much faster.
+   * <p>
+   * NOTE: If setScale(int serializationScale) was used to create the decimal object, then trailing
+   * fractional digits will be added to display to the serializationScale.  Or, the display may
+   * get rounded.  See the comments for that method.
+   *
+   */
+  @HiveDecimalVersionV1
   @Override
   public String toString() {
-     return bd.toPlainString();
+    if (fastSerializationScale() != -1) {
+
+      // Use the serialization scale and format the string with trailing zeroes (or
+      // round the decimal) if necessary.
+      return
+          fastToFormatString(fastSerializationScale());
+    } else {
+      return
+          fastToString();
+    }
+  }
+
+  @HiveDecimalVersionV2
+  public String toString(
+      byte[] scratchBuffer) {
+    if (fastSerializationScale() != -1) {
+
+      // Use the serialization scale and format the string with trailing zeroes (or
+      // round the decimal) if necessary.
+      return
+          fastToFormatString(
+              fastSerializationScale(),
+              scratchBuffer);
+    } else {
+      return
+          fastToString(scratchBuffer);
+    }
   }
-  
+
+  /**
+   * Return a string representation of the decimal using the specified scale.
+   * <p>
+   * This method is designed to ALWAYS SUCCEED (unless the formatScale parameter is out of range).
+   * <p>
+   * It does the equivalent of a setScale(int newScale).  So, more than 38 digits may be returned.
+   * See that method for more details on how this can happen.
+   * <p>
+   * @param formatScale The number of digits after the decimal point
+   * @return The scaled decimal string representation.
+   */
+  @HiveDecimalVersionV1
+  public String toFormatString(int formatScale) {
+    return
+        fastToFormatString(
+            formatScale);
+  }
+
+  @HiveDecimalVersionV2
+  public String toFormatString(int formatScale, byte[] scratchBuffer) {
+    return
+        fastToFormatString(
+            formatScale,
+            scratchBuffer);
+  }
+
+  @HiveDecimalVersionV2
+  public String toDigitsOnlyString() {
+    return
+        fastToDigitsOnlyString();
+  }
+
+  // The length of the scratch buffer that needs to be passed to toBytes, toFormatBytes,
+  // toDigitsOnlyBytes.
+  @HiveDecimalVersionV2
+  public final static int SCRATCH_BUFFER_LEN_TO_BYTES = FAST_SCRATCH_BUFFER_LEN_TO_BYTES;
+
   /**
-   * Return a string representation of the number with the number of decimal digits as
-   * the given scale. Please note that this is different from toString().
-   * @param scale the number of digits after the decimal point
-   * @return the string representation of exact number of decimal digits
+   * Decimal to ASCII bytes conversion.
+   * <p>
+   * The scratch buffer will contain the result afterwards.  It should be
+   * SCRATCH_BUFFER_LEN_TO_BYTES bytes long.
+   * <p>
+   * The result is produced at the end of the scratch buffer, so the return value is the byte
+   * index of the first byte.  The byte slice is [byteIndex:SCRATCH_BUFFER_LEN_TO_BYTES-1].
+   *
    */
-  public String toFormatString(int scale) {
-    return (bd.scale() == scale ? bd :
-      bd.setScale(scale, RoundingMode.HALF_UP)).toPlainString();
+  @HiveDecimalVersionV2
+  public int toBytes(
+      byte[] scratchBuffer) {
+    return
+        fastToBytes(
+            scratchBuffer);
   }
 
-  public HiveDecimal setScale(int i) {
-    return new HiveDecimal(bd.setScale(i, RoundingMode.HALF_UP));
+  /**
+   * This is the serialization version of decimal to string conversion.
+   * <p>
+   * It adds trailing zeroes when the formatScale is greater than the current scale.  Or, it
+   * does round if the formatScale is less than the current scale.
+   * <p>
+   * Note that you can get more than 38 (MAX_PRECISION) digits in the output with this method.
+   *
+   */
+  @HiveDecimalVersionV2
+  public int toFormatBytes(
+      int formatScale,
+      byte[] scratchBuffer) {
+    return
+        fastToFormatBytes(
+            formatScale,
+            scratchBuffer);
+  }
+
+  /**
+   * Convert decimal to just the digits -- no dot.
+   * <p>
+   * Currently used by BinarySortable serialization.
+   * <p>
+   * A faster way to get just the digits than calling unscaledValue.toString().getBytes().
+   *
+   */
+  @HiveDecimalVersionV2
+  public int toDigitsOnlyBytes(
+      byte[] scratchBuffer) {
+    return
+        fastToDigitsOnlyBytes(
+            scratchBuffer);
   }
 
+  //-----------------------------------------------------------------------------------------------
+  // Comparison methods.
+  //-----------------------------------------------------------------------------------------------
+
+  @HiveDecimalVersionV1
   @Override
   public int compareTo(HiveDecimal dec) {
-    return bd.compareTo(dec.bd);
+    return fastCompareTo(dec);
+  }
+
+  /**
+   * Hash code based on (new) decimal representation.
+   * <p>
+   * Faster than hashCode().
+   * <p>
+   * Used by map join and other Hive internal purposes where performance is important.
+   * <p>
+   * IMPORTANT: See comments for hashCode(), too.
+   */
+  @HiveDecimalVersionV2
+  public int newFasterHashCode() {
+    return fastNewFasterHashCode();
   }
 
+  /**
+   * This returns the original hash code as returned by HiveDecimalV1.
+   * <p>
+   * We need this when the HiveDecimalV1 hash code has been exposed and written or affected
+   * how data is written.
+   * <p>
+   * This method supports compatibility.
+   * <p>
+   * Examples: bucketing, Hive hash() function, and Hive statistics.
+   * <p>
+   * NOTE: It is necessary to create a BigDecimal object and use its hash code, so this method is
+   *       slow.
+   */
+  @HiveDecimalVersionV1
   @Override
   public int hashCode() {
-    return bd.hashCode();
+    return fastHashCode();
   }
 
+  /**
+   * Are two decimal content (values) equal?
+   * <p>
+   * @param obj   The 2nd decimal.
+   * @return  When obj is null or not class HiveDecimal, the return is false.
+   *          Otherwise, returns true when the decimal values are exactly equal.
+   */
+  @HiveDecimalVersionV1
   @Override
   public boolean equals(Object obj) {
     if (obj == null || obj.getClass() != getClass()) {
       return false;
     }
-    return bd.equals(((HiveDecimal) obj).bd);
+    return fastEquals((HiveDecimal) obj);
   }
 
+
+  //-----------------------------------------------------------------------------------------------
+  // Attribute methods.
+  //-----------------------------------------------------------------------------------------------
+
+  /**
+   * Returns the scale of the decimal.  Range 0 .. MAX_SCALE.
+   *
+   */
+  @HiveDecimalVersionV1
   public int scale() {
-    return bd.scale();
+    return fastScale();
+  }
+
+  /**
+   * Returns the number of integer digits in the decimal.
+   * <p>
+   * When the integer portion is zero, this method returns 0.
+   *
+   */
+  @HiveDecimalVersionV2
+  public int integerDigitCount() {
+    return fastIntegerDigitCount();
   }
 
   /**
    * Returns the number of digits (integer and fractional) in the number, which is equivalent
-   * to SQL decimal precision. Note that this is different from BigDecimal.precision(),
-   * which returns the precision of the unscaled value (BigDecimal.valueOf(0.01).precision() = 1,
-   * whereas HiveDecimal.create("0.01").precision() = 2).
-   * If you want the BigDecimal precision, use HiveDecimal.bigDecimalValue().precision()
-   * @return
+   * to SQL decimal precision.
+   * <p>
+   * Note that this method is different from rawPrecision(), which returns the number of digits
+   * ignoring the scale.  Note that rawPrecision returns 0 when the value is 0.
+   *
+   *     Decimal            precision              rawPrecision
+   *        0                    1                         0
+   *        1                    1                         1
+   *       -7                    1                         1
+   *       0.1                   1                         1
+   *       0.04                  2                         1
+   *       0.00380               5                         3
+   *     104.0009                7                         7
+   * <p>
+   * If you just want the actual number of digits, use rawPrecision().
+   *
    */
+  @HiveDecimalVersionV1
   public int precision() {
-    int bdPrecision = bd.precision();
-    int bdScale = bd.scale();
+    return fastSqlPrecision();
+  }
 
-    if (bdPrecision < bdScale) {
-      // This can happen for numbers less than 0.1
-      // For 0.001234: bdPrecision=4, bdScale=6
-      // In this case, we'll set the type to have the same precision as the scale.
-      return bdScale;
-    }
-    return bdPrecision;
+  // See comments for precision().
+  @HiveDecimalVersionV2
+  public int rawPrecision() {
+    return fastRawPrecision();
   }
 
-  /** Note - this method will corrupt the value if it doesn't fit. */
-  public int intValue() {
-    return bd.intValue();
+  /**
+   * Get the sign of the decimal.
+   * <p>
+   * @return 0 if the decimal is equal to 0, -1 if less than zero, and 1 if greater than 0
+   */
+  @HiveDecimalVersionV1
+  public int signum() {
+    return fastSignum();
   }
 
-  public double doubleValue() {
-    return bd.doubleValue();
+  //-----------------------------------------------------------------------------------------------
+  // Value conversion methods.
+  //-----------------------------------------------------------------------------------------------
+
+  /**
+   * Is the decimal value a byte? Range -128            to      127.
+   *                                    Byte.MIN_VALUE          Byte.MAX_VALUE
+   * <p>
+   * Emulates testing for no value corruption:
+   *      bigDecimalValue().setScale(0).equals(BigDecimal.valueOf(bigDecimalValue().byteValue()))
+   * <p>
+   * NOTE: Fractional digits are ignored in the test since byteValue() will
+   *       remove them (round down).
+   * <p>
+   * @return True when byteValue() will return a correct byte.
+   */
+  @HiveDecimalVersionV2
+  public boolean isByte() {
+    return fastIsByte();
   }
 
-  /** Note - this method will corrupt the value if it doesn't fit. */
-  public long longValue() {
-    return bd.longValue();
+  /**
+   * A byte variation of longValue()
+   * <p>
+   * This method will return a corrupted value unless isByte() is true.
+   */
+  @HiveDecimalVersionV1
+  public byte byteValue() {
+    return fastByteValueClip();
+  }
+
+  /**
+   * Is the decimal value a short? Range -32,768         to     32,767.
+   *                                     Short.MIN_VALUE        Short.MAX_VALUE
+   * <p>
+   * Emulates testing for no value corruption:
+   *      bigDecimalValue().setScale(0).equals(BigDecimal.valueOf(bigDecimalValue().shortValue()))
+   * <p>
+   * NOTE: Fractional digits are ignored in the test since shortValue() will
+   *       remove them (round down).
+   * <p>
+   * @return True when shortValue() will return a correct short.
+   */
+  @HiveDecimalVersionV2
+  public boolean isShort() {
+    return fastIsShort();
   }
 
-  /** Note - this method will corrupt the value if it doesn't fit. */
+  /**
+   * A short variation of longValue().
+   * <p>
+   * This method will return a corrupted value unless isShort() is true.
+   */
+  @HiveDecimalVersionV1
   public short shortValue() {
-    return bd.shortValue();
+    return fastShortValueClip();
   }
 
-  public float floatValue() {
-    return bd.floatValue();
+  /**
+   * Is the decimal value an int? Range -2,147,483,648     to   2,147,483,647.
+   *                                   Integer.MIN_VALUE       Integer.MAX_VALUE
+   * <p>
+   * Emulates testing for no value corruption:
+   *      bigDecimalValue().setScale(0).equals(BigDecimal.valueOf(bigDecimalValue().intValue()))
+   * <p>
+   * NOTE: Fractional digits are ignored in the test since intValue() will
+   *       remove them (round down).
+   * <p>
+   * @return True when intValue() will return a correct int.
+   */
+  @HiveDecimalVersionV2
+  public boolean isInt() {
+    return fastIsInt();
   }
 
-  public BigDecimal bigDecimalValue() {
-    return bd;
+  /**
+   * An int variation of longValue().
+   * <p>
+   * This method will return a corrupted value unless isInt() is true.
+   */
+  @HiveDecimalVersionV1
+  public int intValue() {
+    return fastIntValueClip();
   }
 
-  public byte byteValue() {
-    return bd.byteValue();
+  /**
+   * Is the decimal value a long? Range -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807.
+   *                                    Long.MIN_VALUE                Long.MAX_VALUE
+   * <p>
+   * Emulates testing for no value corruption:
+   *      bigDecimalValue().setScale(0).equals(BigDecimal.valueOf(bigDecimalValue().longValue()))
+   * <p>
+   * NOTE: Fractional digits are ignored in the test since longValue() will
+   *       remove them (round down).
+   * <p>
+   * @return True when longValue() will return a correct long.
+   */
+  @HiveDecimalVersionV2
+  public boolean isLong() {
+    return fastIsLong();
   }
 
-  public HiveDecimal setScale(int adjustedScale, int rm) {
-    return create(bd.setScale(adjustedScale, rm));
+  /**
+   * Return the long value of a decimal.
+   * <p>
+   * This method will return a corrupted value unless isLong() is true.
+   */
+  @HiveDecimalVersionV1
+  public long longValue() {
+    return fastLongValueClip();
   }
 
-  public HiveDecimal subtract(HiveDecimal dec) {
-    return create(bd.subtract(dec.bd));
+  @HiveDecimalVersionV1
+  public long longValueExact() {
+    if (!isLong()) {
+      throw new ArithmeticException();
+    }
+    return fastLongValueClip();
   }
 
-  public HiveDecimal multiply(HiveDecimal dec) {
-    return create(bd.multiply(dec.bd), false);
+  /**
+   * Return a float representing the decimal.  Due to the limitations of float, some values will not
+   * be accurate.
+   *
+   */
+  @HiveDecimalVersionV1
+  public float floatValue() {
+    return fastFloatValue();
+   }
+
+  /**
+   * Return a double representing the decimal.  Due to the limitations of double, some values will not
+   * be accurate.
+   *
+   */
+  @HiveDecimalVersionV1
+  public double doubleValue() {
+    return fastDoubleValue();
   }
 
-  public BigInteger unscaledValue() {
-    return bd.unscaledValue();
+  /**
+   * Return a BigDecimal representing the decimal.  The BigDecimal class is able to accurately
+   * represent the decimal.
+   *
+   * NOTE: We are not representing our decimal as BigDecimal now as OldHiveDecimal did, so this
+   * is now slower.
+   *
+   */
+  @HiveDecimalVersionV1
+  public BigDecimal bigDecimalValue() {
+    return fastBigDecimalValue();
   }
 
-  public HiveDecimal scaleByPowerOfTen(int n) {
-    return create(bd.scaleByPowerOfTen(n));
+  /**
+   * Get a BigInteger representing the decimal's digits without a dot.
+   * <p>
+   * @return Returns a signed BigInteger.
+   */
+  @HiveDecimalVersionV1
+  public BigInteger unscaledValue() {
+    return fastBigIntegerValue();
   }
 
-  public HiveDecimal abs() {
-    return create(bd.abs());
+  /**
+   * Return a decimal with only the fractional digits.
+   * <p>
+   * Zero is returned when there are no fractional digits (i.e. scale is 0).
+   *
+   */
+  @HiveDecimalVersionV2
+  public HiveDecimal fractionPortion() {
+    HiveDecimal result = new HiveDecimal();
+    result.fastFractionPortion();
+    return result;
   }
 
-  public HiveDecimal negate() {
-    return create(bd.negate());
+  /**
+   * Return a decimal with only the integer digits.
+   * <p>
+   * Any fractional digits are removed.  E.g. 2.083 scale 3 returns as 2 scale 0.
+   *
+   */
+  @HiveDecimalVersionV2
+  public HiveDecimal integerPortion() {
+    HiveDecimal result = new HiveDecimal();
+    result.fastIntegerPortion();
+    return result;
   }
 
+  //-----------------------------------------------------------------------------------------------
+  // Math methods.
+  //-----------------------------------------------------------------------------------------------
+
+  /**
+   * Add the current decimal and another decimal and return the result.
+   *
+   */
+  @HiveDecimalVersionV1
   public HiveDecimal add(HiveDecimal dec) {
-    return create(bd.add(dec.bd));
+    HiveDecimal result = new HiveDecimal();
+    if (!fastAdd(
+        dec,
+        result)) {
+      return null;
+    }
+    return result;
   }
 
-  public HiveDecimal pow(int n) {
-    BigDecimal result = normalize(bd.pow(n), false);
-    return result == null ? null : new HiveDecimal(result);
+  /**
+   * Subtract from the current decimal another decimal and return the result.
+   *
+   */
+  @HiveDecimalVersionV1
+  public HiveDecimal subtract(HiveDecimal dec) {
+    HiveDecimal result = new HiveDecimal();
+    if (!fastSubtract(
+        dec,
+        result)) {
+      return null;
+    }
+    return result;
   }
 
-  public HiveDecimal remainder(HiveDecimal dec) {
-    return create(bd.remainder(dec.bd));
+  /**
+   * Multiply two decimals.
+   * <p>
+   * NOTE: Overflow Determination for Multiply
+   * <p>
+   *   OldDecimal.multiply performs the multiply with BigDecimal but DOES NOT ALLOW ROUNDING
+   *   (i.e. no throwing away lower fractional digits).
+   * <p>
+   *   CONSIDER: Allowing rounding.  This would eliminate cases today where we return null for
+   *             the multiplication result.
+   * <p>
+   * IMPLEMENTATION NOTE: HiveDecimalV1 code does this:
+   * <p>
+   * return create(bd.multiply(dec.bd), false);
+   */
+  @HiveDecimalVersionV1
+  public HiveDecimal multiply(HiveDecimal dec) {
+    HiveDecimal result = new HiveDecimal();
+    if (!fastMultiply(
+        dec,
+        result)) {
+      return null;
+    }
+    return result;
   }
 
-  public HiveDecimal divide(HiveDecimal dec) {
-    return create(bd.divide(dec.bd, MAX_SCALE, RoundingMode.HALF_UP), true);
+  /**
+   * Multiplies a decimal by a power of 10.
+   * <p>
+   * The decimal 19350 scale 0 will return 193.5 scale 1 when power is -2 (negative).
+   * <p>
+   * The decimal 1.000923 scale 6 will return 10009.23 scale 2 when power is 4 (positive).
+   * <p>
+   * @param power
+   * @return Returns a HiveDecimal whose value is value * 10^power.
+   */
+  @HiveDecimalVersionV1
+  public HiveDecimal scaleByPowerOfTen(int power) {
+    if (power == 0 || fastSignum() == 0) {
+      // No change for multiply by 10^0 or value 0.
+      return this;
+    }
+    HiveDecimal result = new HiveDecimal();
+    if (!fastScaleByPowerOfTen(
+        power,
+        result)) {
+      return null;
+    }
+    return result;
   }
 
   /**
-   * Get the sign of the underlying decimal.
-   * @return 0 if the decimal is equal to 0, -1 if less than zero, and 1 if greater than 0
+   * Take the absolute value of a decimal.
+   * <p>
+   * @return When the decimal is negative, returns a new HiveDecimal with the positive value.
+   *         Otherwise, returns the current 0 or positive value object;
    */
-  public int signum() {
-    return bd.signum();
+  @HiveDecimalVersionV1
+  public HiveDecimal abs() {
+    if (fastSignum() != -1) {
+      return this;
+    }
+    HiveDecimal result = new HiveDecimal(this);
+    result.fastAbs();
+    return result;
   }
 
-  private static BigDecimal trim(BigDecimal d) {
-    if (d.compareTo(BigDecimal.ZERO) == 0) {
-      // Special case for 0, because java doesn't strip zeros correctly on that number.
-      d = BigDecimal.ZERO;
-    } else {
-      d = d.stripTrailingZeros();
-      if (d.scale() < 0) {
-        // no negative scale decimals
-        d = d.setScale(0);
-      }
+  /**
+   * Reverse the sign of a decimal.
+   * <p>
+   * @return Returns a new decimal with the sign flipped.  When the value is 0, the current
+   * object is returned.
+   */
+  @HiveDecimalVersionV1
+  public HiveDecimal negate() {
+    if (fastSignum() == 0) {
+      return this;
     }
-    return d;
+    HiveDecimal result = new HiveDecimal(this);
+    result.fastNegate();
+    return result;
   }
 
-  private static BigDecimal normalize(BigDecimal bd, boolean allowRounding) {
-    if (bd == null) {
-      return null;
-    }
+  //-----------------------------------------------------------------------------------------------
+  // Rounding / setScale methods.
+  //-----------------------------------------------------------------------------------------------
 
-    bd = trim(bd);
+  /**
+   * DEPRECATED for V2.
+   * <p>
+   * Create a decimal from another decimal whose only change is it is MARKED and will display /
+   * serialize with a specified scale that will add trailing zeroes (or round) if necessary.
+   * <p>
+   * After display / serialization, the MARKED object is typically thrown away.
+   * <p>
+   * A MARKED decimal ONLY affects these 2 methods since these were the only ways setScale was
+   * used in the old code.
+   * <p>
+   *    toString
+   *    unscaledValue
+   * <p>
+   * This method has been deprecated because it has poor performance, creating a throw-away object.
+   * <p>
+   * For setScale(scale).toString() use toFormatString(scale) instead.
+   * For setScale(scale).unscaledValue().toByteArray() use V2 bigIntegerBytesScaled(scale) instead.
+   * <p>
+   * For better performance, use the V2 form of toFormatString that takes a scratch buffer,
+   * or even better use toFormatBytes.
+   * <p>
+   * And, use the form of bigIntegerBytesScaled that takes scratch objects for better performance.
+   *
+   */
+  @Deprecated
+  @HiveDecimalVersionV1
+  public HiveDecimal setScale(int serializationScale) {
+    HiveDecimal result = new HiveDecimal(this);
+    result.fastSetSerializationScale(serializationScale);
+    return result;
+  }
 
-    int intDigits = bd.precision() - bd.scale();
+  /**
+   * Do decimal rounding and return the result.
+   * <p>
+   * When the roundingPoint is 0 or positive, we round away lower fractional digits if the
+   * roundingPoint is less than current scale.  In this case, we will round the result using the
+   * specified rounding mode.
+   * <p>
+   * When the roundingPoint is negative, the rounding will occur within the integer digits.  Integer
+   * digits below the roundPoint will be cleared.  If the rounding occurred, a one will be added
+   * just above the roundingPoint.  Note this may cause overflow.
+   * <p>
+   * No effect when the roundingPoint equals the current scale.  The current object is returned.
+   * <p>
+   * The name setScale is taken from BigDecimal.setScale -- a better name would have been round.
+   *
+   */
+  @HiveDecimalVersionV1
+  public HiveDecimal setScale(
+      int roundingPoint, int roundingMode) {
+    if (fastScale() == roundingPoint) {
+      // No change.
+      return this;
+    }
 
-    if (intDigits > MAX_PRECISION) {
+    // Even if we are just setting the scale when newScale is greater than the current scale,
+    // we need a new object to obey our immutable behavior.
+    HiveDecimal result = new HiveDecimal();
+    if (!fastRound(
+        roundingPoint, roundingMode,
+        result)) {
       return null;
     }
+    return result;
+  }
 
-    int maxScale = Math.min(MAX_SCALE, Math.min(MAX_PRECISION - intDigits, bd.scale()));
-    if (bd.scale() > maxScale ) {
-      if (allowRounding) {
-        bd = bd.setScale(maxScale, RoundingMode.HALF_UP);
-        // Trimming is again necessary, because rounding may introduce new trailing 0's.
-        bd = trim(bd);
-      } else {
-        bd = null;
-      }
+  /**
+   * Return the result of decimal^exponent
+   * <p>
+   * CONSIDER: Currently, negative exponent is not supported.
+   * CONSIDER: Does anybody use this method?
+   *
+   */
+  @HiveDecimalVersionV1
+  public HiveDecimal pow(int exponent) {
+    HiveDecimal result = new HiveDecimal(this);
+    if (!fastPow(
+        exponent, result)) {
+      return null;
     }
-
-    return bd;
+    return result;
   }
 
-  private static BigDecimal enforcePrecisionScale(BigDecimal bd, int maxPrecision, int maxScale) {
-    if (bd == null) {
+  /**
+   * Divides this decimal by another decimal and returns a new decimal with the result.
+   *
+   */
+  @HiveDecimalVersionV1
+  public HiveDecimal divide(HiveDecimal divisor) {
+    HiveDecimal result = new HiveDecimal();
+    if (!fastDivide(
+        divisor,
+        result)) {
       return null;
     }
+    return result;
+  }
 
-    /**
-     * Specially handling the case that bd=0, and we are converting it to a type where precision=scale,
-     * such as decimal(1, 1).
-     */
-    if (bd.compareTo(BigDecimal.ZERO) == 0 && bd.scale() == 0 && maxPrecision == maxScale) {
-      return bd.setScale(maxScale);
+  /**
+   * Divides this decimal by another decimal and returns a new decimal with the remainder of the
+   * division.
+   * <p>
+   * value is (decimal % divisor)
+   * <p>
+   * The remainder is equivalent to BigDecimal:
+   *    bigDecimalValue().subtract(bigDecimalValue().divideToIntegralValue(divisor).multiply(divisor))
+   *
+   */
+  @HiveDecimalVersionV1
+  public HiveDecimal remainder(HiveDecimal divisor) {
+    HiveDecimal result = new HiveDecimal();
+    if (!fastRemainder(
+        divisor,
+        result)) {
+      return null;
     }
+    return result;
+  }
+
+  //-----------------------------------------------------------------------------------------------
+  // Precision/scale enforcement methods.
+  //-----------------------------------------------------------------------------------------------
 
-    bd = trim(bd);
+  /**
+   * Determine if a decimal fits within a specified maxPrecision and maxScale, and round
+   * off fractional digits if necessary to make the decimal fit.
+   * <p>
+   * The relationship between the enforcement maxPrecision and maxScale is restricted. The
+   * specified maxScale must be less than or equal to the maxPrecision.
+   * <p>
+   * Normally, decimals that result from creation operation, arithmetic operations, etc are
+   * "free range" up to MAX_PRECISION and MAX_SCALE.  Each operation checks if the result decimal
+   * is beyond MAX_PRECISION and MAX_SCALE.  If so the result decimal is rounded off using
+   * ROUND_HALF_UP.  If the round digit is 5 or more, one is added to the lowest remaining digit.
+   * The round digit is the digit just below the round point. Result overflow can occur if a
+   * result decimal's integer portion exceeds MAX_PRECISION.
+   * <p>
+   * This method supports enforcing to a declared Hive DECIMAL's precision/scale.
+   * E.g. DECIMAL(10,4)
+   * <p>
+   * Here are the enforcement/rounding checks of this method:
+   * <p>
+   *   1) Maximum integer digits = maxPrecision - maxScale
+   * <p>
+   *      If the decimal's integer digit count exceeds this, the decimal does not fit (overflow).
+   * <p>
+   *   2) If decimal's scale is greater than maxScale, then excess fractional digits are
+   *      rounded off.  When rounding increases the remaining decimal, it may exceed the
+   *      limits and overflow.
+   * <p>
+   * @param dec
+   * @param maxPrecision
+   * @param maxScale
+   * @return The original decimal if no adjustment is necessary.
+   *         A rounded off decimal if adjustment was necessary.
+   *         Otherwise, null if the decimal doesn't fit within maxPrecision / maxScale or rounding
+   *         caused a result that exceeds the specified limits or MAX_PRECISION integer digits.
+   */
+  @HiveDecimalVersionV1
+  public static HiveDecimal enforcePrecisionScale(
+      HiveDecimal dec, int maxPrecision, int maxScale) {
 
-    if (bd.scale() > maxScale) {
-      bd = bd.setScale(maxScale, RoundingMode.HALF_UP);
+    if (maxPrecision < 1 || maxPrecision > MAX_PRECISION) {
+      throw new IllegalArgumentException(STRING_ENFORCE_PRECISION_OUT_OF_RANGE);
     }
 
-    int maxIntDigits = maxPrecision - maxScale;
-    int intDigits = bd.precision() - bd.scale();
-    if (intDigits > maxIntDigits) {
-      return null;
+    if (maxScale < 0 || maxScale > HiveDecimal.MAX_SCALE) {
+      throw new IllegalArgumentException(STRING_ENFORCE_SCALE_OUT_OF_RANGE);
     }
 
-    return bd;
-  }
+    if (maxPrecision < maxScale) {
+      throw new IllegalArgumentException(STRING_ENFORCE_SCALE_LESS_THAN_EQUAL_PRECISION);
+    }
 
-  public static HiveDecimal enforcePrecisionScale(HiveDecimal dec, int maxPrecision, int maxScale) {
     if (dec == null) {
       return null;
     }
 
-    // Minor optimization, avoiding creating new objects.
-    if (dec.precision() - dec.scale() <= maxPrecision - maxScale &&
-        dec.scale() <= maxScale) {
+    FastCheckPrecisionScaleStatus status =
+        dec.fastCheckPrecisionScale(
+            maxPrecision, maxScale);
+    switch (status) {
+    case NO_CHANGE:
       return dec;
-    }
-
-    BigDecimal bd = enforcePrecisionScale(dec.bd, maxPrecision, maxScale);
-    if (bd == null) {
+    case OVERFLOW:
       return null;
+    case UPDATE_SCALE_DOWN:
+      {
+        HiveDecimal result = new HiveDecimal();
+        if (!dec.fastUpdatePrecisionScale(
+          maxPrecision, maxScale, status,
+          result)) {
+          return null;
+        }
+        return result;
+      }
+    default:
+      throw new RuntimeException("Unknown fast decimal check precision and scale status " + status);
     }
-
-    return HiveDecimal.create(bd);
   }
 
-  public long longValueExact() {
-    return bd.longValueExact();
+  //-----------------------------------------------------------------------------------------------
+  // Validation methods.
+  //-----------------------------------------------------------------------------------------------
+
+  /**
+   * Throws an exception if the current decimal value is invalid.
+   */
+  @HiveDecimalVersionV2
+  public void validate() {
+    if (!fastIsValid()) {
+      fastRaiseInvalidException();
+    }
   }
-}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/4ba713cc/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimalV1.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimalV1.java b/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimalV1.java
new file mode 100644
index 0000000..f99ffee
--- /dev/null
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimalV1.java
@@ -0,0 +1,386 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.common.type;
+
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.math.RoundingMode;
+
+/**
+ *
+ * HiveDecimal. Simple wrapper for BigDecimal. Adds fixed max precision and non scientific string
+ * representation
+ *
+ */
+public final class HiveDecimalV1 implements Comparable<HiveDecimalV1> {
+  @HiveDecimalVersionV1
+  public static final int MAX_PRECISION = 38;
+  @HiveDecimalVersionV1
+  public static final int MAX_SCALE = 38;
+
+  /**
+   * Default precision/scale when user doesn't specify in the column metadata, such as
+   * decimal and decimal(8).
+   */
+  @HiveDecimalVersionV1
+  public static final int USER_DEFAULT_PRECISION = 10;
+  @HiveDecimalVersionV1
+  public static final int USER_DEFAULT_SCALE = 0;
+
+  /**
+   *  Default precision/scale when system is not able to determine them, such as in case
+   *  of a non-generic udf.
+   */
+  @HiveDecimalVersionV1
+  public static final int SYSTEM_DEFAULT_PRECISION = 38;
+  @HiveDecimalVersionV1
+  public static final int SYSTEM_DEFAULT_SCALE = 18;
+
+  @HiveDecimalVersionV1
+  public static final HiveDecimalV1 ZERO = new HiveDecimalV1(BigDecimal.ZERO);
+  @HiveDecimalVersionV1
+  public static final HiveDecimalV1 ONE = new HiveDecimalV1(BigDecimal.ONE);
+
+  @HiveDecimalVersionV1
+  public static final int ROUND_FLOOR = BigDecimal.ROUND_FLOOR;
+  @HiveDecimalVersionV1
+  public static final int ROUND_CEILING = BigDecimal.ROUND_CEILING;
+  @HiveDecimalVersionV1
+  public static final int ROUND_HALF_UP = BigDecimal.ROUND_HALF_UP;
+  @HiveDecimalVersionV1
+  public static final int ROUND_HALF_EVEN = BigDecimal.ROUND_HALF_EVEN;
+
+  private BigDecimal bd = BigDecimal.ZERO;
+
+  private HiveDecimalV1(BigDecimal bd) {
+    this.bd = bd;
+  }
+
+  @HiveDecimalVersionV1
+  public static HiveDecimalV1 create(BigDecimal b) {
+    return create(b, true);
+  }
+
+  @HiveDecimalVersionV1
+  public static HiveDecimalV1 create(BigDecimal b, boolean allowRounding) {
+    BigDecimal bd = normalize(b, allowRounding);
+    return bd == null ? null : new HiveDecimalV1(bd);
+  }
+
+  @HiveDecimalVersionV1
+  public static HiveDecimalV1 create(BigInteger unscaled, int scale) {
+    BigDecimal bd = normalize(new BigDecimal(unscaled, scale), true);
+    return bd == null ? null : new HiveDecimalV1(bd);
+  }
+
+  @HiveDecimalVersionV1
+  public static HiveDecimalV1 create(String dec) {
+    BigDecimal bd;
+    try {
+      bd = new BigDecimal(dec.trim());
+    } catch (NumberFormatException ex) {
+      return null;
+    }
+    bd = normalize(bd, true);
+    return bd == null ? null : new HiveDecimalV1(bd);
+  }
+
+  @HiveDecimalVersionV1
+  public static HiveDecimalV1 create(BigInteger bi) {
+    BigDecimal bd = normalize(new BigDecimal(bi), true);
+    return bd == null ? null : new HiveDecimalV1(bd);
+  }
+
+  @HiveDecimalVersionV1
+  public static HiveDecimalV1 create(int i) {
+    return new HiveDecimalV1(new BigDecimal(i));
+  }
+
+  @HiveDecimalVersionV1
+  public static HiveDecimalV1 create(long l) {
+    return new HiveDecimalV1(new BigDecimal(l));
+  }
+
+  @HiveDecimalVersionV1
+  @Override
+  public String toString() {
+     return bd.toPlainString();
+  }
+  
+  /**
+   * Return a string representation of the number with the number of decimal digits as
+   * the given scale. Please note that this is different from toString().
+   * @param scale the number of digits after the decimal point
+   * @return the string representation of exact number of decimal digits
+   */
+  @HiveDecimalVersionV1
+  public String toFormatString(int scale) {
+    return (bd.scale() == scale ? bd :
+      bd.setScale(scale, RoundingMode.HALF_UP)).toPlainString();
+  }
+
+  @HiveDecimalVersionV1
+  public HiveDecimalV1 setScale(int i) {
+    return new HiveDecimalV1(bd.setScale(i, RoundingMode.HALF_UP));
+  }
+
+  @HiveDecimalVersionV1
+  @Override
+  public int compareTo(HiveDecimalV1 dec) {
+    return bd.compareTo(dec.bd);
+  }
+
+  @HiveDecimalVersionV1
+  @Override
+  public int hashCode() {
+    return bd.hashCode();
+  }
+
+  @HiveDecimalVersionV1
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null || obj.getClass() != getClass()) {
+      return false;
+    }
+    return bd.equals(((HiveDecimalV1) obj).bd);
+  }
+
+  @HiveDecimalVersionV1
+  public int scale() {
+    return bd.scale();
+  }
+
+  /**
+   * Returns the number of digits (integer and fractional) in the number, which is equivalent
+   * to SQL decimal precision. Note that this is different from BigDecimal.precision(),
+   * which returns the precision of the unscaled value (BigDecimal.valueOf(0.01).precision() = 1,
+   * whereas HiveDecimal.create("0.01").precision() = 2).
+   * If you want the BigDecimal precision, use HiveDecimal.bigDecimalValue().precision()
+   * @return
+   */
+  @HiveDecimalVersionV1
+  public int precision() {
+    int bdPrecision = bd.precision();
+    int bdScale = bd.scale();
+
+    if (bdPrecision < bdScale) {
+      // This can happen for numbers less than 0.1
+      // For 0.001234: bdPrecision=4, bdScale=6
+      // In this case, we'll set the type to have the same precision as the scale.
+      return bdScale;
+    }
+    return bdPrecision;
+  }
+
+  /** Note - this method will corrupt the value if it doesn't fit. */
+  @HiveDecimalVersionV1
+  public int intValue() {
+    return bd.intValue();
+  }
+
+  @HiveDecimalVersionV1
+  public double doubleValue() {
+    return bd.doubleValue();
+  }
+
+  /** Note - this method will corrupt the value if it doesn't fit. */
+  @HiveDecimalVersionV1
+  public long longValue() {
+    return bd.longValue();
+  }
+
+  /** Note - this method will corrupt the value if it doesn't fit. */
+  @HiveDecimalVersionV1
+  public short shortValue() {
+    return bd.shortValue();
+  }
+
+  @HiveDecimalVersionV1
+  public float floatValue() {
+    return bd.floatValue();
+  }
+
+  @HiveDecimalVersionV1
+  public BigDecimal bigDecimalValue() {
+    return bd;
+  }
+
+  @HiveDecimalVersionV1
+  public byte byteValue() {
+    return bd.byteValue();
+  }
+
+  @HiveDecimalVersionV1
+  public HiveDecimalV1 setScale(int adjustedScale, int rm) {
+    return create(bd.setScale(adjustedScale, rm));
+  }
+
+  @HiveDecimalVersionV1
+  public HiveDecimalV1 subtract(HiveDecimalV1 dec) {
+    return create(bd.subtract(dec.bd));
+  }
+
+  @HiveDecimalVersionV1
+  public HiveDecimalV1 multiply(HiveDecimalV1 dec) {
+    return create(bd.multiply(dec.bd), false);
+  }
+
+  @HiveDecimalVersionV1
+  public BigInteger unscaledValue() {
+    return bd.unscaledValue();
+  }
+
+  @HiveDecimalVersionV1
+  public HiveDecimalV1 scaleByPowerOfTen(int n) {
+    return create(bd.scaleByPowerOfTen(n));
+  }
+
+  @HiveDecimalVersionV1
+  public HiveDecimalV1 abs() {
+    return create(bd.abs());
+  }
+
+  @HiveDecimalVersionV1
+  public HiveDecimalV1 negate() {
+    return create(bd.negate());
+  }
+
+  @HiveDecimalVersionV1
+  public HiveDecimalV1 add(HiveDecimalV1 dec) {
+    return create(bd.add(dec.bd));
+  }
+
+  @HiveDecimalVersionV1
+  public HiveDecimalV1 pow(int n) {
+    BigDecimal result = normalize(bd.pow(n), false);
+    return result == null ? null : new HiveDecimalV1(result);
+  }
+
+  @HiveDecimalVersionV1
+  public HiveDecimalV1 remainder(HiveDecimalV1 dec) {
+    return create(bd.remainder(dec.bd));
+  }
+
+  @HiveDecimalVersionV1
+  public HiveDecimalV1 divide(HiveDecimalV1 dec) {
+    return create(bd.divide(dec.bd, MAX_SCALE, RoundingMode.HALF_UP), true);
+  }
+
+  /**
+   * Get the sign of the underlying decimal.
+   * @return 0 if the decimal is equal to 0, -1 if less than zero, and 1 if greater than 0
+   */
+  @HiveDecimalVersionV1
+  public int signum() {
+    return bd.signum();
+  }
+
+  private static BigDecimal trim(BigDecimal d) {
+    if (d.compareTo(BigDecimal.ZERO) == 0) {
+      // Special case for 0, because java doesn't strip zeros correctly on that number.
+      d = BigDecimal.ZERO;
+    } else {
+      d = d.stripTrailingZeros();
+      if (d.scale() < 0) {
+        // no negative scale decimals
+        d = d.setScale(0);
+      }
+    }
+    return d;
+  }
+
+  private static BigDecimal normalize(BigDecimal bd, boolean allowRounding) {
+    if (bd == null) {
+      return null;
+    }
+
+    bd = trim(bd);
+
+    int intDigits = bd.precision() - bd.scale();
+
+    if (intDigits > MAX_PRECISION) {
+      return null;
+    }
+
+    int maxScale = Math.min(MAX_SCALE, Math.min(MAX_PRECISION - intDigits, bd.scale()));
+    if (bd.scale() > maxScale ) {
+      if (allowRounding) {
+        bd = bd.setScale(maxScale, RoundingMode.HALF_UP);
+        // Trimming is again necessary, because rounding may introduce new trailing 0's.
+        bd = trim(bd);
+      } else {
+        bd = null;
+      }
+    }
+
+    return bd;
+  }
+
+  private static BigDecimal enforcePrecisionScale(BigDecimal bd, int maxPrecision, int maxScale) {
+    if (bd == null) {
+      return null;
+    }
+
+    /**
+     * Special handling for the case where bd=0 and we are converting to a type where precision=scale,
+     * such as decimal(1, 1).
+     */
+    if (bd.compareTo(BigDecimal.ZERO) == 0 && bd.scale() == 0 && maxPrecision == maxScale) {
+      return bd.setScale(maxScale);
+    }
+
+    bd = trim(bd);
+
+    if (bd.scale() > maxScale) {
+      bd = bd.setScale(maxScale, RoundingMode.HALF_UP);
+    }
+
+    int maxIntDigits = maxPrecision - maxScale;
+    int intDigits = bd.precision() - bd.scale();
+    if (intDigits > maxIntDigits) {
+      return null;
+    }
+
+    return bd;
+  }
+
+  @HiveDecimalVersionV1
+  public static HiveDecimalV1 enforcePrecisionScale(HiveDecimalV1 dec, int maxPrecision, int maxScale) {
+    if (dec == null) {
+      return null;
+    }
+
+    // Minor optimization, avoiding creating new objects.
+    if (dec.precision() - dec.scale() <= maxPrecision - maxScale &&
+        dec.scale() <= maxScale) {
+      return dec;
+    }
+
+    BigDecimal bd = enforcePrecisionScale(dec.bd, maxPrecision, maxScale);
+    if (bd == null) {
+      return null;
+    }
+
+    return HiveDecimalV1.create(bd);
+  }
+
+  @HiveDecimalVersionV1
+  public long longValueExact() {
+    return bd.longValueExact();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/4ba713cc/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimalVersionV1.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimalVersionV1.java b/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimalVersionV1.java
new file mode 100644
index 0000000..82b769a
--- /dev/null
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimalVersionV1.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common.type;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+
+/**
+ * Marks methods (including static methods) and fields as being part of version 1 HiveDecimal.
+ *
+ */
+@Documented
+@Retention(RetentionPolicy.RUNTIME)
+public @interface HiveDecimalVersionV1 {
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/4ba713cc/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimalVersionV2.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimalVersionV2.java b/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimalVersionV2.java
new file mode 100644
index 0000000..a47513e
--- /dev/null
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/type/HiveDecimalVersionV2.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.common.type;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+
+/**
+ * Marks methods (including static methods) and fields as being part of version 2 HiveDecimal.
+ *
+ */
+@Documented
+@Retention(RetentionPolicy.RUNTIME)
+public @interface HiveDecimalVersionV2 {
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/4ba713cc/storage-api/src/java/org/apache/hadoop/hive/common/type/RandomTypeUtil.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/common/type/RandomTypeUtil.java b/storage-api/src/java/org/apache/hadoop/hive/common/type/RandomTypeUtil.java
index 53a7823..8d950a2 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/common/type/RandomTypeUtil.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/common/type/RandomTypeUtil.java
@@ -57,19 +57,7 @@ public class RandomTypeUtil {
 
   private static final String DECIMAL_CHARS = "0123456789";
 
-  public static class HiveDecimalAndPrecisionScale {
-    public HiveDecimal hiveDecimal;
-    public int precision;
-    public int scale;
-
-    HiveDecimalAndPrecisionScale(HiveDecimal hiveDecimal, int precision, int scale) {
-      this.hiveDecimal = hiveDecimal;
-      this.precision = precision;
-      this.scale = scale;
-    }
-  }
-
-  public static HiveDecimalAndPrecisionScale getRandHiveDecimal(Random r) {
+  public static HiveDecimal getRandHiveDecimal(Random r) {
     int precision;
     int scale;
     while (true) {
@@ -93,18 +81,7 @@ public class RandomTypeUtil {
         sb.append(getRandString(r, DECIMAL_CHARS, scale));
       }
 
-      HiveDecimal bd = HiveDecimal.create(sb.toString());
-      precision = bd.precision();
-      scale = bd.scale();
-      if (scale > precision) {
-        // Sometimes weird decimals are produced?
-        continue;
-      }
-
-      // For now, punt.
-      precision = HiveDecimal.SYSTEM_DEFAULT_PRECISION;
-      scale = HiveDecimal.SYSTEM_DEFAULT_SCALE;
-      return new HiveDecimalAndPrecisionScale(bd, precision, scale);
+      return HiveDecimal.create(sb.toString());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hive/blob/4ba713cc/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java b/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java
index 2488631..e4f8d82 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/DecimalColumnVector.java
@@ -17,10 +17,12 @@
  */
 
 package org.apache.hadoop.hive.ql.exec.vector;
+
 import java.math.BigInteger;
 
 import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.common.type.FastHiveDecimal;
 
 public class DecimalColumnVector extends ColumnVector {
 
@@ -45,7 +47,7 @@ public class DecimalColumnVector extends ColumnVector {
     this.scale = (short) scale;
     vector = new HiveDecimalWritable[size];
     for (int i = 0; i < size; i++) {
-      vector[i] = new HiveDecimalWritable(HiveDecimal.ZERO);
+      vector[i] = new HiveDecimalWritable(0);  // Initially zero.
     }
   }
 
@@ -71,15 +73,14 @@ public class DecimalColumnVector extends ColumnVector {
       inputElementNum = 0;
     }
     if (inputVector.noNulls || !inputVector.isNull[inputElementNum]) {
-      HiveDecimal hiveDec =
-          ((DecimalColumnVector) inputVector).vector[inputElementNum]
-              .getHiveDecimal(precision, scale);
-      if (hiveDec == null) {
+      vector[outElementNum].set(
+          ((DecimalColumnVector) inputVector).vector[inputElementNum],
+          precision, scale);
+      if (!vector[outElementNum].isSet()) {
         isNull[outElementNum] = true;
         noNulls = false;
       } else {
         isNull[outElementNum] = false;
-        vector[outElementNum].set(hiveDec);
       }
     } else {
       isNull[outElementNum] = true;
@@ -100,34 +101,28 @@ public class DecimalColumnVector extends ColumnVector {
   }
 
   public void set(int elementNum, HiveDecimalWritable writeable) {
-    if (writeable == null) {
+    vector[elementNum].set(writeable, precision, scale);
+    if (!vector[elementNum].isSet()) {
       noNulls = false;
       isNull[elementNum] = true;
     } else {
-      HiveDecimal hiveDec = writeable.getHiveDecimal(precision, scale);
-      if (hiveDec == null) {
-        noNulls = false;
-        isNull[elementNum] = true;
-      } else {
-        vector[elementNum].set(hiveDec);
-      }
+      isNull[elementNum] = false;
     }
   }
 
   public void set(int elementNum, HiveDecimal hiveDec) {
-    HiveDecimal checkedDec = HiveDecimal.enforcePrecisionScale(hiveDec, precision, scale);
-    if (checkedDec == null) {
+    vector[elementNum].set(hiveDec, precision, scale);
+    if (!vector[elementNum].isSet()) {
       noNulls = false;
       isNull[elementNum] = true;
     } else {
-      vector[elementNum].set(checkedDec);
+      isNull[elementNum] = false;
     }
   }
 
   public void setNullDataValue(int elementNum) {
     // E.g. For scale 2 the minimum is "0.01"
-    HiveDecimal minimumNonZeroValue = HiveDecimal.create(BigInteger.ONE, scale);
-    vector[elementNum].set(minimumNonZeroValue);
+    vector[elementNum].setFromLongAndScale(1L, scale);
   }
 
   @Override
@@ -144,7 +139,7 @@ public class DecimalColumnVector extends ColumnVector {
       System.arraycopy(oldArray, 0, vector, 0 , oldArray.length);
     }
     for (int i = initPos; i < vector.length; ++i) {
-      vector[i] = new HiveDecimalWritable(HiveDecimal.ZERO);
+      vector[i] = new HiveDecimalWritable(0);  // Initially zero.
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/hive/blob/4ba713cc/storage-api/src/java/org/apache/hadoop/hive/ql/util/TimestampUtils.java
----------------------------------------------------------------------
diff --git a/storage-api/src/java/org/apache/hadoop/hive/ql/util/TimestampUtils.java b/storage-api/src/java/org/apache/hadoop/hive/ql/util/TimestampUtils.java
index 41db9ca..c16d67e 100644
--- a/storage-api/src/java/org/apache/hadoop/hive/ql/util/TimestampUtils.java
+++ b/storage-api/src/java/org/apache/hadoop/hive/ql/util/TimestampUtils.java
@@ -19,6 +19,8 @@
 package org.apache.hadoop.hive.ql.util;
 
 import org.apache.hadoop.hive.common.type.HiveDecimal;
+import org.apache.hadoop.hive.common.type.HiveDecimalV1;
+import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable;
 
 import java.math.BigDecimal;
 import java.sql.Timestamp;
@@ -68,9 +70,86 @@ public class TimestampUtils {
     }
   }
 
-  public static Timestamp decimalToTimestamp(HiveDecimal d) {
+  /**
+   * Take a HiveDecimal and return the timestamp representation where the fraction part is the
+   * nanoseconds and integer part is the number of seconds.
+   * @param dec
+   * @return the corresponding Timestamp, or null if the value does not fit
+   */
+  public static Timestamp decimalToTimestamp(HiveDecimal dec) {
+
+    HiveDecimalWritable nanosWritable = new HiveDecimalWritable(dec);
+    nanosWritable.mutateFractionPortion();               // Clip off seconds portion.
+    nanosWritable.mutateScaleByPowerOfTen(9);            // Bring nanoseconds into integer portion.
+    if (!nanosWritable.isSet() || !nanosWritable.isInt()) {
+      return null;
+    }
+    int nanos = nanosWritable.intValue();
+    if (nanos < 0) {
+      nanos += 1000000000;
+    }
+    nanosWritable.setFromLong(nanos);
+
+    HiveDecimalWritable nanoInstant = new HiveDecimalWritable(dec);
+    nanoInstant.mutateScaleByPowerOfTen(9);
+
+    nanoInstant.mutateSubtract(nanosWritable);
+    nanoInstant.mutateScaleByPowerOfTen(-9);              // Back to seconds.
+    if (!nanoInstant.isSet() || !nanoInstant.isLong()) {
+      return null;
+    }
+    long seconds = nanoInstant.longValue();
+    Timestamp t = new Timestamp(seconds * 1000);
+    t.setNanos(nanos);
+    return t;
+  }
+
+  /**
+   * Take a HiveDecimalWritable and return the timestamp representation where the fraction part
+   * is the nanoseconds and integer part is the number of seconds.
+   *
+   * This is a HiveDecimalWritable variation with supplied scratch objects.
+   * @param decWritable
+   * @param scratchDecWritable1
+   * @param scratchDecWritable2
+   * @return the corresponding Timestamp, or null if the value does not fit
+   */
+  public static Timestamp decimalToTimestamp(
+      HiveDecimalWritable decWritable,
+      HiveDecimalWritable scratchDecWritable1, HiveDecimalWritable scratchDecWritable2) {
+
+    HiveDecimalWritable nanosWritable = scratchDecWritable1;
+    nanosWritable.set(decWritable);
+    nanosWritable.mutateFractionPortion();               // Clip off seconds portion.
+    nanosWritable.mutateScaleByPowerOfTen(9);            // Bring nanoseconds into integer portion.
+    if (!nanosWritable.isSet() || !nanosWritable.isInt()) {
+      return null;
+    }
+    int nanos = nanosWritable.intValue();
+    if (nanos < 0) {
+      nanos += 1000000000;
+    }
+    nanosWritable.setFromLong(nanos);
+
+    HiveDecimalWritable nanoInstant = scratchDecWritable2;
+    nanoInstant.set(decWritable);
+    nanoInstant.mutateScaleByPowerOfTen(9);
+
+    nanoInstant.mutateSubtract(nanosWritable);
+    nanoInstant.mutateScaleByPowerOfTen(-9);              // Back to seconds.
+    if (!nanoInstant.isSet() || !nanoInstant.isLong()) {
+      return null;
+    }
+    long seconds = nanoInstant.longValue();
+
+    Timestamp timestamp = new Timestamp(seconds * 1000L);
+    timestamp.setNanos(nanos);
+    return timestamp;
+  }
+
+  public static Timestamp decimalToTimestamp(HiveDecimalV1 dec) {
     try {
-      BigDecimal nanoInstant = d.bigDecimalValue().multiply(BILLION_BIG_DECIMAL);
+      BigDecimal nanoInstant = dec.bigDecimalValue().multiply(BILLION_BIG_DECIMAL);
       int nanos = nanoInstant.remainder(BILLION_BIG_DECIMAL).intValue();
       if (nanos < 0) {
         nanos += 1000000000;