You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@drill.apache.org by pr...@apache.org on 2017/12/21 05:19:34 UTC
[07/15] drill git commit: DRILL-5657: Size-aware vector writer structure
http://git-wip-us.apache.org/repos/asf/drill/blob/40de8ca4/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/test/TestOffsetVectorWriter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/test/TestOffsetVectorWriter.java b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/test/TestOffsetVectorWriter.java
new file mode 100644
index 0000000..82d4d08
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/test/TestOffsetVectorWriter.java
@@ -0,0 +1,425 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.test.rowSet.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.drill.common.types.TypeProtos.DataMode;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.record.MaterializedField;
+import org.apache.drill.exec.vector.UInt4Vector;
+import org.apache.drill.exec.vector.ValueVector;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
+import org.apache.drill.exec.vector.accessor.ScalarWriter.ColumnWriterListener;
+import org.apache.drill.exec.vector.accessor.ValueType;
+import org.apache.drill.exec.vector.accessor.writer.OffsetVectorWriter;
+import org.apache.drill.test.SubOperatorTest;
+import org.apache.drill.test.rowSet.SchemaBuilder;
+import org.apache.drill.test.rowSet.test.TestFixedWidthWriter.TestIndex;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import io.netty.buffer.DrillBuf;
+
+/**
+ * The offset vector writer is unique: it follows the same API as
+ * the other writers, but has a unique twist because offsets are written
+ * into the slot one past the current row position. That is, if we are writing
+ * row 5, the offset vector writer writes to position 6. This is done to
+ * write the end offset of row 5 as the start offset of row 6. (It does,
+ * however, waste space as we need twice the number of elements in the
+ * offset vector as other vectors when writing power-of-two record
+ * counts.)
+ */
+
+public class TestOffsetVectorWriter extends SubOperatorTest {
+
+ /**
+ * Party on enough memory so that the uninitialized nature of
+ * vectors under the new writers will cause tests to fail if
+ * the writers don't correctly fill in all values.
+ */
+
+ @BeforeClass
+ public static void setup() {
+ DrillBuf bufs[] = new DrillBuf[100];
+ for (int i = 0; i < bufs.length; i++) {
+ bufs[i] = fixture.allocator().buffer(ValueVector.MAX_BUFFER_SIZE);
+ for (int j = 0; j < ValueVector.MAX_BUFFER_SIZE; j++) {
+ bufs[i].setByte(j, (byte) (j & 0x7f));
+ }
+ }
+ for (int i = 0; i < bufs.length; i++) {
+ bufs[i].close();
+ }
+ }
+
+ /**
+ * Basic test to write a contiguous set of offsets, enough to cause
+ * the vector to double in size twice, then read back the values.
+ */
+
+ @Test
+ public void testWrite() {
+ try (UInt4Vector vector = allocVector(1000)) {
+
+ TestIndex index = new TestIndex();
+ OffsetVectorWriter writer = makeWriter(vector, index);
+
+ // Start write sets initial position to 0.
+
+ writer.startWrite();
+ assertEquals(0, vector.getAccessor().get(0));
+
+ // Pretend to write offsets for values of width 10. We write
+ // the end position of each field.
+ // Write enough that the vector is resized.
+
+ long origAddr = vector.getBuffer().addr();
+ for (int i = 0; i < 3000; i++) {
+ index.index = i;
+ writer.startRow();
+ assertEquals(i * 10, writer.nextOffset());
+ writer.setNextOffset((i+1) * 10);
+ assertEquals((i+1) * 10, writer.nextOffset());
+ writer.saveRow();
+ }
+ writer.endWrite();
+
+ // Should have been reallocated.
+
+ assertNotEquals(origAddr, vector.getBuffer().addr());
+
+ // Verify values
+
+ for (int i = 0; i < 3001; i++) {
+ assertEquals(i * 10, vector.getAccessor().get(i));
+ }
+ }
+ }
+
+ @Test
+ public void testRestartRow() {
+ try (UInt4Vector vector = allocVector(1000)) {
+
+ TestIndex index = new TestIndex();
+ OffsetVectorWriter writer = makeWriter(vector, index);
+ writer.startWrite();
+
+ // Write rows, rewriting every other row.
+
+ writer.startRow();
+ index.index = 0;
+ for (int i = 0; i < 50; i++) {
+ if (i % 2 == 0) {
+ assertEquals(i == 0 ? 0 : (i - 1) * 10, writer.nextOffset());
+ writer.setNextOffset((i + 1) * 10);
+ writer.saveRow();
+ writer.startRow();
+ index.index++;
+ } else {
+ writer.setNextOffset((i + 1) * 10);
+ writer.restartRow();
+ }
+ }
+ writer.endWrite();
+
+ // Verify values
+
+ assertEquals(0, vector.getAccessor().get(0));
+ for (int i = 1; i < 25; i++) {
+ assertEquals((2 * i - 1) * 10, vector.getAccessor().get(i));
+ }
+ }
+ }
+
+
+ /**
+ * Offset vectors have specific behavior when back-filling missing values:
+ * the last offset must be carried forward into the missing slots. The
+ * slots cannot be zero-filled, or entries will end up with a negative
+ * length.
+ */
+
+ @Test
+ public void testFillEmpties() {
+ try (UInt4Vector vector = allocVector(1000)) {
+ TestIndex index = new TestIndex();
+ OffsetVectorWriter writer = makeWriter(vector, index);
+ writer.startWrite();
+
+ // Pretend to write offsets for values of width 10, but
+ // skip four out of five values, forcing backfill.
+ // The loop will cause the vector to double in size.
+ // The number of values is odd, forcing the writer to
+ // back-fill at the end as well as between values.
+
+ long origAddr = vector.getBuffer().addr();
+ for (int i = 5; i < 3001; i += 5) {
+ index.index = i;
+ writer.startRow();
+ int startOffset = writer.nextOffset();
+ assertEquals((i/5 - 1) * 10, startOffset);
+ writer.setNextOffset(startOffset + 10);
+ writer.saveRow();
+ }
+ index.index = 3003;
+ writer.endWrite();
+
+ // Should have been reallocated.
+
+ assertNotEquals(origAddr, vector.getBuffer().addr());
+
+ // Verify values
+
+ for (int i = 0; i < 3004; i++) {
+ assertEquals(((i-1)/5) * 10, vector.getAccessor().get(i));
+ }
+ }
+ }
+
+ /**
+ * The rollover method is used during vector overflow.
+ */
+
+ @Test
+ public void testRollover() {
+ try (UInt4Vector vector = allocVector(1000)) {
+ TestIndex index = new TestIndex();
+ OffsetVectorWriter writer = makeWriter(vector, index);
+ writer.startWrite();
+
+ // Simulate doing an overflow of ten values.
+
+ for (int i = 0; i < 10; i++) {
+ index.index = i;
+ writer.startRow();
+ writer.setNextOffset((i+1) * 10);
+ writer.saveRow();
+ }
+
+ // Overflow occurs after writing the 11th row
+
+ index.index = 10;
+ writer.startRow();
+ writer.setNextOffset(110);
+
+ // Overflow occurs
+
+ writer.preRollover();
+
+ // Simulate rollover
+
+ for (int i = 0; i < 15; i++) {
+ vector.getMutator().set(i, 0xdeadbeef);
+ }
+
+ // Simulate shifting the last value down (which changes
+ // the offset.)
+
+ vector.getMutator().set(1, 10);
+
+ // Post rollover, slot 0 should be initialized
+
+ writer.postRollover();
+ index.index = 0;
+ writer.saveRow();
+
+ // Simulate resuming with a few more values.
+
+ for (int i = 1; i < 5; i++) {
+ index.index = i;
+ writer.startRow();
+ writer.setNextOffset((i + 1) * 10);
+ writer.saveRow();
+ }
+ writer.endWrite();
+
+ // Verify the results
+
+ for (int i = 0; i < 6; i++) {
+ assertEquals(i * 10, vector.getAccessor().get(i));
+ }
+ }
+ }
+
+ /**
+ * Simulate the case in which the tail end of an overflow
+ * batch has empties. <tt>preRollover()</tt> should back-fill
+ * them with the next offset prior to rollover.
+ */
+
+ @Test
+ public void testRolloverWithEmpties() {
+ try (UInt4Vector vector = allocVector(1000)) {
+ TestIndex index = new TestIndex();
+ OffsetVectorWriter writer = makeWriter(vector, index);
+ writer.startWrite();
+
+ // Simulate doing an overflow of 15 values,
+ // of which 5 are empty.
+
+ for (int i = 0; i < 10; i++) {
+ index.index = i;
+ writer.startRow();
+ writer.setNextOffset((i+1) * 10);
+ writer.saveRow();
+ }
+
+ for (int i = 10; i < 15; i++) {
+ index.index = i;
+ writer.startRow();
+ writer.saveRow();
+ }
+
+ // Overflow occurs before writing the 16th row
+
+ index.index = 15;
+ writer.startRow();
+
+ // Overflow occurs. This should fill empty offsets.
+
+ writer.preRollover();
+
+ // Verify the first "batch" results
+
+ for (int i = 0; i < 11; i++) {
+ assertEquals(i * 10, vector.getAccessor().get(i));
+ }
+ for (int i = 11; i < 16; i++) {
+ assertEquals("i = " + i, 100, vector.getAccessor().get(i));
+ }
+
+ // Simulate rollover
+
+ for (int i = 0; i < 20; i++) {
+ vector.getMutator().set(i, 0xdeadbeef);
+ }
+
+ // Post rollover, slot 0 should be initialized.
+ // This is a rollover. This row must set the value
+ // for the new row 0 (which was presumably set/filled
+ // after the overflow.)
+
+ writer.postRollover();
+ index.index = 0;
+ writer.setNextOffset(0);
+ writer.saveRow();
+
+ // Skip more values.
+
+ for (int i = 1; i < 5; i++) {
+ index.index = i;
+ writer.startRow();
+ writer.saveRow();
+ }
+
+ // Simulate resuming with a few more values.
+
+ for (int i = 5; i < 10; i++) {
+ index.index = i;
+ writer.startRow();
+ writer.setNextOffset((i - 4) * 10);
+ writer.saveRow();
+ }
+ writer.endWrite();
+
+ // Verify the results
+
+ for (int i = 0; i < 6; i++) {
+ assertEquals(0, vector.getAccessor().get(i));
+ }
+ for (int i = 6; i < 11; i++) {
+ assertEquals((i - 5) * 10, vector.getAccessor().get(i));
+ }
+ }
+ }
+
+ /**
+ * Test resize monitoring. Add a listener to an offsets writer,
+ * capture each resize, and refuse a resize when the number
+ * of ints exceeds 8K values. This will trigger an overflow,
+ * which will throw an exception which we then check for.
+ */
+
+ @Test
+ public void testSizeLimit() {
+ try (UInt4Vector vector = allocVector(1000)) {
+ TestIndex index = new TestIndex();
+ OffsetVectorWriter writer = makeWriter(vector, index);
+ writer.bindListener(new ColumnWriterListener() {
+ int totalAlloc = 4096;
+
+ @Override
+ public void overflowed(ScalarWriter writer) {
+ throw new IllegalStateException("overflow called");
+ }
+
+ @Override
+ public boolean canExpand(ScalarWriter writer, int delta) {
+// System.out.println("Delta: " + delta);
+ totalAlloc += delta;
+ return totalAlloc < 16_384 * 4;
+ }
+ });
+ writer.startWrite();
+ try {
+ for (int i = 0; ; i++ ) {
+ index.index = i;
+ writer.startRow();
+ writer.setNextOffset(i);
+ writer.saveRow();
+ }
+ }
+ catch(IllegalStateException e) {
+ assertTrue(e.getMessage().contains("overflow called"));
+ }
+
+ // Should have failed on 8191, which doubled vector
+ // to 16K, which was rejected. Note the 8191 value,
+ // because offsets are one ahead of the index.
+
+ assertEquals(8191, index.index);
+ }
+ }
+
+ private UInt4Vector allocVector(int size) {
+ MaterializedField field = SchemaBuilder.columnSchema("x", MinorType.UINT4,
+ DataMode.REQUIRED);
+ UInt4Vector vector = new UInt4Vector(field, fixture.allocator());
+ vector.allocateNew(size);
+
+ // Party on the bytes of the vector so we start dirty
+
+ for (int i = 0; i < size; i++) {
+ vector.getMutator().set(i, 0xdeadbeef);
+ }
+ assertNotEquals(0, vector.getAccessor().get(0));
+ return vector;
+ }
+
+ private OffsetVectorWriter makeWriter(UInt4Vector vector, TestIndex index) {
+ OffsetVectorWriter writer = new OffsetVectorWriter(vector);
+ writer.bindIndex(index);
+
+ assertEquals(ValueType.INTEGER, writer.valueType());
+ return writer;
+ }
+}
http://git-wip-us.apache.org/repos/asf/drill/blob/40de8ca4/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/test/TestScalarAccessors.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/test/TestScalarAccessors.java b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/test/TestScalarAccessors.java
new file mode 100644
index 0000000..939377a
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/test/TestScalarAccessors.java
@@ -0,0 +1,1266 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.test.rowSet.test;
+
+import static org.junit.Assert.*;
+
+import java.math.BigDecimal;
+import java.util.Arrays;
+
+import org.apache.drill.common.types.TypeProtos.DataMode;
+import org.apache.drill.common.types.TypeProtos.MajorType;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.record.BatchSchema;
+import org.apache.drill.exec.vector.accessor.ScalarElementReader;
+import org.apache.drill.exec.vector.accessor.ScalarReader;
+import org.apache.drill.exec.vector.accessor.ValueType;
+import org.apache.drill.test.SubOperatorTest;
+import org.apache.drill.test.rowSet.RowSetReader;
+import org.apache.drill.test.rowSet.SchemaBuilder;
+import org.joda.time.Period;
+import org.apache.drill.test.rowSet.RowSet.SingleRowSet;
+import org.junit.Test;
+
+/**
+ * Verify that simple scalar (non-repeated) column readers
+ * and writers work as expected. The focus is on the generated
+ * and type-specific functions for each type.
+ */
+
+// The following types are not fully supported in Drill
+// TODO: Var16Char
+// TODO: Bit
+// TODO: Decimal28Sparse
+// TODO: Decimal38Sparse
+
+public class TestScalarAccessors extends SubOperatorTest {
+
+ @Test
+ public void testTinyIntRW() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .add("col", MinorType.TINYINT)
+ .build();
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(0)
+ .addRow(Byte.MAX_VALUE)
+ .addRow(Byte.MIN_VALUE)
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.INTEGER, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals(0, colReader.getInt());
+
+ assertTrue(reader.next());
+ assertEquals(Byte.MAX_VALUE, colReader.getInt());
+ assertEquals((int) Byte.MAX_VALUE, colReader.getObject());
+ assertEquals(Byte.toString(Byte.MAX_VALUE), colReader.getAsString());
+
+ assertTrue(reader.next());
+ assertEquals(Byte.MIN_VALUE, colReader.getInt());
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ private void nullableIntTester(MinorType type) {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .addNullable("col", type)
+ .build();
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(10)
+ .addSingleCol(null)
+ .addRow(30)
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals(10, colReader.getInt());
+
+ assertTrue(reader.next());
+ assertTrue(colReader.isNull());
+ assertNull(colReader.getObject());
+ assertEquals("null", colReader.getAsString());
+ // Data value is undefined, may be garbage
+
+ assertTrue(reader.next());
+ assertEquals(30, colReader.getInt());
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ @Test
+ public void testNullableTinyInt() {
+ nullableIntTester(MinorType.TINYINT);
+ }
+
+ private void intArrayTester(MinorType type) {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .addArray("col", type)
+ .build();
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addSingleCol(new int[] {})
+ .addSingleCol(new int[] {0, 20, 30})
+ .build();
+ assertEquals(2, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarElementReader colReader = reader.elements(0);
+ assertEquals(ValueType.INTEGER, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertEquals(0, colReader.size());
+
+ assertTrue(reader.next());
+ assertEquals(3, colReader.size());
+ assertEquals(0, colReader.getInt(0));
+ assertEquals(20, colReader.getInt(1));
+ assertEquals(30, colReader.getInt(2));
+ assertEquals(0, colReader.getObject(0));
+ assertEquals(20, colReader.getObject(1));
+ assertEquals(30, colReader.getObject(2));
+ assertEquals("0", colReader.getAsString(0));
+ assertEquals("20", colReader.getAsString(1));
+ assertEquals("30", colReader.getAsString(2));
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ @Test
+ public void testTinyIntArray() {
+ intArrayTester(MinorType.TINYINT);
+ }
+
+ @Test
+ public void testSmallIntRW() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .add("col", MinorType.SMALLINT)
+ .build();
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(0)
+ .addRow(Short.MAX_VALUE)
+ .addRow(Short.MIN_VALUE)
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.INTEGER, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals(0, colReader.getInt());
+
+ assertTrue(reader.next());
+ assertEquals(Short.MAX_VALUE, colReader.getInt());
+ assertEquals((int) Short.MAX_VALUE, colReader.getObject());
+ assertEquals(Short.toString(Short.MAX_VALUE), colReader.getAsString());
+
+ assertTrue(reader.next());
+ assertEquals(Short.MIN_VALUE, colReader.getInt());
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ @Test
+ public void testNullableSmallInt() {
+ nullableIntTester(MinorType.SMALLINT);
+ }
+
+ @Test
+ public void testSmallArray() {
+ intArrayTester(MinorType.SMALLINT);
+ }
+
+ @Test
+ public void testIntRW() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .add("col", MinorType.INT)
+ .build();
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(0)
+ .addRow(Integer.MAX_VALUE)
+ .addRow(Integer.MIN_VALUE)
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.INTEGER, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals(0, reader.scalar(0).getInt());
+
+ assertTrue(reader.next());
+ assertEquals(Integer.MAX_VALUE, colReader.getInt());
+ assertEquals(Integer.MAX_VALUE, colReader.getObject());
+ assertEquals(Integer.toString(Integer.MAX_VALUE), colReader.getAsString());
+
+ assertTrue(reader.next());
+ assertEquals(Integer.MIN_VALUE, colReader.getInt());
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ @Test
+ public void testNullableInt() {
+ nullableIntTester(MinorType.INT);
+ }
+
+ @Test
+ public void testIntArray() {
+ intArrayTester(MinorType.INT);
+ }
+
+ private void longRWTester(MinorType type) {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .add("col", type)
+ .build();
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(0L)
+ .addRow(Long.MAX_VALUE)
+ .addRow(Long.MIN_VALUE)
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.LONG, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals(0, colReader.getLong());
+
+ assertTrue(reader.next());
+ assertEquals(Long.MAX_VALUE, colReader.getLong());
+ assertEquals(Long.MAX_VALUE, colReader.getObject());
+ assertEquals(Long.toString(Long.MAX_VALUE), colReader.getAsString());
+
+ assertTrue(reader.next());
+ assertEquals(Long.MIN_VALUE, colReader.getLong());
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ @Test
+ public void testLongRW() {
+ longRWTester(MinorType.BIGINT);
+ }
+
+ private void nullableLongTester(MinorType type) {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .addNullable("col", type)
+ .build();
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(10L)
+ .addSingleCol(null)
+ .addRow(30L)
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals(10, colReader.getLong());
+
+ assertTrue(reader.next());
+ assertTrue(colReader.isNull());
+ assertNull(colReader.getObject());
+ assertEquals("null", colReader.getAsString());
+ // Data value is undefined, may be garbage
+
+ assertTrue(reader.next());
+ assertEquals(30, colReader.getLong());
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ @Test
+ public void testNullableLong() {
+ nullableLongTester(MinorType.BIGINT);
+ }
+
+ private void longArrayTester(MinorType type) {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .addArray("col", type)
+ .build();
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addSingleCol(new long[] {})
+ .addSingleCol(new long[] {0, 20, 30})
+ .build();
+ assertEquals(2, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarElementReader colReader = reader.elements(0);
+ assertEquals(ValueType.LONG, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertEquals(0, colReader.size());
+
+ assertTrue(reader.next());
+ assertEquals(3, colReader.size());
+ assertEquals(0, colReader.getLong(0));
+ assertEquals(20, colReader.getLong(1));
+ assertEquals(30, colReader.getLong(2));
+ assertEquals(0L, colReader.getObject(0));
+ assertEquals(20L, colReader.getObject(1));
+ assertEquals(30L, colReader.getObject(2));
+ assertEquals("0", colReader.getAsString(0));
+ assertEquals("20", colReader.getAsString(1));
+ assertEquals("30", colReader.getAsString(2));
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ @Test
+ public void testLongArray() {
+ longArrayTester(MinorType.BIGINT);
+ }
+
+ @Test
+ public void testFloatRW() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .add("col", MinorType.FLOAT4)
+ .build();
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(0F)
+ .addRow(Float.MAX_VALUE)
+ .addRow(Float.MIN_VALUE)
+ .addRow(100F)
+ .build();
+ assertEquals(4, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.DOUBLE, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals(0, colReader.getDouble(), 0.000001);
+
+ assertTrue(reader.next());
+ assertEquals(Float.MAX_VALUE, colReader.getDouble(), 0.000001);
+ assertEquals((double) Float.MAX_VALUE, (double) colReader.getObject(), 0.000001);
+
+ assertTrue(reader.next());
+ assertEquals(Float.MIN_VALUE, colReader.getDouble(), 0.000001);
+
+ assertTrue(reader.next());
+ assertEquals(100, colReader.getDouble(), 0.000001);
+ assertEquals("100.0", colReader.getAsString());
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ private void nullableDoubleTester(MinorType type) {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .addNullable("col", type)
+ .build();
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(10F)
+ .addSingleCol(null)
+ .addRow(30F)
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals(10, colReader.getDouble(), 0.000001);
+
+ assertTrue(reader.next());
+ assertTrue(colReader.isNull());
+ assertNull(colReader.getObject());
+ assertEquals("null", colReader.getAsString());
+ // Data value is undefined, may be garbage
+
+ assertTrue(reader.next());
+ assertEquals(30, colReader.getDouble(), 0.000001);
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ @Test
+ public void testNullableFloat() {
+ nullableDoubleTester(MinorType.FLOAT4);
+ }
+
+ private void doubleArrayTester(MinorType type) {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .addArray("col", type)
+ .build();
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addSingleCol(new double[] {})
+ .addSingleCol(new double[] {0, 20.5, 30.0})
+ .build();
+ assertEquals(2, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarElementReader colReader = reader.elements(0);
+ assertEquals(ValueType.DOUBLE, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertEquals(0, colReader.size());
+
+ assertTrue(reader.next());
+ assertEquals(3, colReader.size());
+ assertEquals(0, colReader.getDouble(0), 0.00001);
+ assertEquals(20.5, colReader.getDouble(1), 0.00001);
+ assertEquals(30.0, colReader.getDouble(2), 0.00001);
+ assertEquals(0, (double) colReader.getObject(0), 0.00001);
+ assertEquals(20.5, (double) colReader.getObject(1), 0.00001);
+ assertEquals(30.0, (double) colReader.getObject(2), 0.00001);
+ assertEquals("0.0", colReader.getAsString(0));
+ assertEquals("20.5", colReader.getAsString(1));
+ assertEquals("30.0", colReader.getAsString(2));
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ @Test
+ public void testFloatArray() {
+ doubleArrayTester(MinorType.FLOAT4);
+ }
+
+ @Test
+ public void testDoubleRW() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .add("col", MinorType.FLOAT8)
+ .build();
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(0D)
+ .addRow(Double.MAX_VALUE)
+ .addRow(Double.MIN_VALUE)
+ .addRow(100D)
+ .build();
+ assertEquals(4, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.DOUBLE, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals(0, colReader.getDouble(), 0.000001);
+
+ assertTrue(reader.next());
+ assertEquals(Double.MAX_VALUE, colReader.getDouble(), 0.000001);
+ assertEquals(Double.MAX_VALUE, (double) colReader.getObject(), 0.000001);
+
+ assertTrue(reader.next());
+ assertEquals(Double.MIN_VALUE, colReader.getDouble(), 0.000001);
+
+ assertTrue(reader.next());
+ assertEquals(100, colReader.getDouble(), 0.000001);
+ assertEquals("100.0", colReader.getAsString());
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ @Test
+ public void testNullableDouble() {
+ nullableDoubleTester(MinorType.FLOAT8);
+ }
+
+ @Test
+ public void testDoubleArray() {
+ doubleArrayTester(MinorType.FLOAT8);
+ }
+
+ @Test
+ public void testStringRW() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .add("col", MinorType.VARCHAR)
+ .build();
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow("")
+ .addRow("abcd")
+ .build();
+ assertEquals(2, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.STRING, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals("", colReader.getString());
+
+ assertTrue(reader.next());
+ assertEquals("abcd", colReader.getString());
+ assertEquals("abcd", colReader.getObject());
+ assertEquals("\"abcd\"", colReader.getAsString());
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ @Test
+ public void testNullableString() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .addNullable("col", MinorType.VARCHAR)
+ .build();
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow("")
+ .addSingleCol(null)
+ .addRow("abcd")
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals("", colReader.getString());
+
+ assertTrue(reader.next());
+ assertTrue(colReader.isNull());
+ assertNull(colReader.getObject());
+ assertEquals("null", colReader.getAsString());
+
+ assertTrue(reader.next());
+ assertEquals("abcd", colReader.getString());
+ assertEquals("abcd", colReader.getObject());
+ assertEquals("\"abcd\"", colReader.getAsString());
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ @Test
+ public void testStringArray() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .addArray("col", MinorType.VARCHAR)
+ .build();
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addSingleCol(new String[] {})
+ .addSingleCol(new String[] {"fred", "", "wilma"})
+ .build();
+ assertEquals(2, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarElementReader colReader = reader.elements(0);
+ assertEquals(ValueType.STRING, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertEquals(0, colReader.size());
+
+ assertTrue(reader.next());
+ assertEquals(3, colReader.size());
+ assertEquals("fred", colReader.getString(0));
+ assertEquals("", colReader.getString(1));
+ assertEquals("wilma", colReader.getString(2));
+ assertEquals("fred", colReader.getObject(0));
+ assertEquals("", colReader.getObject(1));
+ assertEquals("wilma", colReader.getObject(2));
+ assertEquals("\"fred\"", colReader.getAsString(0));
+ assertEquals("\"\"", colReader.getAsString(1));
+ assertEquals("\"wilma\"", colReader.getAsString(2));
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ @Test
+ public void testIntervalYearRW() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .add("col", MinorType.INTERVALYEAR)
+ .build();
+
+ Period p1 = Period.years(0);
+ Period p2 = Period.years(2).plusMonths(3);
+ Period p3 = Period.years(1234).plusMonths(11);
+
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(p1)
+ .addRow(p2)
+ .addRow(p3)
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.PERIOD, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals(p1, colReader.getPeriod());
+
+ assertTrue(reader.next());
+ assertEquals(p2, colReader.getPeriod());
+ assertEquals(p2, colReader.getObject());
+ assertEquals(p2.toString(), colReader.getAsString());
+
+ assertTrue(reader.next());
+ assertEquals(p3, colReader.getPeriod());
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ // Nullable INTERVALYEAR: a null row must read back as a null Period,
+ // a null Object, and the literal string "null".
+ @Test
+ public void testNullableIntervalYear() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .addNullable("col", MinorType.INTERVALYEAR)
+ .build();
+
+ Period p1 = Period.years(0);
+ Period p2 = Period.years(2).plusMonths(3);
+
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(p1)
+ .addSingleCol(null)
+ .addRow(p2)
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.PERIOD, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals(p1, colReader.getPeriod());
+
+ assertTrue(reader.next());
+ assertTrue(colReader.isNull());
+ assertNull(colReader.getPeriod());
+ assertNull(colReader.getObject());
+ assertEquals("null", colReader.getAsString());
+
+ assertTrue(reader.next());
+ assertEquals(p2, colReader.getPeriod());
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ // Repeated INTERVALYEAR: verifies an empty array row and a
+ // three-element row via the element reader, including the
+ // Object and string element accessors.
+ @Test
+ public void testIntervalYearArray() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .addArray("col", MinorType.INTERVALYEAR)
+ .build();
+
+ Period p1 = Period.years(0);
+ Period p2 = Period.years(2).plusMonths(3);
+ Period p3 = Period.years(1234).plusMonths(11);
+
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addSingleCol(new Period[] {})
+ .addSingleCol(new Period[] {p1, p2, p3})
+ .build();
+ assertEquals(2, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarElementReader colReader = reader.elements(0);
+ assertEquals(ValueType.PERIOD, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertEquals(0, colReader.size());
+
+ assertTrue(reader.next());
+ assertEquals(3, colReader.size());
+ assertEquals(p1, colReader.getPeriod(0));
+ assertEquals(p2, colReader.getPeriod(1));
+ assertEquals(p3, colReader.getPeriod(2));
+ assertEquals(p2, colReader.getObject(1));
+ assertEquals(p2.toString(), colReader.getAsString(1));
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ // Round-trip test for a required INTERVALDAY column. Periods read
+ // back from the vector are not in the same normalized form as the
+ // originals, hence the normalizedStandard() calls below.
+ @Test
+ public void testIntervalDayRW() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .add("col", MinorType.INTERVALDAY)
+ .build();
+
+ Period p1 = Period.days(0);
+ Period p2 = Period.days(3).plusHours(4).plusMinutes(5).plusSeconds(23);
+ Period p3 = Period.days(999).plusHours(23).plusMinutes(59).plusSeconds(59);
+
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(p1)
+ .addRow(p2)
+ .addRow(p3)
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.PERIOD, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ // The normalizedStandard() call is a hack. See DRILL-5689.
+ assertEquals(p1, colReader.getPeriod().normalizedStandard());
+
+ assertTrue(reader.next());
+ assertEquals(p2, colReader.getPeriod().normalizedStandard());
+ assertEquals(p2, ((Period) colReader.getObject()).normalizedStandard());
+ assertEquals(p2.toString(), colReader.getAsString());
+
+ assertTrue(reader.next());
+ assertEquals(p3.normalizedStandard(), colReader.getPeriod().normalizedStandard());
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ // Nullable INTERVALDAY: null rows read back as null Period/Object
+ // and the string "null". See DRILL-5689 for normalizedStandard().
+ @Test
+ public void testNullableIntervalDay() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .addNullable("col", MinorType.INTERVALDAY)
+ .build();
+
+ Period p1 = Period.years(0);
+ Period p2 = Period.days(3).plusHours(4).plusMinutes(5).plusSeconds(23);
+
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(p1)
+ .addSingleCol(null)
+ .addRow(p2)
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.PERIOD, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals(p1, colReader.getPeriod().normalizedStandard());
+
+ assertTrue(reader.next());
+ assertTrue(colReader.isNull());
+ assertNull(colReader.getPeriod());
+ assertNull(colReader.getObject());
+ assertEquals("null", colReader.getAsString());
+
+ assertTrue(reader.next());
+ assertEquals(p2, colReader.getPeriod().normalizedStandard());
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ // Repeated INTERVALDAY: empty array row plus a three-element row,
+ // read back through the element reader. See DRILL-5689 for the
+ // normalizedStandard() calls.
+ @Test
+ public void testIntervalDayArray() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .addArray("col", MinorType.INTERVALDAY)
+ .build();
+
+ Period p1 = Period.days(0);
+ Period p2 = Period.days(3).plusHours(4).plusMinutes(5).plusSeconds(23);
+ Period p3 = Period.days(999).plusHours(23).plusMinutes(59).plusSeconds(59);
+
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addSingleCol(new Period[] {})
+ .addSingleCol(new Period[] {p1, p2, p3})
+ .build();
+ assertEquals(2, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarElementReader colReader = reader.elements(0);
+ assertEquals(ValueType.PERIOD, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertEquals(0, colReader.size());
+
+ assertTrue(reader.next());
+ assertEquals(3, colReader.size());
+ assertEquals(p1, colReader.getPeriod(0).normalizedStandard());
+ assertEquals(p2, colReader.getPeriod(1).normalizedStandard());
+ assertEquals(p3.normalizedStandard(), colReader.getPeriod(2).normalizedStandard());
+ assertEquals(p2, ((Period) colReader.getObject(1)).normalizedStandard());
+ assertEquals(p2.toString(), colReader.getAsString(1));
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ // Round-trip test for a required INTERVAL (year + day + time)
+ // column using Periods that exercise all interval components.
+ @Test
+ public void testIntervalRW() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .add("col", MinorType.INTERVAL)
+ .build();
+
+ Period p1 = Period.days(0);
+ Period p2 = Period.years(7).plusMonths(8)
+ .plusDays(3).plusHours(4)
+ .plusMinutes(5).plusSeconds(23);
+ Period p3 = Period.years(9999).plusMonths(11)
+ .plusDays(365).plusHours(23)
+ .plusMinutes(59).plusSeconds(59);
+
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(p1)
+ .addRow(p2)
+ .addRow(p3)
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.PERIOD, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ // The normalizedStandard() call is a hack. See DRILL-5689.
+ assertEquals(p1, colReader.getPeriod().normalizedStandard());
+
+ assertTrue(reader.next());
+ assertEquals(p2, colReader.getPeriod().normalizedStandard());
+ assertEquals(p2, ((Period) colReader.getObject()).normalizedStandard());
+ assertEquals(p2.toString(), colReader.getAsString());
+
+ assertTrue(reader.next());
+ assertEquals(p3.normalizedStandard(), colReader.getPeriod().normalizedStandard());
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ // Nullable INTERVAL: null rows read back as null Period/Object and
+ // the string "null". See DRILL-5689 for normalizedStandard().
+ @Test
+ public void testNullableInterval() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .addNullable("col", MinorType.INTERVAL)
+ .build();
+
+ Period p1 = Period.years(0);
+ Period p2 = Period.years(7).plusMonths(8)
+ .plusDays(3).plusHours(4)
+ .plusMinutes(5).plusSeconds(23);
+
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(p1)
+ .addSingleCol(null)
+ .addRow(p2)
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.PERIOD, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals(p1, colReader.getPeriod().normalizedStandard());
+
+ assertTrue(reader.next());
+ assertTrue(colReader.isNull());
+ assertNull(colReader.getPeriod());
+ assertNull(colReader.getObject());
+ assertEquals("null", colReader.getAsString());
+
+ assertTrue(reader.next());
+ assertEquals(p2, colReader.getPeriod().normalizedStandard());
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ // Repeated INTERVAL: empty array row plus a three-element row.
+ // See DRILL-5689 for the normalizedStandard() calls.
+ @Test
+ public void testIntervalArray() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .addArray("col", MinorType.INTERVAL)
+ .build();
+
+ Period p1 = Period.days(0);
+ Period p2 = Period.years(7).plusMonths(8)
+ .plusDays(3).plusHours(4)
+ .plusMinutes(5).plusSeconds(23);
+ Period p3 = Period.years(9999).plusMonths(11)
+ .plusDays(365).plusHours(23)
+ .plusMinutes(59).plusSeconds(59);
+
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addSingleCol(new Period[] {})
+ .addSingleCol(new Period[] {p1, p2, p3})
+ .build();
+ assertEquals(2, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarElementReader colReader = reader.elements(0);
+ assertEquals(ValueType.PERIOD, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertEquals(0, colReader.size());
+
+ assertTrue(reader.next());
+ assertEquals(3, colReader.size());
+ assertEquals(p1, colReader.getPeriod(0).normalizedStandard());
+ assertEquals(p2, colReader.getPeriod(1).normalizedStandard());
+ assertEquals(p3.normalizedStandard(), colReader.getPeriod(2).normalizedStandard());
+ assertEquals(p2, ((Period) colReader.getObject(1)).normalizedStandard());
+ assertEquals(p2.toString(), colReader.getAsString(1));
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ // Round-trip test for a required DECIMAL9 column (scale 3,
+ // precision 9, max value 999999.999). BigDecimal comparisons use
+ // compareTo() since equals() also compares scale.
+ @Test
+ public void testDecimal9RW() {
+ MajorType type = MajorType.newBuilder()
+ .setMinorType(MinorType.DECIMAL9)
+ .setScale(3)
+ .setPrecision(9)
+ .setMode(DataMode.REQUIRED)
+ .build();
+ BatchSchema batchSchema = new SchemaBuilder()
+ .add("col", type)
+ .build();
+
+ BigDecimal v1 = BigDecimal.ZERO;
+ BigDecimal v2 = BigDecimal.valueOf(123_456_789, 3);
+ BigDecimal v3 = BigDecimal.valueOf(999_999_999, 3);
+
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(v1)
+ .addRow(v2)
+ .addRow(v3)
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.DECIMAL, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals(0, v1.compareTo(colReader.getDecimal()));
+
+ assertTrue(reader.next());
+ assertEquals(0, v2.compareTo(colReader.getDecimal()));
+ assertEquals(0, v2.compareTo((BigDecimal) colReader.getObject()));
+ assertEquals(v2.toString(), colReader.getAsString());
+
+ assertTrue(reader.next());
+ assertEquals(0, v3.compareTo(colReader.getDecimal()));
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ // Shared helper for the nullable decimal tests: builds a nullable
+ // decimal column of the given minor type and precision (scale is
+ // fixed at 3), writes value / null / value, and verifies the null
+ // row reads back as a null Object and the string "null".
+ private void nullableDecimalTester(MinorType type, int precision) {
+ MajorType majorType = MajorType.newBuilder()
+ .setMinorType(type)
+ .setScale(3)
+ .setPrecision(precision)
+ .setMode(DataMode.OPTIONAL)
+ .build();
+ BatchSchema batchSchema = new SchemaBuilder()
+ .add("col", majorType)
+ .build();
+
+ BigDecimal v1 = BigDecimal.ZERO;
+ BigDecimal v2 = BigDecimal.valueOf(123_456_789, 3);
+
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(v1)
+ .addSingleCol(null)
+ .addRow(v2)
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.DECIMAL, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals(0, v1.compareTo(colReader.getDecimal()));
+
+ assertTrue(reader.next());
+ assertTrue(colReader.isNull());
+ assertNull(colReader.getObject());
+ assertEquals("null", colReader.getAsString());
+
+ assertTrue(reader.next());
+ assertEquals(0, v2.compareTo(colReader.getDecimal()));
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ // Nullable DECIMAL9: delegates to the shared nullable-decimal tester.
+ @Test
+ public void testNullableDecimal9() {
+ nullableDecimalTester(MinorType.DECIMAL9, 9);
+ }
+
+ // Shared helper for the repeated decimal tests: builds a repeated
+ // decimal column of the given minor type and precision (scale is
+ // fixed at 3), writes an empty array row and a three-element row,
+ // and reads the elements back via the element reader.
+ private void decimalArrayTester(MinorType type, int precision) {
+ MajorType majorType = MajorType.newBuilder()
+ .setMinorType(type)
+ .setScale(3)
+ .setPrecision(precision)
+ .setMode(DataMode.REPEATED)
+ .build();
+ BatchSchema batchSchema = new SchemaBuilder()
+ .add("col", majorType)
+ .build();
+
+ BigDecimal v1 = BigDecimal.ZERO;
+ BigDecimal v2 = BigDecimal.valueOf(123_456_789, 3);
+ BigDecimal v3 = BigDecimal.TEN;
+
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addSingleCol(new BigDecimal[] {})
+ .addSingleCol(new BigDecimal[] {v1, v2, v3})
+ .build();
+ assertEquals(2, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarElementReader colReader = reader.elements(0);
+ assertEquals(ValueType.DECIMAL, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertEquals(0, colReader.size());
+
+ assertTrue(reader.next());
+ assertEquals(3, colReader.size());
+ assertEquals(0, v1.compareTo(colReader.getDecimal(0)));
+ assertEquals(0, v2.compareTo(colReader.getDecimal(1)));
+ assertEquals(0, v3.compareTo(colReader.getDecimal(2)));
+ assertEquals(0, v2.compareTo((BigDecimal) colReader.getObject(1)));
+ assertEquals(v2.toString(), colReader.getAsString(1));
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ // Repeated DECIMAL9: delegates to the shared decimal-array tester.
+ @Test
+ public void testDecimal9Array() {
+ decimalArrayTester(MinorType.DECIMAL9, 9);
+ }
+
+ // Round-trip test for a required DECIMAL18 column (scale 3).
+ @Test
+ public void testDecimal18RW() {
+ MajorType type = MajorType.newBuilder()
+ .setMinorType(MinorType.DECIMAL18)
+ .setScale(3)
+ // Precision must cover the 18-digit values written below; the
+ // previous value of 9 was a copy-and-paste from the DECIMAL9 test.
+ .setPrecision(18)
+ .setMode(DataMode.REQUIRED)
+ .build();
+ BatchSchema batchSchema = new SchemaBuilder()
+ .add("col", type)
+ .build();
+
+ BigDecimal v1 = BigDecimal.ZERO;
+ BigDecimal v2 = BigDecimal.valueOf(123_456_789_123_456_789L, 3);
+ BigDecimal v3 = BigDecimal.valueOf(999_999_999_999_999_999L, 3);
+
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(v1)
+ .addRow(v2)
+ .addRow(v3)
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.DECIMAL, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertEquals(0, v1.compareTo(colReader.getDecimal()));
+
+ assertTrue(reader.next());
+ assertEquals(0, v2.compareTo(colReader.getDecimal()));
+ assertEquals(0, v2.compareTo((BigDecimal) colReader.getObject()));
+ assertEquals(v2.toString(), colReader.getAsString());
+
+ assertTrue(reader.next());
+ assertEquals(0, v3.compareTo(colReader.getDecimal()));
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ // Nullable DECIMAL18: use precision 18 (not 9, a copy-and-paste
+ // slip from the DECIMAL9 test) to match the type's capacity.
+ @Test
+ public void testNullableDecimal18() {
+ nullableDecimalTester(MinorType.DECIMAL18, 18);
+ }
+
+ // Repeated DECIMAL18: use precision 18 (not 9, a copy-and-paste
+ // slip from the DECIMAL9 test) to match the type's capacity.
+ @Test
+ public void testDecimal18Array() {
+ decimalArrayTester(MinorType.DECIMAL18, 18);
+ }
+
+ // From the perspective of the vector, a date vector is just a long.
+
+ // DATE is stored as a long, so reuse the generic long round-trip test.
+ @Test
+ public void testDateRW() {
+ longRWTester(MinorType.DATE);
+ }
+
+ // Nullable DATE: reuse the generic nullable-long test.
+ @Test
+ public void testNullableDate() {
+ nullableLongTester(MinorType.DATE);
+ }
+
+ // Repeated DATE: reuse the generic long-array test.
+ @Test
+ public void testDateArray() {
+ longArrayTester(MinorType.DATE);
+ }
+
+ // From the perspective of the vector, a timestamp vector is just a long.
+
+ // TIMESTAMP is stored as a long, so reuse the generic long round-trip test.
+ @Test
+ public void testTimestampRW() {
+ longRWTester(MinorType.TIMESTAMP);
+ }
+
+ // Nullable TIMESTAMP: reuse the generic nullable-long test.
+ @Test
+ public void testNullableTimestamp() {
+ nullableLongTester(MinorType.TIMESTAMP);
+ }
+
+ // Repeated TIMESTAMP: reuse the generic long-array test.
+ @Test
+ public void testTimestampArray() {
+ longArrayTester(MinorType.TIMESTAMP);
+ }
+
+ // Round-trip test for a required VARBINARY column: an empty byte
+ // array and a four-byte value covering the signed/unsigned boundary
+ // bytes (0x00, 0x7f, 0x80, 0xff). getAsString() renders hex.
+ @Test
+ public void testVarBinaryRW() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .add("col", MinorType.VARBINARY)
+ .build();
+
+ byte v1[] = new byte[] {};
+ byte v2[] = new byte[] { (byte) 0x00, (byte) 0x7f, (byte) 0x80, (byte) 0xFF};
+
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(v1)
+ .addRow(v2)
+ .build();
+ assertEquals(2, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.BYTES, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertTrue(Arrays.equals(v1, colReader.getBytes()));
+
+ assertTrue(reader.next());
+ assertTrue(Arrays.equals(v2, colReader.getBytes()));
+ assertTrue(Arrays.equals(v2, (byte[]) colReader.getObject()));
+ assertEquals("[00, 7f, 80, ff]", colReader.getAsString());
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ // Nullable VARBINARY: a null row reads back as a null Object and
+ // the string "null"; non-null rows round-trip byte-for-byte.
+ @Test
+ public void testNullableVarBinary() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .addNullable("col", MinorType.VARBINARY)
+ .build();
+
+ byte v1[] = new byte[] {};
+ byte v2[] = new byte[] { (byte) 0x00, (byte) 0x7f, (byte) 0x80, (byte) 0xFF};
+
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addRow(v1)
+ .addSingleCol(null)
+ .addRow(v2)
+ .build();
+ assertEquals(3, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarReader colReader = reader.scalar(0);
+ assertEquals(ValueType.BYTES, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertFalse(colReader.isNull());
+ assertTrue(Arrays.equals(v1, colReader.getBytes()));
+
+ assertTrue(reader.next());
+ assertTrue(colReader.isNull());
+ assertNull(colReader.getObject());
+ assertEquals("null", colReader.getAsString());
+
+ assertTrue(reader.next());
+ assertTrue(Arrays.equals(v2, colReader.getBytes()));
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+
+ // Repeated VARBINARY: empty array row plus a three-element row
+ // (arbitrary byte patterns), read back via the element reader.
+ @Test
+ public void testVarBinaryArray() {
+ BatchSchema batchSchema = new SchemaBuilder()
+ .addArray("col", MinorType.VARBINARY)
+ .build();
+
+ byte v1[] = new byte[] {};
+ byte v2[] = new byte[] { (byte) 0x00, (byte) 0x7f, (byte) 0x80, (byte) 0xFF};
+ byte v3[] = new byte[] { (byte) 0xDE, (byte) 0xAD, (byte) 0xBE, (byte) 0xAF};
+
+ SingleRowSet rs = fixture.rowSetBuilder(batchSchema)
+ .addSingleCol(new byte[][] {})
+ .addSingleCol(new byte[][] {v1, v2, v3})
+ .build();
+ assertEquals(2, rs.rowCount());
+
+ RowSetReader reader = rs.reader();
+ ScalarElementReader colReader = reader.elements(0);
+ assertEquals(ValueType.BYTES, colReader.valueType());
+
+ assertTrue(reader.next());
+ assertEquals(0, colReader.size());
+
+ assertTrue(reader.next());
+ assertEquals(3, colReader.size());
+ assertTrue(Arrays.equals(v1, colReader.getBytes(0)));
+ assertTrue(Arrays.equals(v2, colReader.getBytes(1)));
+ assertTrue(Arrays.equals(v3, colReader.getBytes(2)));
+ assertTrue(Arrays.equals(v2, (byte[]) colReader.getObject(1)));
+ assertEquals("[00, 7f, 80, ff]", colReader.getAsString(1));
+
+ assertFalse(reader.next());
+ rs.clear();
+ }
+}
http://git-wip-us.apache.org/repos/asf/drill/blob/40de8ca4/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/test/TestVariableWidthWriter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/test/TestVariableWidthWriter.java b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/test/TestVariableWidthWriter.java
new file mode 100644
index 0000000..103b212
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/test/TestVariableWidthWriter.java
@@ -0,0 +1,418 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.test.rowSet.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.drill.common.types.TypeProtos.DataMode;
+import org.apache.drill.common.types.TypeProtos.MinorType;
+import org.apache.drill.exec.record.MaterializedField;
+import org.apache.drill.exec.vector.VarCharVector;
+import org.apache.drill.exec.vector.accessor.ColumnAccessors.VarCharColumnWriter;
+import org.apache.drill.exec.vector.accessor.ScalarWriter;
+import org.apache.drill.exec.vector.accessor.ScalarWriter.ColumnWriterListener;
+import org.apache.drill.exec.vector.accessor.ValueType;
+import org.apache.drill.test.SubOperatorTest;
+import org.apache.drill.test.rowSet.SchemaBuilder;
+import org.apache.drill.test.rowSet.test.TestFixedWidthWriter.TestIndex;
+import org.bouncycastle.util.Arrays;
+import org.junit.Test;
+
+import com.google.common.base.Charsets;
+
+public class TestVariableWidthWriter extends SubOperatorTest {
+
+ /**
+ * Basic test to write a contiguous set of values, enough to cause
+ * the vector to double in size twice, then read back the values.
+ */
+
+ @Test
+ public void testWrite() {
+ try (VarCharVector vector = allocVector(1000)) {
+ TestIndex index = new TestIndex();
+ VarCharColumnWriter writer = makeWriter(vector, index);
+
+ writer.startWrite();
+
+ // Write integers.
+ // Write enough that the vector is resized.
+
+ // Capture the data buffer address before writing so we can
+ // detect that a reallocation actually happened.
+ long origAddr = vector.getBuffer().addr();
+ String base = "sample-value";
+ for (int i = 0; i < 3000; i++) {
+ index.index = i;
+ writer.setString(base + i);
+ }
+ writer.endWrite();
+
+ // Should have been reallocated.
+
+ assertNotEquals(origAddr, vector.getBuffer().addr());
+
+ // Verify values
+
+ for (int i = 0; i < 3000; i++) {
+ assertEquals(base + i, stringAt(vector, i));
+ }
+ }
+ }
+
+ // Verifies restartRow(): every odd-numbered write is abandoned and
+ // overwritten in place, so only the even-numbered values survive.
+ @Test
+ public void testRestartRow() {
+ try (VarCharVector vector = allocVector(1000)) {
+ TestIndex index = new TestIndex();
+ VarCharColumnWriter writer = makeWriter(vector, index);
+ writer.startWrite();
+
+ // Write rows, rewriting every other row.
+
+ String base = "sample-value";
+ writer.startRow();
+ index.index = 0;
+ for (int i = 0; i < 50; i++) {
+ writer.setString(base + i);
+ if (i % 2 == 0) {
+ writer.saveRow();
+ writer.startRow();
+ index.index++;
+ } else {
+ writer.restartRow();
+ }
+ }
+ writer.endWrite();
+
+ // Verify values
+
+ // Row i holds the value written on iteration 2 * i; the odd
+ // iterations were discarded by restartRow().
+ for (int i = 0; i < 25; i++) {
+ assertEquals(base + (2 * i), stringAt(vector, i));
+ }
+ }
+ }
+
+ /**
+ * Filling empties in a variable-width row means carrying forward
+ * offsets (as tested elsewhere), leaving zero-length values.
+ */
+
+ @Test
+ public void testFillEmpties() {
+ try (VarCharVector vector = allocVector(1000)) {
+ TestIndex index = new TestIndex();
+ VarCharColumnWriter writer = makeWriter(vector, index);
+ writer.startWrite();
+
+ // Write values, skipping four out of five positions,
+ // forcing backfill.
+ // The number of values is odd, forcing the writer to
+ // back-fill at the end as well as between values.
+
+ String base = "sample-value";
+ for (int i = 0; i < 501; i += 5) {
+ index.index = i;
+ writer.startRow();
+ writer.setString(base + i);
+ writer.saveRow();
+ }
+ // At end, vector index defined to point one past the
+ // last row. That is, the vector index gives the row count.
+
+ index.index = 504;
+ writer.endWrite();
+
+ // Verify values
+
+ // Skipped positions must read back as zero-length strings,
+ // not garbage: the writer back-fills their offsets.
+ for (int i = 0; i < 504; i++) {
+ assertEquals("Mismatch on " + i,
+ (i%5) == 0 ? base + i : "", stringAt(vector, i));
+ }
+ }
+ }
+
+ /**
+ * The rollover method is used during vector overflow.
+ */
+
+ @Test
+ public void testRollover() {
+ try (VarCharVector vector = allocVector(1000)) {
+ TestIndex index = new TestIndex();
+ VarCharColumnWriter writer = makeWriter(vector, index);
+ writer.startWrite();
+
+ // Simulate doing an overflow of ten values.
+
+ String base = "sample-value";
+ for (int i = 0; i < 10; i++) {
+ index.index = i;
+ writer.startRow();
+ writer.setString(base + i);
+ writer.saveRow();
+ }
+
+ // Overflow occurs after writing the 11th row
+
+ index.index = 10;
+ writer.startRow();
+ String overflowValue = base + 10;
+ writer.setString(overflowValue);
+
+ // Overflow occurs
+
+ writer.preRollover();
+
+ // Simulate rollover
+
+ // Trash the vector contents to prove the writer does not depend
+ // on them after rollover, then re-write the in-flight value at
+ // position 0 as the harness would.
+ byte dummy[] = new byte[] { (byte) 0x55 };
+ for (int i = 0; i < 500; i++) {
+ vector.getMutator().setSafe(i, dummy);
+ }
+ for (int i = 1; i < 15; i++) {
+ vector.getOffsetVector().getMutator().set(i, 0xdeadbeef);
+ }
+ vector.getMutator().setSafe(0, overflowValue.getBytes(Charsets.UTF_8));
+
+ writer.postRollover();
+ index.index = 0;
+ writer.saveRow();
+
+ // Simulate resuming with a few more values.
+
+ for (int i = 1; i < 5; i++) {
+ index.index = i;
+ writer.startRow();
+ writer.setString(base + (i + 10));
+ writer.saveRow();
+ }
+ writer.endWrite();
+
+ // Verify the results
+
+ // The new batch starts with the carried-over 11th value.
+ for (int i = 0; i < 5; i++) {
+ assertEquals(base + (10 + i), stringAt(vector, i));
+ }
+ }
+ }
+
+ /**
+ * Simulate the case in which the tail end of an overflow
+ * batch has empties. <tt>preRollover()</tt> should back-fill
+ * them with the next offset prior to rollover.
+ */
+
+ @Test
+ public void testRolloverWithEmpties() {
+ try (VarCharVector vector = allocVector(1000)) {
+ TestIndex index = new TestIndex();
+ VarCharColumnWriter writer = makeWriter(vector, index);
+ writer.startWrite();
+
+ // Simulate doing an overflow of 15 values,
+ // of which 5 are empty.
+
+ String base = "sample-value";
+ for (int i = 0; i < 10; i++) {
+ index.index = i;
+ writer.startRow();
+ writer.setString(base + i);
+ writer.saveRow();
+ }
+
+ // Rows 10-14: saved with no value written, leaving empty entries.
+ for (int i = 10; i < 15; i++) {
+ index.index = i;
+ writer.startRow();
+ writer.saveRow();
+ }
+
+ // Overflow occurs before writing the 16th row
+
+ index.index = 15;
+ writer.startRow();
+
+ // Overflow occurs. This should fill empty offsets.
+
+ writer.preRollover();
+
+ // Verify the first "batch" results
+
+ for (int i = 0; i < 10; i++) {
+ assertEquals(base + i, stringAt(vector, i));
+ }
+ // The trailing empties must read as zero-length strings, which
+ // proves preRollover() back-filled their offsets.
+ for (int i = 10; i < 15; i++) {
+ assertEquals("", stringAt(vector, i));
+ }
+
+ // Simulate rollover
+
+ // Trash the vector contents to prove the writer does not depend
+ // on them after rollover.
+ byte dummy[] = new byte[] { (byte) 0x55 };
+ for (int i = 0; i < 500; i++) {
+ vector.getMutator().setSafe(i, dummy);
+ }
+ for (int i = 1; i < 15; i++) {
+ vector.getOffsetVector().getMutator().set(i, 0xdeadbeef);
+ }
+ vector.getMutator().setSafe(0, new byte[] {});
+
+ writer.postRollover();
+ index.index = 0;
+ writer.saveRow();
+
+ // Skip more values.
+
+ for (int i = 1; i < 5; i++) {
+ index.index = i;
+ writer.startRow();
+ writer.saveRow();
+ }
+
+ // Simulate resuming with a few more values.
+
+ for (int i = 5; i < 10; i++) {
+ index.index = i;
+ writer.startRow();
+ writer.setString(base + (i + 20));
+ writer.saveRow();
+ }
+ writer.endWrite();
+
+ // Verify the results
+
+ for (int i = 0; i < 5; i++) {
+ assertEquals("", stringAt(vector, i));
+ }
+ for (int i = 5; i < 10; i++) {
+ assertEquals(base + (i + 20), stringAt(vector, i));
+ }
+ }
+ }
+
+
+ /**
+ * Test the case in which a scalar vector is used in conjunction
+ * with a nullable bits vector. The nullable vector will call the
+ * <tt>skipNulls()</tt> method to avoid writing values for null
+ * entries. For variable-width, there is no difference between
+ * filling empties and skipping nulls: both result in zero-sized
+ * entries.
+ */
+
+ @Test
+ public void testSkipNulls() {
+ try (VarCharVector vector = allocVector(1000)) {
+ TestIndex index = new TestIndex();
+ VarCharColumnWriter writer = makeWriter(vector, index);
+ writer.startWrite();
+
+ // Write values, skipping four out of five positions,
+ // skipping nulls.
+ // The number of values is odd, forcing the writer to
+ // skip nulls at the end as well as between values.
+
+ String base = "sample-value";
+ for (int i = 0; i < 3000; i += 5) {
+ index.index = i;
+ writer.startRow();
+ writer.skipNulls();
+ writer.setString(base + i);
+ writer.saveRow();
+ }
+ // Final row (index 3003) skips nulls with no value written.
+ index.index = 3003;
+ writer.startRow();
+ writer.skipNulls();
+ writer.saveRow();
+ writer.endWrite();
+
+ // Verify values. Skipping nulls should back-fill
+ // offsets, resulting in zero-length strings.
+
+ for (int i = 0; i < 3000; i++) {
+ assertEquals("Mismatch at " + i,
+ (i%5) == 0 ? base + i : "", stringAt(vector, i));
+ }
+ }
+ }
+
+ /**
+ * Test resize monitoring. Add a listener to a Varchar writer,
+ * capture each resize, and refuse a resize when the size
+ * of the vector exceeds 1 MB. This will trigger an overflow,
+ * which will throw an exception which we then check for.
+ */
+
+ @Test
+ public void testSizeLimit() {
+ try (VarCharVector vector = allocVector(1000)) {
+ TestIndex index = new TestIndex();
+ VarCharColumnWriter writer = makeWriter(vector, index);
+ writer.bindListener(new ColumnWriterListener() {
+ // Because assumed array size is 10, so 10 * 1000 = 10,000
+ // rounded to 16K
+ int totalAlloc = 16384;
+
+ @Override
+ public void overflowed(ScalarWriter writer) {
+ throw new IllegalStateException("overflow called");
+ }
+
+ // Track cumulative allocation; refuse growth past 1 MB so
+ // the writer is forced to signal overflow. (Debug println
+ // removed: tests should not write to stdout.)
+ @Override
+ public boolean canExpand(ScalarWriter writer, int delta) {
+ totalAlloc += delta;
+ return totalAlloc < 1024 * 1024;
+ }
+ });
+ writer.startWrite();
+
+ // Use java.util.Arrays explicitly: the file's plain "Arrays"
+ // import resolves to org.bouncycastle.util.Arrays, which this
+ // test should not depend on.
+ byte value[] = new byte[423];
+ java.util.Arrays.fill(value, (byte) 'X');
+ try {
+ for (int i = 0; ; i++ ) {
+ index.index = i;
+ writer.startRow();
+ writer.setBytes(value, value.length);
+ writer.saveRow();
+ }
+ }
+ catch(IllegalStateException e) {
+ assertTrue(e.getMessage().contains("overflow called"));
+ }
+ }
+ }
+
+ // Decodes entry i of the vector as a UTF-8 string.
+ private String stringAt(VarCharVector vector, int i) {
+ return new String(vector.getAccessor().get(i), Charsets.UTF_8);
+ }
+
+ // Creates a required VARCHAR vector sized for {@code size} entries,
+ // assuming an average of 10 data bytes per entry.
+ private VarCharVector allocVector(int size) {
+ MaterializedField field =
+ SchemaBuilder.columnSchema("x", MinorType.VARCHAR, DataMode.REQUIRED);
+ VarCharVector vector = new VarCharVector(field, fixture.allocator());
+ vector.allocateNew(size * 10, size);
+ return vector;
+ }
+
+ // Wraps the vector in a column writer bound to the test index and
+ // sanity-checks the writer's value type.
+ private VarCharColumnWriter makeWriter(VarCharVector vector, TestIndex index) {
+ VarCharColumnWriter writer = new VarCharColumnWriter(vector);
+ writer.bindIndex(index);
+
+ assertEquals(ValueType.STRING, writer.valueType());
+ return writer;
+ }
+}
http://git-wip-us.apache.org/repos/asf/drill/blob/40de8ca4/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/test/VectorPrinter.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/test/VectorPrinter.java b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/test/VectorPrinter.java
new file mode 100644
index 0000000..2056220
--- /dev/null
+++ b/exec/java-exec/src/test/java/org/apache/drill/test/rowSet/test/VectorPrinter.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.drill.test.rowSet.test;
+
+import org.apache.drill.exec.vector.UInt4Vector;
+import org.apache.drill.exec.vector.ValueVector;
+import org.apache.drill.exec.vector.VarCharVector;
+
+import com.google.common.base.Charsets;
+
+/**
+ * Handy tool to visualize string and offset vectors for
+ * debugging.
+ */
+
+public class VectorPrinter {
+
+ /**
+ * Prints offsets [start, start + length) of an offset vector,
+ * followed by the buffer address (handy for spotting reallocations).
+ */
+ public static void printOffsets(UInt4Vector vector, int start, int length) {
+ header(vector, start, length);
+ for (int i = start, j = 0; j < length; i++, j++) {
+ if (j > 0) {
+ System.out.print(" ");
+ }
+ System.out.print(vector.getAccessor().get(i));
+ }
+ System.out.print("], addr = ");
+ System.out.println(vector.getBuffer().addr());
+ }
+
+ /**
+ * Prints strings [start, start + length) of a VarChar vector,
+ * preceded by its offsets. One extra offset is printed since entry
+ * i spans offsets i and i + 1.
+ */
+ public static void printStrings(VarCharVector vector, int start, int length) {
+ printOffsets(vector.getOffsetVector(), start, length + 1);
+ header(vector, start, length);
+ System.out.println();
+ for (int i = start, j = 0; j < length; i++, j++) {
+ System.out.print(" ");
+ System.out.print(i);
+ System.out.print(": \"");
+ System.out.print(stringAt(vector, i));
+ System.out.println("\"");
+ }
+ System.out.println("]");
+ }
+
+ // Prints the common prefix: the vector class and the index range.
+ public static void header(ValueVector vector, int start, int length) {
+ System.out.print(vector.getClass());
+ System.out.print(": (");
+ System.out.print(start);
+ System.out.print(" - ");
+ System.out.print(start + length - 1);
+ System.out.print("): [");
+ }
+
+ // Decodes entry i of the vector as a UTF-8 string.
+ public static String stringAt(VarCharVector vector, int i) {
+ return new String(vector.getAccessor().get(i), Charsets.UTF_8);
+ }
+
+}
http://git-wip-us.apache.org/repos/asf/drill/blob/40de8ca4/exec/java-exec/src/test/java/org/apache/drill/vector/TestFillEmpties.java
----------------------------------------------------------------------
diff --git a/exec/java-exec/src/test/java/org/apache/drill/vector/TestFillEmpties.java b/exec/java-exec/src/test/java/org/apache/drill/vector/TestFillEmpties.java
index 4da526e..f3390d3 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/vector/TestFillEmpties.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/vector/TestFillEmpties.java
@@ -19,59 +19,30 @@
package org.apache.drill.vector;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
import org.apache.drill.categories.VectorTest;
import org.apache.drill.common.types.TypeProtos.DataMode;
-import org.apache.drill.common.types.TypeProtos.MajorType;
import org.apache.drill.common.types.TypeProtos.MinorType;
-import org.apache.drill.exec.record.MaterializedField;
import org.apache.drill.exec.vector.BaseDataValueVector;
import org.apache.drill.exec.vector.IntVector;
import org.apache.drill.exec.vector.NullableVarCharVector;
import org.apache.drill.exec.vector.RepeatedVarCharVector;
import org.apache.drill.exec.vector.UInt4Vector;
import org.apache.drill.exec.vector.VarCharVector;
-import org.apache.drill.exec.vector.VectorOverflowException;
-import org.apache.drill.test.DrillTest;
-import org.apache.drill.test.OperatorFixture;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import org.apache.drill.test.SubOperatorTest;
+import org.apache.drill.test.rowSet.SchemaBuilder;
import org.junit.Test;
import io.netty.buffer.DrillBuf;
import org.junit.experimental.categories.Category;
@Category(VectorTest.class)
-public class TestFillEmpties extends DrillTest {
-
- public static OperatorFixture fixture;
-
- @BeforeClass
- public static void setUpBeforeClass() throws Exception {
- fixture = OperatorFixture.builder().build();
- }
-
- @AfterClass
- public static void tearDownAfterClass() throws Exception {
- fixture.close();
- }
-
- // To be replaced by a test method in a separate commit.
-
- public static MaterializedField makeField(String name, MinorType dataType, DataMode mode) {
- MajorType type = MajorType.newBuilder()
- .setMinorType(dataType)
- .setMode(mode)
- .build();
-
- return MaterializedField.create(name, type);
- }
+public class TestFillEmpties extends SubOperatorTest {
@Test
public void testNullableVarChar() {
@SuppressWarnings("resource")
- NullableVarCharVector vector = new NullableVarCharVector(makeField("a", MinorType.VARCHAR, DataMode.OPTIONAL), fixture.allocator());
+ NullableVarCharVector vector = new NullableVarCharVector(SchemaBuilder.columnSchema("a", MinorType.VARCHAR, DataMode.OPTIONAL), fixture.allocator());
vector.allocateNew( );
// Create "foo", null, "bar", but omit the null.
@@ -91,7 +62,7 @@ public class TestFillEmpties extends DrillTest {
@Test
public void testVarChar() {
@SuppressWarnings("resource")
- VarCharVector vector = new VarCharVector(makeField("a", MinorType.VARCHAR, DataMode.REQUIRED), fixture.allocator());
+ VarCharVector vector = new VarCharVector(SchemaBuilder.columnSchema("a", MinorType.VARCHAR, DataMode.REQUIRED), fixture.allocator());
vector.allocateNew( );
// Create "foo", null, "bar", but omit the null.
@@ -103,11 +74,7 @@ public class TestFillEmpties extends DrillTest {
// Work around: test fails without this. But, only the new column writers
// call this method.
- try {
- mutator.fillEmptiesBounded(0, 2);
- } catch (VectorOverflowException e) {
- fail();
- }
+ mutator.fillEmpties(0, 2);
value = makeValue("bar");
mutator.setSafe(2, value, 0, value.length);
@@ -119,7 +86,7 @@ public class TestFillEmpties extends DrillTest {
@Test
public void testInt() {
@SuppressWarnings("resource")
- IntVector vector = new IntVector(makeField("a", MinorType.INT, DataMode.REQUIRED), fixture.allocator());
+ IntVector vector = new IntVector(SchemaBuilder.columnSchema("a", MinorType.INT, DataMode.REQUIRED), fixture.allocator());
vector.allocateNew( );
// Create 1, 0, 2, but omit the 0.
@@ -136,7 +103,7 @@ public class TestFillEmpties extends DrillTest {
@Test
public void testRepeatedVarChar() {
@SuppressWarnings("resource")
- RepeatedVarCharVector vector = new RepeatedVarCharVector(makeField("a", MinorType.VARCHAR, DataMode.REPEATED), fixture.allocator());
+ RepeatedVarCharVector vector = new RepeatedVarCharVector(SchemaBuilder.columnSchema("a", MinorType.VARCHAR, DataMode.REPEATED), fixture.allocator());
vector.allocateNew( );
// Create "foo", null, "bar", but omit the null.
@@ -151,11 +118,7 @@ public class TestFillEmpties extends DrillTest {
// Work around: test fails without this. But, only the new column writers
// call this method.
- try {
- mutator.fillEmptiesBounded(0, 2);
- } catch (VectorOverflowException e) {
- fail();
- }
+ mutator.fillEmpties(0, 2);
mutator.startNewValue(2);
value = makeValue( "c" );
mutator.addSafe(2, value, 0, value.length);