Posted to commits@accumulo.apache.org by el...@apache.org on 2013/11/15 20:43:30 UTC

[1/4] git commit: ACCUMULO-1892 changes RandomBatchWriter to ensure it writes the specified number of rowids.

Updated Branches:
  refs/heads/1.5.1-SNAPSHOT ac20fe06c -> 1261625b3


ACCUMULO-1892 changes RandomBatchWriter to ensure it writes the specified number of rowids.

Signed-off-by: Keith Turner <kt...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/4119611e
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/4119611e
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/4119611e

Branch: refs/heads/1.5.1-SNAPSHOT
Commit: 4119611eea4bc5780761ec17691483dac3e95f47
Parents: 268028f
Author: Sean Busbey <bu...@clouderagovt.com>
Authored: Thu Nov 14 09:13:00 2013 -0600
Committer: Keith Turner <kt...@apache.org>
Committed: Fri Nov 15 13:36:24 2013 -0500

----------------------------------------------------------------------
 .../examples/simple/client/RandomBatchWriter.java  | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/4119611e/src/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
----------------------------------------------------------------------
diff --git a/src/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java b/src/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
index 3206fa6..71f9fec 100644
--- a/src/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
+++ b/src/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
@@ -112,7 +112,7 @@ public class RandomBatchWriter {
     if (index != 13) {
       System.out
           .println("Usage : RandomBatchWriter [-s <seed>] <instance name> <zoo keepers> <username> <password> <table> <num> <min> <max> <value size> <max memory> <max latency> <num threads> <visibility>");
-      return;
+      System.exit(1);
     }
     
     String instanceName = processedArgs[0];
@@ -128,6 +128,11 @@ public class RandomBatchWriter {
     long maxLatency = Long.parseLong(processedArgs[10]) == 0 ? Long.MAX_VALUE : Long.parseLong(processedArgs[10]);
     int numThreads = Integer.parseInt(processedArgs[11]);
     String visiblity = processedArgs[12];
+
+    if ((max - min) < num) {
+      System.err.println(String.format("You must specify a min and a max that allow for at least num possible values. For example, you requested %d rows, but a min of %d and a max of %d only allows for %d rows.", num, min, max, (max-min)));
+      System.exit(1);
+    }
     
     // Uncomment the following lines for detailed debugging info
     // Logger logger = Logger.getLogger(Constants.CORE_PACKAGE_NAME);
@@ -147,9 +152,12 @@ public class RandomBatchWriter {
     // reuse the ColumnVisibility object to improve performance
     ColumnVisibility cv = new ColumnVisibility(visiblity);
     
-    for (int i = 0; i < num; i++) {
-      
-      long rowid = (Math.abs(r.nextLong()) % (max - min)) + min;
+    // Generate num unique row ids in the given range
+    HashSet<Long> rowids = new HashSet<Long>(num);
+    while (rowids.size() < num) {
+      rowids.add((Math.abs(r.nextLong()) % (max - min)) + min);
+    }
+    for (long rowid : rowids) {
       
       Mutation m = createMutation(rowid, valueSize, cv);
       
@@ -171,6 +179,7 @@ public class RandomBatchWriter {
       if (e.getConstraintViolationSummaries().size() > 0) {
         System.err.println("ERROR : Constraint violations occurred : " + e.getConstraintViolationSummaries());
       }
+      System.exit(1);
     }
   }
 }
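
For reference, the change above boils down to two pieces: a sanity check that the [min, max) range can actually hold the requested number of distinct row ids, and a loop that keeps drawing random ids into a HashSet until exactly num unique values exist. A minimal standalone sketch of that idea follows; the class name and the min/max/num values are hypothetical and chosen only for illustration, and the real example builds mutations with createMutation(rowid, valueSize, cv) instead of printing.

import java.util.HashSet;
import java.util.Random;
import java.util.Set;

public class UniqueRowIdSketch {
  public static void main(String[] args) {
    long min = 0, max = 1000; // hypothetical range of row ids
    int num = 100;            // hypothetical number of rows to write
    Random r = new Random();

    // Fail fast if the range cannot possibly hold num distinct ids.
    if ((max - min) < num) {
      System.err.println("Range [" + min + ", " + max + ") cannot hold " + num + " distinct row ids");
      System.exit(1);
    }

    // A HashSet silently drops duplicate draws, so this loop only
    // terminates once num unique row ids have been generated.
    Set<Long> rowids = new HashSet<Long>(num);
    while (rowids.size() < num) {
      rowids.add((Math.abs(r.nextLong()) % (max - min)) + min);
    }

    for (long rowid : rowids) {
      // The real example would create and add a Mutation here;
      // printing the formatted row key stands in for that step.
      System.out.println(String.format("row_%010d", rowid));
    }
  }
}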


Re: [4/4] git commit: Merge branch '1.4.5-SNAPSHOT' into 1.5.1-SNAPSHOT

Posted by Keith Turner <ke...@deenlo.com>.
On Sat, Nov 16, 2013 at 12:49 AM, Josh Elser <jo...@gmail.com> wrote:

> Haven't looked at code (what the code in question does or what the
> functional tests do)... but that means we don't have unit test coverage
> here either?


Some important bugs cannot easily have unit tests (performance problems,
concurrency bugs, memory exhaustion bugs, etc.). This example is eventually
used in a functional test, so maybe it would have eventually caused a test
failure?


>
>
> On 11/16/2013 12:41 AM, Sean Busbey wrote:
>
>> Just a note on not hitting any failed tests: without ACCUMULO-1878, the
>> mismatch between RandomBatchWriter and RandomBatchReader just fails
>> silently in the functional tests.
>>
>>
>> On Fri, Nov 15, 2013 at 11:36 PM, Josh Elser <jo...@gmail.com>
>> wrote:
>>
>>  Obviously I saw the conflict as I had thought I had correctly resolved
>>> it.
>>> I guess not.
>>>
>>> I had also assumed that a test would have failed on me if I had merged it
>>> incorrectly. Also an incorrect assumption, apparently.
>>>
>>> I don't really remember anymore, I think I took the changes from 1.4.5.
>>> Sorry for catching you mid-merge.
>>>
>>>
>>> On 11/15/2013 3:42 PM, Keith Turner wrote:
>>>
>>>  Josh,
>>>>
>>>> The conflict from the merge was not resolved correctly.  I was working
>>>> on
>>>> resolving this conflict but you pushed before I did.   I am really
>>>> curious
>>>> what happened, I want to make sure we are not dropping important changes
>>>> when resolving conflicts during merge.  When merging 1.4 to 1.5 I saw
>>>> the
>>>> following conflict.   Did you see this conflict?
>>>>
>>>> <<<<<<<
>>>> HEAD:examples/simple/src/main/java/org/apache/accumulo/
>>>> examples/simple/client/RandomBatchWriter.java
>>>>       for (int i = 0; i < opts.num; i++) {
>>>>
>>>>         long rowid = (Math.abs(r.nextLong()) % (opts.max - opts.min)) +
>>>> opts.min;
>>>> =======
>>>>       // Generate num unique row ids in the given range
>>>>       HashSet<Long> rowids = new HashSet<Long>(num);
>>>>       while (rowids.size() < num) {
>>>>         rowids.add((Math.abs(r.nextLong()) % (max - min)) + min);
>>>>       }
>>>>       for (long rowid : rowids) {
>>>>
>>>>
>>>> >>>>>>> origin/1.4.5-SNAPSHOT:src/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
>>>>
>>>> Keith
>>>>
>>>>
>>>>
>>>> On Fri, Nov 15, 2013 at 2:43 PM, <el...@apache.org> wrote:
>>>>
>>>>   Merge branch '1.4.5-SNAPSHOT' into 1.5.1-SNAPSHOT
>>>>
>>>>>
>>>>> Conflicts:
>>>>>
>>>>> examples/simple/src/main/java/org/apache/accumulo/examples/
>>>>> simple/client/RandomBatchWriter.java
>>>>>
>>>>>
>>>>> Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
>>>>> Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/
>>>>> 1261625b
>>>>> Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/1261625b
>>>>> Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/1261625b
>>>>>
>>>>> Branch: refs/heads/1.5.1-SNAPSHOT
>>>>> Commit: 1261625b30691d57473efed313f3baf841d791e6
>>>>> Parents: ac20fe0 a40a6d4
>>>>> Author: Josh Elser <el...@apache.org>
>>>>> Authored: Fri Nov 15 11:43:10 2013 -0800
>>>>> Committer: Josh Elser <el...@apache.org>
>>>>> Committed: Fri Nov 15 11:43:10 2013 -0800
>>>>>
>>>>> ----------------------------------------------------------------------
>>>>>    .../simple/client/RandomBatchWriter.java        |  1 +
>>>>>    .../minicluster/MiniAccumuloClusterGCTest.java  | 31
>>>>> ++++++++++++++++----
>>>>>    2 files changed, 27 insertions(+), 5 deletions(-)
>>>>> ----------------------------------------------------------------------
>>>>>
>>>>>
>>>>>
>>>>> http://git-wip-us.apache.org/repos/asf/accumulo/blob/
>>>>> 1261625b/examples/simple/src/main/java/org/apache/accumulo/
>>>>> examples/simple/client/RandomBatchWriter.java
>>>>> ----------------------------------------------------------------------
>>>>> diff --cc
>>>>> examples/simple/src/main/java/org/apache/accumulo/examples/
>>>>> simple/client/RandomBatchWriter.java
>>>>> index 886c4ba,0000000..f9acfd9
>>>>> mode 100644,000000..100644
>>>>> ---
>>>>> a/examples/simple/src/main/java/org/apache/accumulo/
>>>>> examples/simple/client/RandomBatchWriter.java
>>>>> +++
>>>>> b/examples/simple/src/main/java/org/apache/accumulo/
>>>>> examples/simple/client/RandomBatchWriter.java
>>>>> @@@ -1,169 -1,0 +1,170 @@@
>>>>>    +/*
>>>>>    + * Licensed to the Apache Software Foundation (ASF) under one or
>>>>> more
>>>>>    + * contributor license agreements.  See the NOTICE file distributed
>>>>> with
>>>>>    + * this work for additional information regarding copyright
>>>>> ownership.
>>>>>    + * The ASF licenses this file to You under the Apache License,
>>>>> Version
>>>>> 2.0
>>>>>    + * (the "License"); you may not use this file except in compliance
>>>>> with
>>>>>    + * the License.  You may obtain a copy of the License at
>>>>>    + *
>>>>>    + *     http://www.apache.org/licenses/LICENSE-2.0
>>>>>    + *
>>>>>    + * Unless required by applicable law or agreed to in writing,
>>>>> software
>>>>>    + * distributed under the License is distributed on an "AS IS"
>>>>> BASIS,
>>>>>    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
>>>>> implied.
>>>>>    + * See the License for the specific language governing permissions
>>>>> and
>>>>>    + * limitations under the License.
>>>>>    + */
>>>>>    +package org.apache.accumulo.examples.simple.client;
>>>>>    +
>>>>>    +import java.util.HashMap;
>>>>>    +import java.util.HashSet;
>>>>>    +import java.util.Map.Entry;
>>>>>    +import java.util.Random;
>>>>>    +import java.util.Set;
>>>>>    +
>>>>>    +import org.apache.accumulo.core.cli.BatchWriterOpts;
>>>>>    +import org.apache.accumulo.core.cli.ClientOnRequiredTable;
>>>>>    +import org.apache.accumulo.core.client.AccumuloException;
>>>>>    +import org.apache.accumulo.core.client.AccumuloSecurityException;
>>>>>    +import org.apache.accumulo.core.client.BatchWriter;
>>>>>    +import org.apache.accumulo.core.client.Connector;
>>>>>    +import org.apache.accumulo.core.client.MutationsRejectedException;
>>>>>    +import org.apache.accumulo.core.client.TableNotFoundException;
>>>>>    +import org.apache.accumulo.core.client.security.SecurityErrorCode;
>>>>>    +import org.apache.accumulo.core.data.KeyExtent;
>>>>>    +import org.apache.accumulo.core.data.Mutation;
>>>>>    +import org.apache.accumulo.core.data.Value;
>>>>>    +import org.apache.accumulo.core.security.ColumnVisibility;
>>>>>    +import org.apache.hadoop.io.Text;
>>>>>    +
>>>>>    +import com.beust.jcommander.Parameter;
>>>>>    +
>>>>>    +/**
>>>>>    + * Simple example for writing random data to Accumulo. See
>>>>> docs/examples/README.batch for instructions.
>>>>>    + *
>>>>>    + * The rows of the entries will be randomly generated numbers
>>>>> between
>>>>> a
>>>>> specified min and max (prefixed by "row_"). The column families will be
>>>>> "foo" and column
>>>>>    + * qualifiers will be "1". The values will be random byte arrays
>>>>> of a
>>>>> specified size.
>>>>>    + */
>>>>>    +public class RandomBatchWriter {
>>>>>    +
>>>>>    +  /**
>>>>>    +   * Creates a random byte array of specified size using the
>>>>> specified
>>>>> seed.
>>>>>    +   *
>>>>>    +   * @param rowid
>>>>>    +   *          the seed to use for the random number generator
>>>>>    +   * @param dataSize
>>>>>    +   *          the size of the array
>>>>>    +   * @return a random byte array
>>>>>    +   */
>>>>>    +  public static byte[] createValue(long rowid, int dataSize) {
>>>>>    +    Random r = new Random(rowid);
>>>>>    +    byte value[] = new byte[dataSize];
>>>>>    +
>>>>>    +    r.nextBytes(value);
>>>>>    +
>>>>>    +    // transform to printable chars
>>>>>    +    for (int j = 0; j < value.length; j++) {
>>>>>    +      value[j] = (byte) (((0xff & value[j]) % 92) + ' ');
>>>>>    +    }
>>>>>    +
>>>>>    +    return value;
>>>>>    +  }
>>>>>    +
>>>>>    +  /**
>>>>>    +   * Creates a mutation on a specified row with column family
>>>>> "foo",
>>>>> column qualifier "1", specified visibility, and a random value of
>>>>> specified
>>>>> size.
>>>>>    +   *
>>>>>    +   * @param rowid
>>>>>    +   *          the row of the mutation
>>>>>    +   * @param dataSize
>>>>>    +   *          the size of the random value
>>>>>    +   * @param visibility
>>>>>    +   *          the visibility of the entry to insert
>>>>>    +   * @return a mutation
>>>>>    +   */
>>>>>    +  public static Mutation createMutation(long rowid, int dataSize,
>>>>> ColumnVisibility visibility) {
>>>>>    +    Text row = new Text(String.format("row_%010d", rowid));
>>>>>    +
>>>>>    +    Mutation m = new Mutation(row);
>>>>>    +
>>>>>    +    // create a random value that is a function of the
>>>>>    +    // row id for verification purposes
>>>>>    +    byte value[] = createValue(rowid, dataSize);
>>>>>    +
>>>>>    +    m.put(new Text("foo"), new Text("1"), visibility, new
>>>>> Value(value));
>>>>>    +
>>>>>    +    return m;
>>>>>    +  }
>>>>>    +
>>>>>    +  static class Opts extends ClientOnRequiredTable {
>>>>>    +    @Parameter(names="--num", required=true)
>>>>>    +    int num = 0;
>>>>>    +    @Parameter(names="--min")
>>>>>    +    long min = 0;
>>>>>    +    @Parameter(names="--max")
>>>>>    +    long max = Long.MAX_VALUE;
>>>>>    +    @Parameter(names="--size", required=true, description="size of
>>>>> the
>>>>> value to write")
>>>>>    +    int size = 0;
>>>>>    +    @Parameter(names="--vis", converter=VisibilityConverter.class)
>>>>>    +    ColumnVisibility visiblity = new ColumnVisibility("");
>>>>>    +    @Parameter(names="--seed", description="seed for pseudo-random
>>>>> number generator")
>>>>>    +    Long seed = null;
>>>>>    +  }
>>>>>    +
>>>>>    +  /**
>>>>>    +   * Writes a specified number of entries to Accumulo using a
>>>>> {@link
>>>>> BatchWriter}.
>>>>>    +   *
>>>>>    +   * @throws AccumuloException
>>>>>    +   * @throws AccumuloSecurityException
>>>>>    +   * @throws TableNotFoundException
>>>>>    +   */
>>>>>    +  public static void main(String[] args) throws AccumuloException,
>>>>> AccumuloSecurityException, TableNotFoundException {
>>>>>    +    Opts opts = new Opts();
>>>>>    +    BatchWriterOpts bwOpts = new BatchWriterOpts();
>>>>>    +    opts.parseArgs(RandomBatchWriter.class.getName(), args,
>>>>> bwOpts);
>>>>>    +
>>>>>    +    Random r;
>>>>>    +    if (opts.seed == null)
>>>>>    +      r = new Random();
>>>>>    +    else {
>>>>>    +      r = new Random(opts.seed);
>>>>>    +    }
>>>>>    +
>>>>>    +    Connector connector = opts.getConnector();
>>>>>    +    BatchWriter bw = connector.createBatchWriter(opts.tableName,
>>>>> bwOpts.getBatchWriterConfig());
>>>>>    +
>>>>>    +    // reuse the ColumnVisibility object to improve performance
>>>>>    +    ColumnVisibility cv = opts.visiblity;
>>>>>    +
>>>>>    +    for (int i = 0; i < opts.num; i++) {
>>>>>    +
>>>>>    +      long rowid = (Math.abs(r.nextLong()) % (opts.max -
>>>>> opts.min)) +
>>>>> opts.min;
>>>>>    +
>>>>>    +      Mutation m = createMutation(rowid, opts.size, cv);
>>>>>    +
>>>>>    +      bw.addMutation(m);
>>>>>    +
>>>>>    +    }
>>>>>    +
>>>>>    +    try {
>>>>>    +      bw.close();
>>>>>    +    } catch (MutationsRejectedException e) {
>>>>>    +      if (e.getAuthorizationFailuresMap().size() > 0) {
>>>>>    +        HashMap<String,Set<SecurityErrorCode>> tables = new
>>>>> HashMap<String,Set<SecurityErrorCode>>();
>>>>>    +        for (Entry<KeyExtent,Set<SecurityErrorCode>> ke :
>>>>> e.getAuthorizationFailuresMap().entrySet()) {
>>>>>    +          Set<SecurityErrorCode> secCodes =
>>>>> tables.get(ke.getKey().getTableId().toString());
>>>>>    +          if (secCodes == null) {
>>>>>    +            secCodes = new HashSet<SecurityErrorCode>();
>>>>>    +            tables.put(ke.getKey().getTableId().toString(),
>>>>> secCodes);
>>>>>    +          }
>>>>>    +          secCodes.addAll(ke.getValue());
>>>>>    +        }
>>>>>    +        System.err.println("ERROR : Not authorized to write to
>>>>> tables
>>>>> :
>>>>> " + tables);
>>>>>    +      }
>>>>>    +
>>>>>    +      if (e.getConstraintViolationSummaries().size() > 0) {
>>>>>    +        System.err.println("ERROR : Constraint violations occurred
>>>>> :
>>>>> " +
>>>>> e.getConstraintViolationSummaries());
>>>>>    +      }
>>>>> ++      System.exit(1);
>>>>>    +    }
>>>>>    +  }
>>>>>    +}
>>>>>
>>>>>
>>>>> http://git-wip-us.apache.org/repos/asf/accumulo/blob/
>>>>> 1261625b/minicluster/src/test/java/org/apache/accumulo/minicluster/
>>>>> MiniAccumuloClusterGCTest.java
>>>>> ----------------------------------------------------------------------
>>>>> diff --cc
>>>>> minicluster/src/test/java/org/apache/accumulo/minicluster/
>>>>> MiniAccumuloClusterGCTest.java
>>>>> index a579397,0000000..a1f58f6
>>>>> mode 100644,000000..100644
>>>>> ---
>>>>> a/minicluster/src/test/java/org/apache/accumulo/minicluster/
>>>>> MiniAccumuloClusterGCTest.java
>>>>> +++
>>>>> b/minicluster/src/test/java/org/apache/accumulo/minicluster/
>>>>> MiniAccumuloClusterGCTest.java
>>>>> @@@ -1,129 -1,0 +1,150 @@@
>>>>>    +/*
>>>>>    + * Licensed to the Apache Software Foundation (ASF) under one or
>>>>> more
>>>>>    + * contributor license agreements.  See the NOTICE file distributed
>>>>> with
>>>>>    + * this work for additional information regarding copyright
>>>>> ownership.
>>>>>    + * The ASF licenses this file to You under the Apache License,
>>>>> Version
>>>>> 2.0
>>>>>    + * (the "License"); you may not use this file except in compliance
>>>>> with
>>>>>    + * the License.  You may obtain a copy of the License at
>>>>>    + *
>>>>>    + *     http://www.apache.org/licenses/LICENSE-2.0
>>>>>    + *
>>>>>    + * Unless required by applicable law or agreed to in writing,
>>>>> software
>>>>>    + * distributed under the License is distributed on an "AS IS"
>>>>> BASIS,
>>>>>    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
>>>>> implied.
>>>>>    + * See the License for the specific language governing permissions
>>>>> and
>>>>>    + * limitations under the License.
>>>>>    + */
>>>>>    +package org.apache.accumulo.minicluster;
>>>>>    +
>>>>>    +import java.io.File;
>>>>>    +import java.util.Map;
>>>>>    +
>>>>>    +import org.apache.accumulo.core.client.BatchWriter;
>>>>>    +import org.apache.accumulo.core.client.BatchWriterConfig;
>>>>>    +import org.apache.accumulo.core.client.Connector;
>>>>>    +import org.apache.accumulo.core.client.ZooKeeperInstance;
>>>>>    +import org.apache.accumulo.core.client.security.tokens.
>>>>> PasswordToken;
>>>>>    +import org.apache.accumulo.core.conf.Property;
>>>>>    +import org.apache.accumulo.core.data.Mutation;
>>>>>    +import org.apache.accumulo.server.util.PortUtils;
>>>>>    +import org.apache.commons.io.FileUtils;
>>>>>    +import org.apache.commons.io.filefilter.SuffixFileFilter;
>>>>>    +import org.apache.commons.io.filefilter.TrueFileFilter;
>>>>>    +import org.apache.log4j.Level;
>>>>>    +import org.apache.log4j.Logger;
>>>>> - import org.junit.AfterClass;
>>>>>    +import org.junit.Assert;
>>>>> - import org.junit.BeforeClass;
>>>>> ++import org.junit.Ignore;
>>>>>    +import org.junit.Test;
>>>>>    +import org.junit.rules.TemporaryFolder;
>>>>>    +
>>>>>    +import com.google.common.collect.ImmutableMap;
>>>>> ++import com.google.common.io.Files;
>>>>>    +
>>>>>    +/**
>>>>>    + *
>>>>>    + */
>>>>>    +public class MiniAccumuloClusterGCTest {
>>>>>    +
>>>>> ++  @Test
>>>>> ++  public void testGcConfig() throws Exception {
>>>>> ++    File f = Files.createTempDir();
>>>>> ++    f.deleteOnExit();
>>>>> ++    try {
>>>>> ++      MiniAccumuloConfig macConfig = new MiniAccumuloConfig(f,
>>>>> passwd);
>>>>> ++      macConfig.setNumTservers(1);
>>>>> ++
>>>>> ++      Assert.assertEquals(false, macConfig.shouldRunGC());
>>>>> ++
>>>>> ++      // Turn on the garbage collector
>>>>> ++      macConfig.runGC(true);
>>>>> ++
>>>>> ++      Assert.assertEquals(true, macConfig.shouldRunGC());
>>>>> ++    } finally {
>>>>> ++      if (null != f && f.exists()) {
>>>>> ++        f.delete();
>>>>> ++      }
>>>>> ++    }
>>>>> ++  }
>>>>> ++
>>>>> ++
>>>>>    +  private static TemporaryFolder tmpDir = new TemporaryFolder();
>>>>>    +  private static MiniAccumuloConfig macConfig;
>>>>>    +  private static MiniAccumuloCluster accumulo;
>>>>>    +  private static final String passwd = "password";
>>>>>    +
>>>>> -   @BeforeClass
>>>>>    +  public static void setupMiniCluster() throws Exception {
>>>>>    +    tmpDir.create();
>>>>>    +    Logger.getLogger("org.apache.zookeeper").setLevel(Level.
>>>>> ERROR);
>>>>>    +
>>>>>    +    macConfig = new MiniAccumuloConfig(tmpDir.getRoot(), passwd);
>>>>>    +    macConfig.setNumTservers(1);
>>>>>    +
>>>>>    +    // Turn on the garbage collector
>>>>>    +    macConfig.runGC(true);
>>>>>    +
>>>>>    +    String gcPort = Integer.toString(PortUtils.
>>>>> getRandomFreePort());
>>>>>    +
>>>>>    +    // And tweak the settings to make it run often
>>>>>    +    Map<String,String> config =
>>>>> ImmutableMap.of(Property.GC_CYCLE_DELAY.getKey(), "1s",
>>>>> Property.GC_CYCLE_START.getKey(), "0s", Property.GC_PORT.getKey(),
>>>>> gcPort);
>>>>>    +    macConfig.setSiteConfig(config);
>>>>>    +
>>>>>    +    accumulo = new MiniAccumuloCluster(macConfig);
>>>>>    +    accumulo.start();
>>>>>    +  }
>>>>>    +
>>>>> -   @AfterClass
>>>>>    +  public static void tearDownMiniCluster() throws Exception {
>>>>>    +    accumulo.stop();
>>>>>    +    tmpDir.delete();
>>>>>    +  }
>>>>>    +
>>>>> -   @Test(timeout = 20000)
>>>>> ++  // This test seems to be a little too unstable for a unit test
>>>>> ++  @Ignore
>>>>>    +  public void test() throws Exception {
>>>>>    +    ZooKeeperInstance inst = new
>>>>> ZooKeeperInstance(accumulo.getInstanceName(),
>>>>> accumulo.getZooKeepers());
>>>>>    +    Connector c = inst.getConnector("root", new
>>>>> PasswordToken(passwd));
>>>>>    +
>>>>>    +    final String table = "foobar";
>>>>>    +    c.tableOperations().create(table);
>>>>>    +
>>>>>    +    BatchWriter bw = null;
>>>>>    +
>>>>>    +    // Add some data
>>>>>    +    try {
>>>>>    +      bw = c.createBatchWriter(table, new BatchWriterConfig());
>>>>>    +      Mutation m = new Mutation("a");
>>>>>    +      for (int i = 0; i < 50; i++) {
>>>>>    +        m.put("colf", Integer.toString(i), "");
>>>>>    +      }
>>>>>    +
>>>>>    +      bw.addMutation(m);
>>>>>    +    } finally {
>>>>>    +      if (null != bw) {
>>>>>    +        bw.close();
>>>>>    +      }
>>>>>    +    }
>>>>>    +
>>>>>    +    final boolean flush = true, wait = true;
>>>>>    +
>>>>>    +    // Compact the tables to get some rfiles which we can gc
>>>>>    +    c.tableOperations().compact(table, null, null, flush, wait);
>>>>>    +    c.tableOperations().compact("!METADATA", null, null, flush,
>>>>> wait);
>>>>>    +
>>>>>    +    File accumuloDir = new File(tmpDir.getRoot().
>>>>> getAbsolutePath(),
>>>>> "accumulo");
>>>>>    +    File tables = new File(accumuloDir.getAbsolutePath(),
>>>>> "tables");
>>>>>    +
>>>>>    +    int fileCountAfterCompaction = FileUtils.listFiles(tables, new
>>>>> SuffixFileFilter(".rf"), TrueFileFilter.TRUE).size();
>>>>>    +
>>>>>    +    // Sleep for 4s to let the GC do its thing
>>>>>    +    for (int i = 1; i < 5; i++) {
>>>>>    +      Thread.sleep(1000);
>>>>>    +      int fileCountAfterGCWait = FileUtils.listFiles(tables, new
>>>>> SuffixFileFilter(".rf"), TrueFileFilter.TRUE).size();
>>>>>    +
>>>>>    +      if (fileCountAfterGCWait < fileCountAfterCompaction) {
>>>>>    +        return;
>>>>>    +      }
>>>>>    +    }
>>>>>    +
>>>>>    +    Assert.fail("Expected to find less files after compaction and
>>>>> pause
>>>>> for GC");
>>>>>    +  }
>>>>>    +
>>>>>    +}
>>>>>
>>>>>
>>>>>
>>>>>
>>>>
>>
>>

Re: [4/4] git commit: Merge branch '1.4.5-SNAPSHOT' into 1.5.1-SNAPSHOT

Posted by Josh Elser <jo...@gmail.com>.
Haven't looked at code (what the code in question does or what the 
functional tests do)... but that means we don't have unit test coverage 
here either?

On 11/16/2013 12:41 AM, Sean Busbey wrote:
> Just a note on not hitting any failed tests: without ACCUMULO-1878, the
> mismatch between RandomBatchWriter and RandomBatchReader just fails
> silently in the functional tests.
>
>
> On Fri, Nov 15, 2013 at 11:36 PM, Josh Elser <jo...@gmail.com> wrote:
>
>> Obviously I saw the conflict as I had thought I had correctly resolved it.
>> I guess not.
>>
>> I had also assumed that a test would have failed on me if I had merged it
>> incorrectly. Also an incorrect assumption, apparently.
>>
>> I don't really remember anymore, I think I took the changes from 1.4.5.
>> Sorry for catching you mid-merge.
>>
>>
>> On 11/15/2013 3:42 PM, Keith Turner wrote:
>>
>>> Josh,
>>>
>>> The conflict from the merge was not resolved correctly.  I was working on
>>> resolving this conflict but you pushed before I did.   I am really curious
>>> what happened, I want to make sure we are not dropping important changes
>>> when resolving conflicts during merge.  When merging 1.4 to 1.5 I saw the
>>> following conflict.   Did you see this conflict?
>>>
>>> <<<<<<<
>>> HEAD:examples/simple/src/main/java/org/apache/accumulo/
>>> examples/simple/client/RandomBatchWriter.java
>>>       for (int i = 0; i < opts.num; i++) {
>>>
>>>         long rowid = (Math.abs(r.nextLong()) % (opts.max - opts.min)) +
>>> opts.min;
>>> =======
>>>       // Generate num unique row ids in the given range
>>>       HashSet<Long> rowids = new HashSet<Long>(num);
>>>       while (rowids.size() < num) {
>>>         rowids.add((Math.abs(r.nextLong()) % (max - min)) + min);
>>>       }
>>>       for (long rowid : rowids) {
>>>
>>> >>>>>>> origin/1.4.5-SNAPSHOT:src/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
>>>
>>> Keith
>>>
>>>
>>>
>>> On Fri, Nov 15, 2013 at 2:43 PM, <el...@apache.org> wrote:
>>>
>>>   Merge branch '1.4.5-SNAPSHOT' into 1.5.1-SNAPSHOT
>>>>
>>>> Conflicts:
>>>>
>>>> examples/simple/src/main/java/org/apache/accumulo/examples/
>>>> simple/client/RandomBatchWriter.java
>>>>
>>>>
>>>> Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
>>>> Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/1261625b
>>>> Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/1261625b
>>>> Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/1261625b
>>>>
>>>> Branch: refs/heads/1.5.1-SNAPSHOT
>>>> Commit: 1261625b30691d57473efed313f3baf841d791e6
>>>> Parents: ac20fe0 a40a6d4
>>>> Author: Josh Elser <el...@apache.org>
>>>> Authored: Fri Nov 15 11:43:10 2013 -0800
>>>> Committer: Josh Elser <el...@apache.org>
>>>> Committed: Fri Nov 15 11:43:10 2013 -0800
>>>>
>>>> ----------------------------------------------------------------------
>>>>    .../simple/client/RandomBatchWriter.java        |  1 +
>>>>    .../minicluster/MiniAccumuloClusterGCTest.java  | 31
>>>> ++++++++++++++++----
>>>>    2 files changed, 27 insertions(+), 5 deletions(-)
>>>> ----------------------------------------------------------------------
>>>>
>>>>
>>>>
>>>> http://git-wip-us.apache.org/repos/asf/accumulo/blob/
>>>> 1261625b/examples/simple/src/main/java/org/apache/accumulo/
>>>> examples/simple/client/RandomBatchWriter.java
>>>> ----------------------------------------------------------------------
>>>> diff --cc
>>>> examples/simple/src/main/java/org/apache/accumulo/examples/
>>>> simple/client/RandomBatchWriter.java
>>>> index 886c4ba,0000000..f9acfd9
>>>> mode 100644,000000..100644
>>>> ---
>>>> a/examples/simple/src/main/java/org/apache/accumulo/
>>>> examples/simple/client/RandomBatchWriter.java
>>>> +++
>>>> b/examples/simple/src/main/java/org/apache/accumulo/
>>>> examples/simple/client/RandomBatchWriter.java
>>>> @@@ -1,169 -1,0 +1,170 @@@
>>>>    +/*
>>>>    + * Licensed to the Apache Software Foundation (ASF) under one or more
>>>>    + * contributor license agreements.  See the NOTICE file distributed
>>>> with
>>>>    + * this work for additional information regarding copyright ownership.
>>>>    + * The ASF licenses this file to You under the Apache License, Version
>>>> 2.0
>>>>    + * (the "License"); you may not use this file except in compliance
>>>> with
>>>>    + * the License.  You may obtain a copy of the License at
>>>>    + *
>>>>    + *     http://www.apache.org/licenses/LICENSE-2.0
>>>>    + *
>>>>    + * Unless required by applicable law or agreed to in writing, software
>>>>    + * distributed under the License is distributed on an "AS IS" BASIS,
>>>>    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
>>>> implied.
>>>>    + * See the License for the specific language governing permissions and
>>>>    + * limitations under the License.
>>>>    + */
>>>>    +package org.apache.accumulo.examples.simple.client;
>>>>    +
>>>>    +import java.util.HashMap;
>>>>    +import java.util.HashSet;
>>>>    +import java.util.Map.Entry;
>>>>    +import java.util.Random;
>>>>    +import java.util.Set;
>>>>    +
>>>>    +import org.apache.accumulo.core.cli.BatchWriterOpts;
>>>>    +import org.apache.accumulo.core.cli.ClientOnRequiredTable;
>>>>    +import org.apache.accumulo.core.client.AccumuloException;
>>>>    +import org.apache.accumulo.core.client.AccumuloSecurityException;
>>>>    +import org.apache.accumulo.core.client.BatchWriter;
>>>>    +import org.apache.accumulo.core.client.Connector;
>>>>    +import org.apache.accumulo.core.client.MutationsRejectedException;
>>>>    +import org.apache.accumulo.core.client.TableNotFoundException;
>>>>    +import org.apache.accumulo.core.client.security.SecurityErrorCode;
>>>>    +import org.apache.accumulo.core.data.KeyExtent;
>>>>    +import org.apache.accumulo.core.data.Mutation;
>>>>    +import org.apache.accumulo.core.data.Value;
>>>>    +import org.apache.accumulo.core.security.ColumnVisibility;
>>>>    +import org.apache.hadoop.io.Text;
>>>>    +
>>>>    +import com.beust.jcommander.Parameter;
>>>>    +
>>>>    +/**
>>>>    + * Simple example for writing random data to Accumulo. See
>>>> docs/examples/README.batch for instructions.
>>>>    + *
>>>>    + * The rows of the entries will be randomly generated numbers between
>>>> a
>>>> specified min and max (prefixed by "row_"). The column families will be
>>>> "foo" and column
>>>>    + * qualifiers will be "1". The values will be random byte arrays of a
>>>> specified size.
>>>>    + */
>>>>    +public class RandomBatchWriter {
>>>>    +
>>>>    +  /**
>>>>    +   * Creates a random byte array of specified size using the specified
>>>> seed.
>>>>    +   *
>>>>    +   * @param rowid
>>>>    +   *          the seed to use for the random number generator
>>>>    +   * @param dataSize
>>>>    +   *          the size of the array
>>>>    +   * @return a random byte array
>>>>    +   */
>>>>    +  public static byte[] createValue(long rowid, int dataSize) {
>>>>    +    Random r = new Random(rowid);
>>>>    +    byte value[] = new byte[dataSize];
>>>>    +
>>>>    +    r.nextBytes(value);
>>>>    +
>>>>    +    // transform to printable chars
>>>>    +    for (int j = 0; j < value.length; j++) {
>>>>    +      value[j] = (byte) (((0xff & value[j]) % 92) + ' ');
>>>>    +    }
>>>>    +
>>>>    +    return value;
>>>>    +  }
>>>>    +
>>>>    +  /**
>>>>    +   * Creates a mutation on a specified row with column family "foo",
>>>> column qualifier "1", specified visibility, and a random value of
>>>> specified
>>>> size.
>>>>    +   *
>>>>    +   * @param rowid
>>>>    +   *          the row of the mutation
>>>>    +   * @param dataSize
>>>>    +   *          the size of the random value
>>>>    +   * @param visibility
>>>>    +   *          the visibility of the entry to insert
>>>>    +   * @return a mutation
>>>>    +   */
>>>>    +  public static Mutation createMutation(long rowid, int dataSize,
>>>> ColumnVisibility visibility) {
>>>>    +    Text row = new Text(String.format("row_%010d", rowid));
>>>>    +
>>>>    +    Mutation m = new Mutation(row);
>>>>    +
>>>>    +    // create a random value that is a function of the
>>>>    +    // row id for verification purposes
>>>>    +    byte value[] = createValue(rowid, dataSize);
>>>>    +
>>>>    +    m.put(new Text("foo"), new Text("1"), visibility, new
>>>> Value(value));
>>>>    +
>>>>    +    return m;
>>>>    +  }
>>>>    +
>>>>    +  static class Opts extends ClientOnRequiredTable {
>>>>    +    @Parameter(names="--num", required=true)
>>>>    +    int num = 0;
>>>>    +    @Parameter(names="--min")
>>>>    +    long min = 0;
>>>>    +    @Parameter(names="--max")
>>>>    +    long max = Long.MAX_VALUE;
>>>>    +    @Parameter(names="--size", required=true, description="size of the
>>>> value to write")
>>>>    +    int size = 0;
>>>>    +    @Parameter(names="--vis", converter=VisibilityConverter.class)
>>>>    +    ColumnVisibility visiblity = new ColumnVisibility("");
>>>>    +    @Parameter(names="--seed", description="seed for pseudo-random
>>>> number generator")
>>>>    +    Long seed = null;
>>>>    +  }
>>>>    +
>>>>    +  /**
>>>>    +   * Writes a specified number of entries to Accumulo using a {@link
>>>> BatchWriter}.
>>>>    +   *
>>>>    +   * @throws AccumuloException
>>>>    +   * @throws AccumuloSecurityException
>>>>    +   * @throws TableNotFoundException
>>>>    +   */
>>>>    +  public static void main(String[] args) throws AccumuloException,
>>>> AccumuloSecurityException, TableNotFoundException {
>>>>    +    Opts opts = new Opts();
>>>>    +    BatchWriterOpts bwOpts = new BatchWriterOpts();
>>>>    +    opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
>>>>    +
>>>>    +    Random r;
>>>>    +    if (opts.seed == null)
>>>>    +      r = new Random();
>>>>    +    else {
>>>>    +      r = new Random(opts.seed);
>>>>    +    }
>>>>    +
>>>>    +    Connector connector = opts.getConnector();
>>>>    +    BatchWriter bw = connector.createBatchWriter(opts.tableName,
>>>> bwOpts.getBatchWriterConfig());
>>>>    +
>>>>    +    // reuse the ColumnVisibility object to improve performance
>>>>    +    ColumnVisibility cv = opts.visiblity;
>>>>    +
>>>>    +    for (int i = 0; i < opts.num; i++) {
>>>>    +
>>>>    +      long rowid = (Math.abs(r.nextLong()) % (opts.max - opts.min)) +
>>>> opts.min;
>>>>    +
>>>>    +      Mutation m = createMutation(rowid, opts.size, cv);
>>>>    +
>>>>    +      bw.addMutation(m);
>>>>    +
>>>>    +    }
>>>>    +
>>>>    +    try {
>>>>    +      bw.close();
>>>>    +    } catch (MutationsRejectedException e) {
>>>>    +      if (e.getAuthorizationFailuresMap().size() > 0) {
>>>>    +        HashMap<String,Set<SecurityErrorCode>> tables = new
>>>> HashMap<String,Set<SecurityErrorCode>>();
>>>>    +        for (Entry<KeyExtent,Set<SecurityErrorCode>> ke :
>>>> e.getAuthorizationFailuresMap().entrySet()) {
>>>>    +          Set<SecurityErrorCode> secCodes =
>>>> tables.get(ke.getKey().getTableId().toString());
>>>>    +          if (secCodes == null) {
>>>>    +            secCodes = new HashSet<SecurityErrorCode>();
>>>>    +            tables.put(ke.getKey().getTableId().toString(),
>>>> secCodes);
>>>>    +          }
>>>>    +          secCodes.addAll(ke.getValue());
>>>>    +        }
>>>>    +        System.err.println("ERROR : Not authorized to write to tables
>>>> :
>>>> " + tables);
>>>>    +      }
>>>>    +
>>>>    +      if (e.getConstraintViolationSummaries().size() > 0) {
>>>>    +        System.err.println("ERROR : Constraint violations occurred :
>>>> " +
>>>> e.getConstraintViolationSummaries());
>>>>    +      }
>>>> ++      System.exit(1);
>>>>    +    }
>>>>    +  }
>>>>    +}
>>>>
>>>>
>>>> http://git-wip-us.apache.org/repos/asf/accumulo/blob/
>>>> 1261625b/minicluster/src/test/java/org/apache/accumulo/minicluster/
>>>> MiniAccumuloClusterGCTest.java
>>>> ----------------------------------------------------------------------
>>>> diff --cc
>>>> minicluster/src/test/java/org/apache/accumulo/minicluster/
>>>> MiniAccumuloClusterGCTest.java
>>>> index a579397,0000000..a1f58f6
>>>> mode 100644,000000..100644
>>>> ---
>>>> a/minicluster/src/test/java/org/apache/accumulo/minicluster/
>>>> MiniAccumuloClusterGCTest.java
>>>> +++
>>>> b/minicluster/src/test/java/org/apache/accumulo/minicluster/
>>>> MiniAccumuloClusterGCTest.java
>>>> @@@ -1,129 -1,0 +1,150 @@@
>>>>    +/*
>>>>    + * Licensed to the Apache Software Foundation (ASF) under one or more
>>>>    + * contributor license agreements.  See the NOTICE file distributed
>>>> with
>>>>    + * this work for additional information regarding copyright ownership.
>>>>    + * The ASF licenses this file to You under the Apache License, Version
>>>> 2.0
>>>>    + * (the "License"); you may not use this file except in compliance
>>>> with
>>>>    + * the License.  You may obtain a copy of the License at
>>>>    + *
>>>>    + *     http://www.apache.org/licenses/LICENSE-2.0
>>>>    + *
>>>>    + * Unless required by applicable law or agreed to in writing, software
>>>>    + * distributed under the License is distributed on an "AS IS" BASIS,
>>>>    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
>>>> implied.
>>>>    + * See the License for the specific language governing permissions and
>>>>    + * limitations under the License.
>>>>    + */
>>>>    +package org.apache.accumulo.minicluster;
>>>>    +
>>>>    +import java.io.File;
>>>>    +import java.util.Map;
>>>>    +
>>>>    +import org.apache.accumulo.core.client.BatchWriter;
>>>>    +import org.apache.accumulo.core.client.BatchWriterConfig;
>>>>    +import org.apache.accumulo.core.client.Connector;
>>>>    +import org.apache.accumulo.core.client.ZooKeeperInstance;
>>>>    +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
>>>>    +import org.apache.accumulo.core.conf.Property;
>>>>    +import org.apache.accumulo.core.data.Mutation;
>>>>    +import org.apache.accumulo.server.util.PortUtils;
>>>>    +import org.apache.commons.io.FileUtils;
>>>>    +import org.apache.commons.io.filefilter.SuffixFileFilter;
>>>>    +import org.apache.commons.io.filefilter.TrueFileFilter;
>>>>    +import org.apache.log4j.Level;
>>>>    +import org.apache.log4j.Logger;
>>>> - import org.junit.AfterClass;
>>>>    +import org.junit.Assert;
>>>> - import org.junit.BeforeClass;
>>>> ++import org.junit.Ignore;
>>>>    +import org.junit.Test;
>>>>    +import org.junit.rules.TemporaryFolder;
>>>>    +
>>>>    +import com.google.common.collect.ImmutableMap;
>>>> ++import com.google.common.io.Files;
>>>>    +
>>>>    +/**
>>>>    + *
>>>>    + */
>>>>    +public class MiniAccumuloClusterGCTest {
>>>>    +
>>>> ++  @Test
>>>> ++  public void testGcConfig() throws Exception {
>>>> ++    File f = Files.createTempDir();
>>>> ++    f.deleteOnExit();
>>>> ++    try {
>>>> ++      MiniAccumuloConfig macConfig = new MiniAccumuloConfig(f, passwd);
>>>> ++      macConfig.setNumTservers(1);
>>>> ++
>>>> ++      Assert.assertEquals(false, macConfig.shouldRunGC());
>>>> ++
>>>> ++      // Turn on the garbage collector
>>>> ++      macConfig.runGC(true);
>>>> ++
>>>> ++      Assert.assertEquals(true, macConfig.shouldRunGC());
>>>> ++    } finally {
>>>> ++      if (null != f && f.exists()) {
>>>> ++        f.delete();
>>>> ++      }
>>>> ++    }
>>>> ++  }
>>>> ++
>>>> ++
>>>>    +  private static TemporaryFolder tmpDir = new TemporaryFolder();
>>>>    +  private static MiniAccumuloConfig macConfig;
>>>>    +  private static MiniAccumuloCluster accumulo;
>>>>    +  private static final String passwd = "password";
>>>>    +
>>>> -   @BeforeClass
>>>>    +  public static void setupMiniCluster() throws Exception {
>>>>    +    tmpDir.create();
>>>>    +    Logger.getLogger("org.apache.zookeeper").setLevel(Level.ERROR);
>>>>    +
>>>>    +    macConfig = new MiniAccumuloConfig(tmpDir.getRoot(), passwd);
>>>>    +    macConfig.setNumTservers(1);
>>>>    +
>>>>    +    // Turn on the garbage collector
>>>>    +    macConfig.runGC(true);
>>>>    +
>>>>    +    String gcPort = Integer.toString(PortUtils.getRandomFreePort());
>>>>    +
>>>>    +    // And tweak the settings to make it run often
>>>>    +    Map<String,String> config =
>>>> ImmutableMap.of(Property.GC_CYCLE_DELAY.getKey(), "1s",
>>>> Property.GC_CYCLE_START.getKey(), "0s", Property.GC_PORT.getKey(),
>>>> gcPort);
>>>>    +    macConfig.setSiteConfig(config);
>>>>    +
>>>>    +    accumulo = new MiniAccumuloCluster(macConfig);
>>>>    +    accumulo.start();
>>>>    +  }
>>>>    +
>>>> -   @AfterClass
>>>>    +  public static void tearDownMiniCluster() throws Exception {
>>>>    +    accumulo.stop();
>>>>    +    tmpDir.delete();
>>>>    +  }
>>>>    +
>>>> -   @Test(timeout = 20000)
>>>> ++  // This test seems to be a little too unstable for a unit test
>>>> ++  @Ignore
>>>>    +  public void test() throws Exception {
>>>>    +    ZooKeeperInstance inst = new
>>>> ZooKeeperInstance(accumulo.getInstanceName(), accumulo.getZooKeepers());
>>>>    +    Connector c = inst.getConnector("root", new
>>>> PasswordToken(passwd));
>>>>    +
>>>>    +    final String table = "foobar";
>>>>    +    c.tableOperations().create(table);
>>>>    +
>>>>    +    BatchWriter bw = null;
>>>>    +
>>>>    +    // Add some data
>>>>    +    try {
>>>>    +      bw = c.createBatchWriter(table, new BatchWriterConfig());
>>>>    +      Mutation m = new Mutation("a");
>>>>    +      for (int i = 0; i < 50; i++) {
>>>>    +        m.put("colf", Integer.toString(i), "");
>>>>    +      }
>>>>    +
>>>>    +      bw.addMutation(m);
>>>>    +    } finally {
>>>>    +      if (null != bw) {
>>>>    +        bw.close();
>>>>    +      }
>>>>    +    }
>>>>    +
>>>>    +    final boolean flush = true, wait = true;
>>>>    +
>>>>    +    // Compact the tables to get some rfiles which we can gc
>>>>    +    c.tableOperations().compact(table, null, null, flush, wait);
>>>>    +    c.tableOperations().compact("!METADATA", null, null, flush,
>>>> wait);
>>>>    +
>>>>    +    File accumuloDir = new File(tmpDir.getRoot().getAbsolutePath(),
>>>> "accumulo");
>>>>    +    File tables = new File(accumuloDir.getAbsolutePath(), "tables");
>>>>    +
>>>>    +    int fileCountAfterCompaction = FileUtils.listFiles(tables, new
>>>> SuffixFileFilter(".rf"), TrueFileFilter.TRUE).size();
>>>>    +
>>>>    +    // Sleep for 4s to let the GC do its thing
>>>>    +    for (int i = 1; i < 5; i++) {
>>>>    +      Thread.sleep(1000);
>>>>    +      int fileCountAfterGCWait = FileUtils.listFiles(tables, new
>>>> SuffixFileFilter(".rf"), TrueFileFilter.TRUE).size();
>>>>    +
>>>>    +      if (fileCountAfterGCWait < fileCountAfterCompaction) {
>>>>    +        return;
>>>>    +      }
>>>>    +    }
>>>>    +
>>>>    +    Assert.fail("Expected to find less files after compaction and
>>>> pause
>>>> for GC");
>>>>    +  }
>>>>    +
>>>>    +}
>>>>
>>>>
>>>>
>>>
>
>

Re: [4/4] git commit: Merge branch '1.4.5-SNAPSHOT' into 1.5.1-SNAPSHOT

Posted by Sean Busbey <bu...@clouderagovt.com>.
Just a note on not hitting any failed tests: without ACCUMULO-1878, the
mismatch between RandomBatchWriter and RandomBatchReader just fails
silently in the functional tests.


On Fri, Nov 15, 2013 at 11:36 PM, Josh Elser <jo...@gmail.com> wrote:

> Obviously I saw the conflict as I had thought I had correctly resolved it.
> I guess not.
>
> I had also assumed that a test would have failed on me if I had merged it
> incorrectly. Also an incorrect assumption, apparently.
>
> I don't really remember anymore, I think I took the changes from 1.4.5.
> Sorry for catching you mid-merge.
>
>
> On 11/15/2013 3:42 PM, Keith Turner wrote:
>
>> Josh,
>>
>> The conflict from the merge was not resolved correctly.  I was working on
>> resolving this conflict but you pushed before I did.   I am really curious
>> what happened, I want to make sure we are not dropping important changes
>> when resolving conflicts during merge.  When merging 1.4 to 1.5 I saw the
>> following conflict.   Did you see this conflict?
>>
>> <<<<<<<
>> HEAD:examples/simple/src/main/java/org/apache/accumulo/
>> examples/simple/client/RandomBatchWriter.java
>>      for (int i = 0; i < opts.num; i++) {
>>
>>        long rowid = (Math.abs(r.nextLong()) % (opts.max - opts.min)) +
>> opts.min;
>> =======
>>      // Generate num unique row ids in the given range
>>      HashSet<Long> rowids = new HashSet<Long>(num);
>>      while (rowids.size() < num) {
>>        rowids.add((Math.abs(r.nextLong()) % (max - min)) + min);
>>      }
>>      for (long rowid : rowids) {
>>
>> >>>>>>> origin/1.4.5-SNAPSHOT:src/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
>>
>> Keith
>>
>>
>>
>> On Fri, Nov 15, 2013 at 2:43 PM, <el...@apache.org> wrote:
>>
>>  Merge branch '1.4.5-SNAPSHOT' into 1.5.1-SNAPSHOT
>>>
>>> Conflicts:
>>>
>>> examples/simple/src/main/java/org/apache/accumulo/examples/
>>> simple/client/RandomBatchWriter.java
>>>
>>>
>>> Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
>>> Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/1261625b
>>> Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/1261625b
>>> Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/1261625b
>>>
>>> Branch: refs/heads/1.5.1-SNAPSHOT
>>> Commit: 1261625b30691d57473efed313f3baf841d791e6
>>> Parents: ac20fe0 a40a6d4
>>> Author: Josh Elser <el...@apache.org>
>>> Authored: Fri Nov 15 11:43:10 2013 -0800
>>> Committer: Josh Elser <el...@apache.org>
>>> Committed: Fri Nov 15 11:43:10 2013 -0800
>>>
>>> ----------------------------------------------------------------------
>>>   .../simple/client/RandomBatchWriter.java        |  1 +
>>>   .../minicluster/MiniAccumuloClusterGCTest.java  | 31
>>> ++++++++++++++++----
>>>   2 files changed, 27 insertions(+), 5 deletions(-)
>>> ----------------------------------------------------------------------
>>>
>>>
>>>
>>> http://git-wip-us.apache.org/repos/asf/accumulo/blob/
>>> 1261625b/examples/simple/src/main/java/org/apache/accumulo/
>>> examples/simple/client/RandomBatchWriter.java
>>> ----------------------------------------------------------------------
>>> diff --cc
>>> examples/simple/src/main/java/org/apache/accumulo/examples/
>>> simple/client/RandomBatchWriter.java
>>> index 886c4ba,0000000..f9acfd9
>>> mode 100644,000000..100644
>>> ---
>>> a/examples/simple/src/main/java/org/apache/accumulo/
>>> examples/simple/client/RandomBatchWriter.java
>>> +++
>>> b/examples/simple/src/main/java/org/apache/accumulo/
>>> examples/simple/client/RandomBatchWriter.java
>>> @@@ -1,169 -1,0 +1,170 @@@
>>>   +/*
>>>   + * Licensed to the Apache Software Foundation (ASF) under one or more
>>>   + * contributor license agreements.  See the NOTICE file distributed
>>> with
>>>   + * this work for additional information regarding copyright ownership.
>>>   + * The ASF licenses this file to You under the Apache License, Version
>>> 2.0
>>>   + * (the "License"); you may not use this file except in compliance
>>> with
>>>   + * the License.  You may obtain a copy of the License at
>>>   + *
>>>   + *     http://www.apache.org/licenses/LICENSE-2.0
>>>   + *
>>>   + * Unless required by applicable law or agreed to in writing, software
>>>   + * distributed under the License is distributed on an "AS IS" BASIS,
>>>   + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
>>> implied.
>>>   + * See the License for the specific language governing permissions and
>>>   + * limitations under the License.
>>>   + */
>>>   +package org.apache.accumulo.examples.simple.client;
>>>   +
>>>   +import java.util.HashMap;
>>>   +import java.util.HashSet;
>>>   +import java.util.Map.Entry;
>>>   +import java.util.Random;
>>>   +import java.util.Set;
>>>   +
>>>   +import org.apache.accumulo.core.cli.BatchWriterOpts;
>>>   +import org.apache.accumulo.core.cli.ClientOnRequiredTable;
>>>   +import org.apache.accumulo.core.client.AccumuloException;
>>>   +import org.apache.accumulo.core.client.AccumuloSecurityException;
>>>   +import org.apache.accumulo.core.client.BatchWriter;
>>>   +import org.apache.accumulo.core.client.Connector;
>>>   +import org.apache.accumulo.core.client.MutationsRejectedException;
>>>   +import org.apache.accumulo.core.client.TableNotFoundException;
>>>   +import org.apache.accumulo.core.client.security.SecurityErrorCode;
>>>   +import org.apache.accumulo.core.data.KeyExtent;
>>>   +import org.apache.accumulo.core.data.Mutation;
>>>   +import org.apache.accumulo.core.data.Value;
>>>   +import org.apache.accumulo.core.security.ColumnVisibility;
>>>   +import org.apache.hadoop.io.Text;
>>>   +
>>>   +import com.beust.jcommander.Parameter;
>>>   +
>>>   +/**
>>>   + * Simple example for writing random data to Accumulo. See
>>> docs/examples/README.batch for instructions.
>>>   + *
>>>   + * The rows of the entries will be randomly generated numbers between
>>> a
>>> specified min and max (prefixed by "row_"). The column families will be
>>> "foo" and column
>>>   + * qualifiers will be "1". The values will be random byte arrays of a
>>> specified size.
>>>   + */
>>>   +public class RandomBatchWriter {
>>>   +
>>>   +  /**
>>>   +   * Creates a random byte array of specified size using the specified
>>> seed.
>>>   +   *
>>>   +   * @param rowid
>>>   +   *          the seed to use for the random number generator
>>>   +   * @param dataSize
>>>   +   *          the size of the array
>>>   +   * @return a random byte array
>>>   +   */
>>>   +  public static byte[] createValue(long rowid, int dataSize) {
>>>   +    Random r = new Random(rowid);
>>>   +    byte value[] = new byte[dataSize];
>>>   +
>>>   +    r.nextBytes(value);
>>>   +
>>>   +    // transform to printable chars
>>>   +    for (int j = 0; j < value.length; j++) {
>>>   +      value[j] = (byte) (((0xff & value[j]) % 92) + ' ');
>>>   +    }
>>>   +
>>>   +    return value;
>>>   +  }
>>>   +
>>>   +  /**
>>>   +   * Creates a mutation on a specified row with column family "foo",
>>> column qualifier "1", specified visibility, and a random value of
>>> specified
>>> size.
>>>   +   *
>>>   +   * @param rowid
>>>   +   *          the row of the mutation
>>>   +   * @param dataSize
>>>   +   *          the size of the random value
>>>   +   * @param visibility
>>>   +   *          the visibility of the entry to insert
>>>   +   * @return a mutation
>>>   +   */
>>>   +  public static Mutation createMutation(long rowid, int dataSize,
>>> ColumnVisibility visibility) {
>>>   +    Text row = new Text(String.format("row_%010d", rowid));
>>>   +
>>>   +    Mutation m = new Mutation(row);
>>>   +
>>>   +    // create a random value that is a function of the
>>>   +    // row id for verification purposes
>>>   +    byte value[] = createValue(rowid, dataSize);
>>>   +
>>>   +    m.put(new Text("foo"), new Text("1"), visibility, new
>>> Value(value));
>>>   +
>>>   +    return m;
>>>   +  }
>>>   +
>>>   +  static class Opts extends ClientOnRequiredTable {
>>>   +    @Parameter(names="--num", required=true)
>>>   +    int num = 0;
>>>   +    @Parameter(names="--min")
>>>   +    long min = 0;
>>>   +    @Parameter(names="--max")
>>>   +    long max = Long.MAX_VALUE;
>>>   +    @Parameter(names="--size", required=true, description="size of the
>>> value to write")
>>>   +    int size = 0;
>>>   +    @Parameter(names="--vis", converter=VisibilityConverter.class)
>>>   +    ColumnVisibility visiblity = new ColumnVisibility("");
>>>   +    @Parameter(names="--seed", description="seed for pseudo-random
>>> number generator")
>>>   +    Long seed = null;
>>>   +  }
>>>   +
>>>   +  /**
>>>   +   * Writes a specified number of entries to Accumulo using a {@link
>>> BatchWriter}.
>>>   +   *
>>>   +   * @throws AccumuloException
>>>   +   * @throws AccumuloSecurityException
>>>   +   * @throws TableNotFoundException
>>>   +   */
>>>   +  public static void main(String[] args) throws AccumuloException,
>>> AccumuloSecurityException, TableNotFoundException {
>>>   +    Opts opts = new Opts();
>>>   +    BatchWriterOpts bwOpts = new BatchWriterOpts();
>>>   +    opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
>>>   +
>>>   +    Random r;
>>>   +    if (opts.seed == null)
>>>   +      r = new Random();
>>>   +    else {
>>>   +      r = new Random(opts.seed);
>>>   +    }
>>>   +
>>>   +    Connector connector = opts.getConnector();
>>>   +    BatchWriter bw = connector.createBatchWriter(opts.tableName,
>>> bwOpts.getBatchWriterConfig());
>>>   +
>>>   +    // reuse the ColumnVisibility object to improve performance
>>>   +    ColumnVisibility cv = opts.visiblity;
>>>   +
>>>   +    for (int i = 0; i < opts.num; i++) {
>>>   +
>>>   +      long rowid = (Math.abs(r.nextLong()) % (opts.max - opts.min)) +
>>> opts.min;
>>>   +
>>>   +      Mutation m = createMutation(rowid, opts.size, cv);
>>>   +
>>>   +      bw.addMutation(m);
>>>   +
>>>   +    }
>>>   +
>>>   +    try {
>>>   +      bw.close();
>>>   +    } catch (MutationsRejectedException e) {
>>>   +      if (e.getAuthorizationFailuresMap().size() > 0) {
>>>   +        HashMap<String,Set<SecurityErrorCode>> tables = new
>>> HashMap<String,Set<SecurityErrorCode>>();
>>>   +        for (Entry<KeyExtent,Set<SecurityErrorCode>> ke :
>>> e.getAuthorizationFailuresMap().entrySet()) {
>>>   +          Set<SecurityErrorCode> secCodes =
>>> tables.get(ke.getKey().getTableId().toString());
>>>   +          if (secCodes == null) {
>>>   +            secCodes = new HashSet<SecurityErrorCode>();
>>>   +            tables.put(ke.getKey().getTableId().toString(),
>>> secCodes);
>>>   +          }
>>>   +          secCodes.addAll(ke.getValue());
>>>   +        }
>>>   +        System.err.println("ERROR : Not authorized to write to tables
>>> :
>>> " + tables);
>>>   +      }
>>>   +
>>>   +      if (e.getConstraintViolationSummaries().size() > 0) {
>>>   +        System.err.println("ERROR : Constraint violations occurred :
>>> " +
>>> e.getConstraintViolationSummaries());
>>>   +      }
>>> ++      System.exit(1);
>>>   +    }
>>>   +  }
>>>   +}
>>>
>>>
>>> http://git-wip-us.apache.org/repos/asf/accumulo/blob/
>>> 1261625b/minicluster/src/test/java/org/apache/accumulo/minicluster/
>>> MiniAccumuloClusterGCTest.java
>>> ----------------------------------------------------------------------
>>> diff --cc
>>> minicluster/src/test/java/org/apache/accumulo/minicluster/
>>> MiniAccumuloClusterGCTest.java
>>> index a579397,0000000..a1f58f6
>>> mode 100644,000000..100644
>>> ---
>>> a/minicluster/src/test/java/org/apache/accumulo/minicluster/
>>> MiniAccumuloClusterGCTest.java
>>> +++
>>> b/minicluster/src/test/java/org/apache/accumulo/minicluster/
>>> MiniAccumuloClusterGCTest.java
>>> @@@ -1,129 -1,0 +1,150 @@@
>>>   +/*
>>>   + * Licensed to the Apache Software Foundation (ASF) under one or more
>>>   + * contributor license agreements.  See the NOTICE file distributed
>>> with
>>>   + * this work for additional information regarding copyright ownership.
>>>   + * The ASF licenses this file to You under the Apache License, Version
>>> 2.0
>>>   + * (the "License"); you may not use this file except in compliance
>>> with
>>>   + * the License.  You may obtain a copy of the License at
>>>   + *
>>>   + *     http://www.apache.org/licenses/LICENSE-2.0
>>>   + *
>>>   + * Unless required by applicable law or agreed to in writing, software
>>>   + * distributed under the License is distributed on an "AS IS" BASIS,
>>>   + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
>>> implied.
>>>   + * See the License for the specific language governing permissions and
>>>   + * limitations under the License.
>>>   + */
>>>   +package org.apache.accumulo.minicluster;
>>>   +
>>>   +import java.io.File;
>>>   +import java.util.Map;
>>>   +
>>>   +import org.apache.accumulo.core.client.BatchWriter;
>>>   +import org.apache.accumulo.core.client.BatchWriterConfig;
>>>   +import org.apache.accumulo.core.client.Connector;
>>>   +import org.apache.accumulo.core.client.ZooKeeperInstance;
>>>   +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
>>>   +import org.apache.accumulo.core.conf.Property;
>>>   +import org.apache.accumulo.core.data.Mutation;
>>>   +import org.apache.accumulo.server.util.PortUtils;
>>>   +import org.apache.commons.io.FileUtils;
>>>   +import org.apache.commons.io.filefilter.SuffixFileFilter;
>>>   +import org.apache.commons.io.filefilter.TrueFileFilter;
>>>   +import org.apache.log4j.Level;
>>>   +import org.apache.log4j.Logger;
>>> - import org.junit.AfterClass;
>>>   +import org.junit.Assert;
>>> - import org.junit.BeforeClass;
>>> ++import org.junit.Ignore;
>>>   +import org.junit.Test;
>>>   +import org.junit.rules.TemporaryFolder;
>>>   +
>>>   +import com.google.common.collect.ImmutableMap;
>>> ++import com.google.common.io.Files;
>>>   +
>>>   +/**
>>>   + *
>>>   + */
>>>   +public class MiniAccumuloClusterGCTest {
>>>   +
>>> ++  @Test
>>> ++  public void testGcConfig() throws Exception {
>>> ++    File f = Files.createTempDir();
>>> ++    f.deleteOnExit();
>>> ++    try {
>>> ++      MiniAccumuloConfig macConfig = new MiniAccumuloConfig(f, passwd);
>>> ++      macConfig.setNumTservers(1);
>>> ++
>>> ++      Assert.assertEquals(false, macConfig.shouldRunGC());
>>> ++
>>> ++      // Turn on the garbage collector
>>> ++      macConfig.runGC(true);
>>> ++
>>> ++      Assert.assertEquals(true, macConfig.shouldRunGC());
>>> ++    } finally {
>>> ++      if (null != f && f.exists()) {
>>> ++        f.delete();
>>> ++      }
>>> ++    }
>>> ++  }
>>> ++
>>> ++
>>>   +  private static TemporaryFolder tmpDir = new TemporaryFolder();
>>>   +  private static MiniAccumuloConfig macConfig;
>>>   +  private static MiniAccumuloCluster accumulo;
>>>   +  private static final String passwd = "password";
>>>   +
>>> -   @BeforeClass
>>>   +  public static void setupMiniCluster() throws Exception {
>>>   +    tmpDir.create();
>>>   +    Logger.getLogger("org.apache.zookeeper").setLevel(Level.ERROR);
>>>   +
>>>   +    macConfig = new MiniAccumuloConfig(tmpDir.getRoot(), passwd);
>>>   +    macConfig.setNumTservers(1);
>>>   +
>>>   +    // Turn on the garbage collector
>>>   +    macConfig.runGC(true);
>>>   +
>>>   +    String gcPort = Integer.toString(PortUtils.getRandomFreePort());
>>>   +
>>>   +    // And tweak the settings to make it run often
>>>   +    Map<String,String> config = ImmutableMap.of(Property.GC_CYCLE_DELAY.getKey(), "1s", Property.GC_CYCLE_START.getKey(), "0s", Property.GC_PORT.getKey(), gcPort);
>>>   +    macConfig.setSiteConfig(config);
>>>   +
>>>   +    accumulo = new MiniAccumuloCluster(macConfig);
>>>   +    accumulo.start();
>>>   +  }
>>>   +
>>> -   @AfterClass
>>>   +  public static void tearDownMiniCluster() throws Exception {
>>>   +    accumulo.stop();
>>>   +    tmpDir.delete();
>>>   +  }
>>>   +
>>> -   @Test(timeout = 20000)
>>> ++  // This test seems to be a little too unstable for a unit test
>>> ++  @Ignore
>>>   +  public void test() throws Exception {
>>>   +    ZooKeeperInstance inst = new ZooKeeperInstance(accumulo.getInstanceName(), accumulo.getZooKeepers());
>>>   +    Connector c = inst.getConnector("root", new PasswordToken(passwd));
>>>   +
>>>   +    final String table = "foobar";
>>>   +    c.tableOperations().create(table);
>>>   +
>>>   +    BatchWriter bw = null;
>>>   +
>>>   +    // Add some data
>>>   +    try {
>>>   +      bw = c.createBatchWriter(table, new BatchWriterConfig());
>>>   +      Mutation m = new Mutation("a");
>>>   +      for (int i = 0; i < 50; i++) {
>>>   +        m.put("colf", Integer.toString(i), "");
>>>   +      }
>>>   +
>>>   +      bw.addMutation(m);
>>>   +    } finally {
>>>   +      if (null != bw) {
>>>   +        bw.close();
>>>   +      }
>>>   +    }
>>>   +
>>>   +    final boolean flush = true, wait = true;
>>>   +
>>>   +    // Compact the tables to get some rfiles which we can gc
>>>   +    c.tableOperations().compact(table, null, null, flush, wait);
>>>   +    c.tableOperations().compact("!METADATA", null, null, flush, wait);
>>>   +
>>>   +    File accumuloDir = new File(tmpDir.getRoot().getAbsolutePath(), "accumulo");
>>>   +    File tables = new File(accumuloDir.getAbsolutePath(), "tables");
>>>   +
>>>   +    int fileCountAfterCompaction = FileUtils.listFiles(tables, new SuffixFileFilter(".rf"), TrueFileFilter.TRUE).size();
>>>   +
>>>   +    // Sleep for 4s to let the GC do its thing
>>>   +    for (int i = 1; i < 5; i++) {
>>>   +      Thread.sleep(1000);
>>>   +      int fileCountAfterGCWait = FileUtils.listFiles(tables, new SuffixFileFilter(".rf"), TrueFileFilter.TRUE).size();
>>>   +
>>>   +      if (fileCountAfterGCWait < fileCountAfterCompaction) {
>>>   +        return;
>>>   +      }
>>>   +    }
>>>   +
>>>   +    Assert.fail("Expected to find less files after compaction and pause for GC");
>>>   +  }
>>>   +
>>>   +}
>>>
>>>
>>>
>>


-- 
Sean

Re: [4/4] git commit: Merge branch '1.4.5-SNAPSHOT' into 1.5.1-SNAPSHOT

Posted by Josh Elser <jo...@gmail.com>.
Yup, just make a new commit and say that I screwed up the conflict 
resolution :)

Reverting merge commits is not as straightforward as you might think.

On 11/16/2013 11:48 AM, Keith Turner wrote:
> On Sat, Nov 16, 2013 at 11:42 AM, Josh Elser <jo...@gmail.com> wrote:
>
>> Nope, I just did it manually. I must not have been paying close enough
>> attention and deleted the wrong conflict sections. My bad.
>>
>> Do you want me to fix that or are you already looking at it again?
>
>
> I can resolve it quickly; I know what needs to be done. I'll do it.
> Should this just be a plain old commit in 1.5? Not sure if there are any
> git tricks for redoing conflict resolution.
>
>
>>
>>
>> On 11/16/2013 11:26 AM, Keith Turner wrote:
>>
>>> On Sat, Nov 16, 2013 at 12:36 AM, Josh Elser <jo...@gmail.com>
>>> wrote:
>>>
>>>   Obviously I saw the conflict as I had thought I had correctly resolved
>>>> it.
>>>> I guess not.
>>>>
>>>>
>>> I was not sure about that.  I was not sure how you were doing things. It
>>> seems like the conflict was resolved in such a way that all 1.5 changes
>>> were taken; I was wondering if this was done w/ a git command. Svn had a
>>> command like this for automatically resolving merges, which I found you
>>> have to be careful with in corner cases (like a file with some changes that
>>> conflicted and some that merged cleanly, where svn could throw out the conflicts
>>> and the clean merges).
>>>
>>>
>>>
>>>   I had also assumed that a test would have failed on me if I had merged it
>>>> incorrectly. Also an incorrect assumption, apparently.
>>>>
>>>> I don't really remember anymore, I think I took the changes from 1.4.5.
>>>> Sorry for catching you mid-merge.
>>>>
>>>
>>>
>>> That's fine, I was mainly trying to determine if there were any lessons to
>>> be learned to prevent future problems.
>>>
>>>
>>>
>>>>
>>>> On 11/15/2013 3:42 PM, Keith Turner wrote:
>>>>
>>>>   Josh,
>>>>>
>>>>> The conflict from the merge was not resolved correctly.  I was working
>>>>> on
>>>>> resolving this conflict but you pushed before I did. I am really curious
>>>>> what happened; I want to make sure we are not dropping important changes
>>>>> when resolving conflicts during merge.  When merging 1.4 to 1.5 I saw
>>>>> the
>>>>> following conflict.   Did you see this conflict?
>>>>>
>>>>> <<<<<<<
>>>>> HEAD:examples/simple/src/main/java/org/apache/accumulo/
>>>>> examples/simple/client/RandomBatchWriter.java
>>>>>        for (int i = 0; i < opts.num; i++) {
>>>>>
>>>>>          long rowid = (Math.abs(r.nextLong()) % (opts.max - opts.min)) +
>>>>> opts.min;
>>>>> =======
>>>>>        // Generate num unique row ids in the given range
>>>>>        HashSet<Long> rowids = new HashSet<Long>(num);
>>>>>        while (rowids.size() < num) {
>>>>>          rowids.add((Math.abs(r.nextLong()) % (max - min)) + min);
>>>>>        }
>>>>>        for (long rowid : rowids) {
>>>>>
>>>>>
>>>>> >>>>>>> origin/1.4.5-SNAPSHOT:src/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
>>>>>
>>>>> Keith

Re: [4/4] git commit: Merge branch '1.4.5-SNAPSHOT' into 1.5.1-SNAPSHOT

Posted by Keith Turner <ke...@deenlo.com>.
On Sat, Nov 16, 2013 at 11:42 AM, Josh Elser <jo...@gmail.com> wrote:

> Nope, I just did it manually. I must not have been paying close enough
> attention and deleted the wrong conflict sections. My bad.
>
> Do you want me to fix that or are you already looking at it again?


I can resolve it quickly; I know what needs to be done. I'll do it.
Should this just be a plain old commit in 1.5? Not sure if there are any
git tricks for redoing conflict resolution.
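
For reference, here is a minimal sketch of what the re-resolved section of RandomBatchWriter.main() would presumably look like once the 1.4.5 unique-rowid change is carried over onto the 1.5 opts-based code. It simply combines the two sides of the conflict quoted below (the opts.num/opts.min/opts.max loop from 1.5 and the HashSet loop from 1.4.5); treat it as an illustration of the intended resolution, not the actual follow-up commit:

    // Generate opts.num unique row ids in the given range, then write exactly
    // one mutation per row id (the old loop could repeat row ids and end up
    // writing fewer distinct rows than requested)
    HashSet<Long> rowids = new HashSet<Long>(opts.num);
    while (rowids.size() < opts.num) {
      rowids.add((Math.abs(r.nextLong()) % (opts.max - opts.min)) + opts.min);
    }
    for (long rowid : rowids) {
      Mutation m = createMutation(rowid, opts.size, cv);
      bw.addMutation(m);
    }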


>
>
> On 11/16/2013 11:26 AM, Keith Turner wrote:
>
>> On Sat, Nov 16, 2013 at 12:36 AM, Josh Elser <jo...@gmail.com>
>> wrote:
>>
>>  Obviously I saw the conflict as I had thought I had correctly resolved
>>> it.
>>> I guess not.
>>>
>>>
>> I was not sure about that.  I was not sure how you were doing things. It
>> seems like the conflict was resolved in such a way that all 1.5 changes
>> were taken; I was wondering if this was done w/ a git command. Svn had a
>> command like this for automatically resolving merges, which I found you
>> have to be careful with in corner cases (like a file with some changes that
>> conflicted and some that merged cleanly, where svn could throw out the conflicts
>> and the clean merges).
>>
>>
>>
>>  I had also assumed that a test would have failed on me if I had merged it
>>> incorrectly. Also an incorrect assumption, apparently.
>>>
>>> I don't really remember anymore, I think I took the changes from 1.4.5.
>>> Sorry for catching you mid-merge.
>>>
>>
>>
>> That's fine, I was mainly trying to determine if there were any lessons to
>> be learned to prevent future problems.
>>
>>
>>
>>>
>>> On 11/15/2013 3:42 PM, Keith Turner wrote:
>>>
>>>  Josh,
>>>>
>>>> The conflict from the merge was not resolved correctly.  I was working
>>>> on
>>>> resolving this conflict but you pushed before I did. I am really curious
>>>> what happened; I want to make sure we are not dropping important changes
>>>> when resolving conflicts during merge.  When merging 1.4 to 1.5 I saw
>>>> the
>>>> following conflict.   Did you see this conflict?
>>>>
>>>> <<<<<<<
>>>> HEAD:examples/simple/src/main/java/org/apache/accumulo/
>>>> examples/simple/client/RandomBatchWriter.java
>>>>       for (int i = 0; i < opts.num; i++) {
>>>>
>>>>         long rowid = (Math.abs(r.nextLong()) % (opts.max - opts.min)) +
>>>> opts.min;
>>>> =======
>>>>       // Generate num unique row ids in the given range
>>>>       HashSet<Long> rowids = new HashSet<Long>(num);
>>>>       while (rowids.size() < num) {
>>>>         rowids.add((Math.abs(r.nextLong()) % (max - min)) + min);
>>>>       }
>>>>       for (long rowid : rowids) {
>>>>
>>>>
>>>> >>>>>>> origin/1.4.5-SNAPSHOT:src/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
>>>>
>>>> Keith

Re: [4/4] git commit: Merge branch '1.4.5-SNAPSHOT' into 1.5.1-SNAPSHOT

Posted by Josh Elser <jo...@gmail.com>.
Nope, I just did it manually. I must not have been paying close enough
attention and deleted the wrong conflict sections. My bad.

Do you want me to fix that or are you already looking at it again?

On 11/16/2013 11:26 AM, Keith Turner wrote:
> On Sat, Nov 16, 2013 at 12:36 AM, Josh Elser <jo...@gmail.com> wrote:
>
>> Obviously I saw the conflict as I had thought I had correctly resolved it.
>> I guess not.
>>
>
> I was not sure about that.  I was not sure how you were doing things. It
> seems like the conflict was resolved in such a way that all 1.5 changes
> were taken; I was wondering if this was done w/ a git command. Svn had a
> command like this for automatically resolving merges, which I found you
> have to be careful with in corner cases (like a file with some changes that
> conflicted and some that merged cleanly, where svn could throw out the conflicts
> and the clean merges).
>
>
>
>> I had also assumed that a test would have failed on me if I had merged it
>> incorrectly. Also an incorrect assumption, apparently.
>>
>> I don't really remember anymore, I think I took the changes from 1.4.5.
>> Sorry for catching you mid-merge.
>
>
> That's fine, I was mainly trying to determine if there were any lessons to
> be learned to prevent future problems.
>
>
>>
>>
>> On 11/15/2013 3:42 PM, Keith Turner wrote:
>>
>>> Josh,
>>>
>>> The conflict from the merge was not resolved correctly.  I was working on
> resolving this conflict but you pushed before I did. I am really curious
> what happened; I want to make sure we are not dropping important changes
>>> when resolving conflicts during merge.  When merging 1.4 to 1.5 I saw the
>>> following conflict.   Did you see this conflict?
>>>
>>> <<<<<<<
>>> HEAD:examples/simple/src/main/java/org/apache/accumulo/
>>> examples/simple/client/RandomBatchWriter.java
>>>       for (int i = 0; i < opts.num; i++) {
>>>
>>>         long rowid = (Math.abs(r.nextLong()) % (opts.max - opts.min)) +
>>> opts.min;
>>> =======
>>>       // Generate num unique row ids in the given range
>>>       HashSet<Long> rowids = new HashSet<Long>(num);
>>>       while (rowids.size() < num) {
>>>         rowids.add((Math.abs(r.nextLong()) % (max - min)) + min);
>>>       }
>>>       for (long rowid : rowids) {
>>>
>>> >>>>>>> origin/1.4.5-SNAPSHOT:src/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
>>>
>>> Keith
>>>
>>>
>>>
>>> On Fri, Nov 15, 2013 at 2:43 PM, <el...@apache.org> wrote:
>>>
>>>   Merge branch '1.4.5-SNAPSHOT' into 1.5.1-SNAPSHOT
>>>>
>>>> Conflicts:
>>>>
>>>> examples/simple/src/main/java/org/apache/accumulo/examples/
>>>> simple/client/RandomBatchWriter.java
>>>>
>>>>
>>>> Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
>>>> Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/1261625b
>>>> Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/1261625b
>>>> Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/1261625b
>>>>
>>>> Branch: refs/heads/1.5.1-SNAPSHOT
>>>> Commit: 1261625b30691d57473efed313f3baf841d791e6
>>>> Parents: ac20fe0 a40a6d4
>>>> Author: Josh Elser <el...@apache.org>
>>>> Authored: Fri Nov 15 11:43:10 2013 -0800
>>>> Committer: Josh Elser <el...@apache.org>
>>>> Committed: Fri Nov 15 11:43:10 2013 -0800
>>>>
>>>> ----------------------------------------------------------------------
>>>>    .../simple/client/RandomBatchWriter.java        |  1 +
>>>>    .../minicluster/MiniAccumuloClusterGCTest.java  | 31
>>>> ++++++++++++++++----
>>>>    2 files changed, 27 insertions(+), 5 deletions(-)
>>>> ----------------------------------------------------------------------
>>>>
>>>>
>>>>
>>>> http://git-wip-us.apache.org/repos/asf/accumulo/blob/
>>>> 1261625b/examples/simple/src/main/java/org/apache/accumulo/
>>>> examples/simple/client/RandomBatchWriter.java
>>>> ----------------------------------------------------------------------
>>>> diff --cc
>>>> examples/simple/src/main/java/org/apache/accumulo/examples/
>>>> simple/client/RandomBatchWriter.java
>>>> index 886c4ba,0000000..f9acfd9
>>>> mode 100644,000000..100644
>>>> ---
>>>> a/examples/simple/src/main/java/org/apache/accumulo/
>>>> examples/simple/client/RandomBatchWriter.java
>>>> +++
>>>> b/examples/simple/src/main/java/org/apache/accumulo/
>>>> examples/simple/client/RandomBatchWriter.java
>>>> @@@ -1,169 -1,0 +1,170 @@@
>>>>    +/*
>>>>    + * Licensed to the Apache Software Foundation (ASF) under one or more
>>>>    + * contributor license agreements.  See the NOTICE file distributed
>>>> with
>>>>    + * this work for additional information regarding copyright ownership.
>>>>    + * The ASF licenses this file to You under the Apache License, Version
>>>> 2.0
>>>>    + * (the "License"); you may not use this file except in compliance
>>>> with
>>>>    + * the License.  You may obtain a copy of the License at
>>>>    + *
>>>>    + *     http://www.apache.org/licenses/LICENSE-2.0
>>>>    + *
>>>>    + * Unless required by applicable law or agreed to in writing, software
>>>>    + * distributed under the License is distributed on an "AS IS" BASIS,
>>>>    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
>>>> implied.
>>>>    + * See the License for the specific language governing permissions and
>>>>    + * limitations under the License.
>>>>    + */
>>>>    +package org.apache.accumulo.examples.simple.client;
>>>>    +
>>>>    +import java.util.HashMap;
>>>>    +import java.util.HashSet;
>>>>    +import java.util.Map.Entry;
>>>>    +import java.util.Random;
>>>>    +import java.util.Set;
>>>>    +
>>>>    +import org.apache.accumulo.core.cli.BatchWriterOpts;
>>>>    +import org.apache.accumulo.core.cli.ClientOnRequiredTable;
>>>>    +import org.apache.accumulo.core.client.AccumuloException;
>>>>    +import org.apache.accumulo.core.client.AccumuloSecurityException;
>>>>    +import org.apache.accumulo.core.client.BatchWriter;
>>>>    +import org.apache.accumulo.core.client.Connector;
>>>>    +import org.apache.accumulo.core.client.MutationsRejectedException;
>>>>    +import org.apache.accumulo.core.client.TableNotFoundException;
>>>>    +import org.apache.accumulo.core.client.security.SecurityErrorCode;
>>>>    +import org.apache.accumulo.core.data.KeyExtent;
>>>>    +import org.apache.accumulo.core.data.Mutation;
>>>>    +import org.apache.accumulo.core.data.Value;
>>>>    +import org.apache.accumulo.core.security.ColumnVisibility;
>>>>    +import org.apache.hadoop.io.Text;
>>>>    +
>>>>    +import com.beust.jcommander.Parameter;
>>>>    +
>>>>    +/**
>>>>    + * Simple example for writing random data to Accumulo. See
>>>> docs/examples/README.batch for instructions.
>>>>    + *
>>>>    + * The rows of the entries will be randomly generated numbers between
>>>> a
>>>> specified min and max (prefixed by "row_"). The column families will be
>>>> "foo" and column
>>>>    + * qualifiers will be "1". The values will be random byte arrays of a
>>>> specified size.
>>>>    + */
>>>>    +public class RandomBatchWriter {
>>>>    +
>>>>    +  /**
>>>>    +   * Creates a random byte array of specified size using the specified
>>>> seed.
>>>>    +   *
>>>>    +   * @param rowid
>>>>    +   *          the seed to use for the random number generator
>>>>    +   * @param dataSize
>>>>    +   *          the size of the array
>>>>    +   * @return a random byte array
>>>>    +   */
>>>>    +  public static byte[] createValue(long rowid, int dataSize) {
>>>>    +    Random r = new Random(rowid);
>>>>    +    byte value[] = new byte[dataSize];
>>>>    +
>>>>    +    r.nextBytes(value);
>>>>    +
>>>>    +    // transform to printable chars
>>>>    +    for (int j = 0; j < value.length; j++) {
>>>>    +      value[j] = (byte) (((0xff & value[j]) % 92) + ' ');
>>>>    +    }
>>>>    +
>>>>    +    return value;
>>>>    +  }
>>>>    +
>>>>    +  /**
>>>>    +   * Creates a mutation on a specified row with column family "foo",
>>>> column qualifier "1", specified visibility, and a random value of
>>>> specified
>>>> size.
>>>>    +   *
>>>>    +   * @param rowid
>>>>    +   *          the row of the mutation
>>>>    +   * @param dataSize
>>>>    +   *          the size of the random value
>>>>    +   * @param visibility
>>>>    +   *          the visibility of the entry to insert
>>>>    +   * @return a mutation
>>>>    +   */
>>>>    +  public static Mutation createMutation(long rowid, int dataSize,
>>>> ColumnVisibility visibility) {
>>>>    +    Text row = new Text(String.format("row_%010d", rowid));
>>>>    +
>>>>    +    Mutation m = new Mutation(row);
>>>>    +
>>>>    +    // create a random value that is a function of the
>>>>    +    // row id for verification purposes
>>>>    +    byte value[] = createValue(rowid, dataSize);
>>>>    +
>>>>    +    m.put(new Text("foo"), new Text("1"), visibility, new
>>>> Value(value));
>>>>    +
>>>>    +    return m;
>>>>    +  }
>>>>    +
>>>>    +  static class Opts extends ClientOnRequiredTable {
>>>>    +    @Parameter(names="--num", required=true)
>>>>    +    int num = 0;
>>>>    +    @Parameter(names="--min")
>>>>    +    long min = 0;
>>>>    +    @Parameter(names="--max")
>>>>    +    long max = Long.MAX_VALUE;
>>>>    +    @Parameter(names="--size", required=true, description="size of the
>>>> value to write")
>>>>    +    int size = 0;
>>>>    +    @Parameter(names="--vis", converter=VisibilityConverter.class)
>>>>    +    ColumnVisibility visiblity = new ColumnVisibility("");
>>>>    +    @Parameter(names="--seed", description="seed for pseudo-random
>>>> number generator")
>>>>    +    Long seed = null;
>>>>    +  }
>>>>    +
>>>>    +  /**
>>>>    +   * Writes a specified number of entries to Accumulo using a {@link
>>>> BatchWriter}.
>>>>    +   *
>>>>    +   * @throws AccumuloException
>>>>    +   * @throws AccumuloSecurityException
>>>>    +   * @throws TableNotFoundException
>>>>    +   */
>>>>    +  public static void main(String[] args) throws AccumuloException,
>>>> AccumuloSecurityException, TableNotFoundException {
>>>>    +    Opts opts = new Opts();
>>>>    +    BatchWriterOpts bwOpts = new BatchWriterOpts();
>>>>    +    opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
>>>>    +
>>>>    +    Random r;
>>>>    +    if (opts.seed == null)
>>>>    +      r = new Random();
>>>>    +    else {
>>>>    +      r = new Random(opts.seed);
>>>>    +    }
>>>>    +
>>>>    +    Connector connector = opts.getConnector();
>>>>    +    BatchWriter bw = connector.createBatchWriter(opts.tableName,
>>>> bwOpts.getBatchWriterConfig());
>>>>    +
>>>>    +    // reuse the ColumnVisibility object to improve performance
>>>>    +    ColumnVisibility cv = opts.visiblity;
>>>>    +
>>>>    +    for (int i = 0; i < opts.num; i++) {
>>>>    +
>>>>    +      long rowid = (Math.abs(r.nextLong()) % (opts.max - opts.min)) +
>>>> opts.min;
>>>>    +
>>>>    +      Mutation m = createMutation(rowid, opts.size, cv);
>>>>    +
>>>>    +      bw.addMutation(m);
>>>>    +
>>>>    +    }
>>>>    +
>>>>    +    try {
>>>>    +      bw.close();
>>>>    +    } catch (MutationsRejectedException e) {
>>>>    +      if (e.getAuthorizationFailuresMap().size() > 0) {
>>>>    +        HashMap<String,Set<SecurityErrorCode>> tables = new
>>>> HashMap<String,Set<SecurityErrorCode>>();
>>>>    +        for (Entry<KeyExtent,Set<SecurityErrorCode>> ke :
>>>> e.getAuthorizationFailuresMap().entrySet()) {
>>>>    +          Set<SecurityErrorCode> secCodes =
>>>> tables.get(ke.getKey().getTableId().toString());
>>>>    +          if (secCodes == null) {
>>>>    +            secCodes = new HashSet<SecurityErrorCode>();
>>>>    +            tables.put(ke.getKey().getTableId().toString(),
>>>> secCodes);
>>>>    +          }
>>>>    +          secCodes.addAll(ke.getValue());
>>>>    +        }
>>>>    +        System.err.println("ERROR : Not authorized to write to tables
>>>> :
>>>> " + tables);
>>>>    +      }
>>>>    +
>>>>    +      if (e.getConstraintViolationSummaries().size() > 0) {
>>>>    +        System.err.println("ERROR : Constraint violations occurred :
>>>> " +
>>>> e.getConstraintViolationSummaries());
>>>>    +      }
>>>> ++      System.exit(1);
>>>>    +    }
>>>>    +  }
>>>>    +}
>>>>
>>>>
>>>> http://git-wip-us.apache.org/repos/asf/accumulo/blob/
>>>> 1261625b/minicluster/src/test/java/org/apache/accumulo/minicluster/
>>>> MiniAccumuloClusterGCTest.java
>>>> ----------------------------------------------------------------------
>>>> diff --cc
>>>> minicluster/src/test/java/org/apache/accumulo/minicluster/
>>>> MiniAccumuloClusterGCTest.java
>>>> index a579397,0000000..a1f58f6
>>>> mode 100644,000000..100644
>>>> ---
>>>> a/minicluster/src/test/java/org/apache/accumulo/minicluster/
>>>> MiniAccumuloClusterGCTest.java
>>>> +++
>>>> b/minicluster/src/test/java/org/apache/accumulo/minicluster/
>>>> MiniAccumuloClusterGCTest.java
>>>> @@@ -1,129 -1,0 +1,150 @@@
>>>>    +/*
>>>>    + * Licensed to the Apache Software Foundation (ASF) under one or more
>>>>    + * contributor license agreements.  See the NOTICE file distributed
>>>> with
>>>>    + * this work for additional information regarding copyright ownership.
>>>>    + * The ASF licenses this file to You under the Apache License, Version
>>>> 2.0
>>>>    + * (the "License"); you may not use this file except in compliance
>>>> with
>>>>    + * the License.  You may obtain a copy of the License at
>>>>    + *
>>>>    + *     http://www.apache.org/licenses/LICENSE-2.0
>>>>    + *
>>>>    + * Unless required by applicable law or agreed to in writing, software
>>>>    + * distributed under the License is distributed on an "AS IS" BASIS,
>>>>    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
>>>> implied.
>>>>    + * See the License for the specific language governing permissions and
>>>>    + * limitations under the License.
>>>>    + */
>>>>    +package org.apache.accumulo.minicluster;
>>>>    +
>>>>    +import java.io.File;
>>>>    +import java.util.Map;
>>>>    +
>>>>    +import org.apache.accumulo.core.client.BatchWriter;
>>>>    +import org.apache.accumulo.core.client.BatchWriterConfig;
>>>>    +import org.apache.accumulo.core.client.Connector;
>>>>    +import org.apache.accumulo.core.client.ZooKeeperInstance;
>>>>    +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
>>>>    +import org.apache.accumulo.core.conf.Property;
>>>>    +import org.apache.accumulo.core.data.Mutation;
>>>>    +import org.apache.accumulo.server.util.PortUtils;
>>>>    +import org.apache.commons.io.FileUtils;
>>>>    +import org.apache.commons.io.filefilter.SuffixFileFilter;
>>>>    +import org.apache.commons.io.filefilter.TrueFileFilter;
>>>>    +import org.apache.log4j.Level;
>>>>    +import org.apache.log4j.Logger;
>>>> - import org.junit.AfterClass;
>>>>    +import org.junit.Assert;
>>>> - import org.junit.BeforeClass;
>>>> ++import org.junit.Ignore;
>>>>    +import org.junit.Test;
>>>>    +import org.junit.rules.TemporaryFolder;
>>>>    +
>>>>    +import com.google.common.collect.ImmutableMap;
>>>> ++import com.google.common.io.Files;
>>>>    +
>>>>    +/**
>>>>    + *
>>>>    + */
>>>>    +public class MiniAccumuloClusterGCTest {
>>>>    +
>>>> ++  @Test
>>>> ++  public void testGcConfig() throws Exception {
>>>> ++    File f = Files.createTempDir();
>>>> ++    f.deleteOnExit();
>>>> ++    try {
>>>> ++      MiniAccumuloConfig macConfig = new MiniAccumuloConfig(f, passwd);
>>>> ++      macConfig.setNumTservers(1);
>>>> ++
>>>> ++      Assert.assertEquals(false, macConfig.shouldRunGC());
>>>> ++
>>>> ++      // Turn on the garbage collector
>>>> ++      macConfig.runGC(true);
>>>> ++
>>>> ++      Assert.assertEquals(true, macConfig.shouldRunGC());
>>>> ++    } finally {
>>>> ++      if (null != f && f.exists()) {
>>>> ++        f.delete();
>>>> ++      }
>>>> ++    }
>>>> ++  }
>>>> ++
>>>> ++
>>>>    +  private static TemporaryFolder tmpDir = new TemporaryFolder();
>>>>    +  private static MiniAccumuloConfig macConfig;
>>>>    +  private static MiniAccumuloCluster accumulo;
>>>>    +  private static final String passwd = "password";
>>>>    +
>>>> -   @BeforeClass
>>>>    +  public static void setupMiniCluster() throws Exception {
>>>>    +    tmpDir.create();
>>>>    +    Logger.getLogger("org.apache.zookeeper").setLevel(Level.ERROR);
>>>>    +
>>>>    +    macConfig = new MiniAccumuloConfig(tmpDir.getRoot(), passwd);
>>>>    +    macConfig.setNumTservers(1);
>>>>    +
>>>>    +    // Turn on the garbage collector
>>>>    +    macConfig.runGC(true);
>>>>    +
>>>>    +    String gcPort = Integer.toString(PortUtils.getRandomFreePort());
>>>>    +
>>>>    +    // And tweak the settings to make it run often
>>>>    +    Map<String,String> config =
>>>> ImmutableMap.of(Property.GC_CYCLE_DELAY.getKey(), "1s",
>>>> Property.GC_CYCLE_START.getKey(), "0s", Property.GC_PORT.getKey(),
>>>> gcPort);
>>>>    +    macConfig.setSiteConfig(config);
>>>>    +
>>>>    +    accumulo = new MiniAccumuloCluster(macConfig);
>>>>    +    accumulo.start();
>>>>    +  }
>>>>    +
>>>> -   @AfterClass
>>>>    +  public static void tearDownMiniCluster() throws Exception {
>>>>    +    accumulo.stop();
>>>>    +    tmpDir.delete();
>>>>    +  }
>>>>    +
>>>> -   @Test(timeout = 20000)
>>>> ++  // This test seems to be a little too unstable for a unit test
>>>> ++  @Ignore
>>>>    +  public void test() throws Exception {
>>>>    +    ZooKeeperInstance inst = new
>>>> ZooKeeperInstance(accumulo.getInstanceName(), accumulo.getZooKeepers());
>>>>    +    Connector c = inst.getConnector("root", new
>>>> PasswordToken(passwd));
>>>>    +
>>>>    +    final String table = "foobar";
>>>>    +    c.tableOperations().create(table);
>>>>    +
>>>>    +    BatchWriter bw = null;
>>>>    +
>>>>    +    // Add some data
>>>>    +    try {
>>>>    +      bw = c.createBatchWriter(table, new BatchWriterConfig());
>>>>    +      Mutation m = new Mutation("a");
>>>>    +      for (int i = 0; i < 50; i++) {
>>>>    +        m.put("colf", Integer.toString(i), "");
>>>>    +      }
>>>>    +
>>>>    +      bw.addMutation(m);
>>>>    +    } finally {
>>>>    +      if (null != bw) {
>>>>    +        bw.close();
>>>>    +      }
>>>>    +    }
>>>>    +
>>>>    +    final boolean flush = true, wait = true;
>>>>    +
>>>>    +    // Compact the tables to get some rfiles which we can gc
>>>>    +    c.tableOperations().compact(table, null, null, flush, wait);
>>>>    +    c.tableOperations().compact("!METADATA", null, null, flush,
>>>> wait);
>>>>    +
>>>>    +    File accumuloDir = new File(tmpDir.getRoot().getAbsolutePath(),
>>>> "accumulo");
>>>>    +    File tables = new File(accumuloDir.getAbsolutePath(), "tables");
>>>>    +
>>>>    +    int fileCountAfterCompaction = FileUtils.listFiles(tables, new
>>>> SuffixFileFilter(".rf"), TrueFileFilter.TRUE).size();
>>>>    +
>>>>    +    // Sleep for 4s to let the GC do its thing
>>>>    +    for (int i = 1; i < 5; i++) {
>>>>    +      Thread.sleep(1000);
>>>>    +      int fileCountAfterGCWait = FileUtils.listFiles(tables, new
>>>> SuffixFileFilter(".rf"), TrueFileFilter.TRUE).size();
>>>>    +
>>>>    +      if (fileCountAfterGCWait < fileCountAfterCompaction) {
>>>>    +        return;
>>>>    +      }
>>>>    +    }
>>>>    +
>>>>    +    Assert.fail("Expected to find less files after compaction and
>>>> pause
>>>> for GC");
>>>>    +  }
>>>>    +
>>>>    +}
>>>>
>>>>
>>>>
>>>
>

Re: [4/4] git commit: Merge branch '1.4.5-SNAPSHOT' into 1.5.1-SNAPSHOT

Posted by Keith Turner <ke...@deenlo.com>.
On Sat, Nov 16, 2013 at 12:36 AM, Josh Elser <jo...@gmail.com> wrote:

> Obviously I saw the conflict as I had thought I had correctly resolved it.
> I guess not.
>

I was not sure about that; I was not sure how you were doing things. It
seems like the conflict was resolved in such a way that all of the 1.5
changes were taken, and I was wondering if that was done with a git
command. Svn had commands like this for automatically resolving merges,
and I found you have to be careful with them in corner cases (for
example, a file with some changes that conflicted and some that merged
cleanly: svn could throw out both the conflicting changes and the
cleanly merged ones).



> I had also assumed that a test would have failed on me if I had merged it
> incorrectly. Also an incorrect assumption, apparently.
>
> I don't really remember anymore, I think I took the changes from 1.4.5.
> Sorry for catching you mid-merge.


That's fine; I was mainly trying to determine if there were any lessons to
be learned to prevent future problems.


>
>
> On 11/15/2013 3:42 PM, Keith Turner wrote:
>
>> Josh,
>>
>> The conflict from the merge was not resolved correctly.  I was working on
>> resolving this conflict but you pushed before I did.   I am really curious
>> what happened, I want to make sure we are not dropping important changes
>> when resolving conflicts during merge.  When merging 1.4 to 1.5 I saw the
>> following conflict.   Did you see this conflict?
>>
>> <<<<<<<
>> HEAD:examples/simple/src/main/java/org/apache/accumulo/
>> examples/simple/client/RandomBatchWriter.java
>>      for (int i = 0; i < opts.num; i++) {
>>
>>        long rowid = (Math.abs(r.nextLong()) % (opts.max - opts.min)) +
>> opts.min;
>> =======
>>      // Generate num unique row ids in the given range
>>      HashSet<Long> rowids = new HashSet<Long>(num);
>>      while (rowids.size() < num) {
>>        rowids.add((Math.abs(r.nextLong()) % (max - min)) + min);
>>      }
>>      for (long rowid : rowids) {
>>
>>>
>>>>>>>>>  origin/1.4.5-SNAPSHOT:src/examples/simple/src/main/java/
>> org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
>>
>> Keith
>>
>>
>>

Re: [4/4] git commit: Merge branch '1.4.5-SNAPSHOT' into 1.5.1-SNAPSHOT

Posted by Josh Elser <jo...@gmail.com>.
Obviously I saw the conflict, since I thought I had correctly resolved
it. I guess not.

I had also assumed that a test would have failed on me if I had merged
it incorrectly. Also an incorrect assumption, apparently.

I don't really remember anymore; I think I took the changes from 1.4.5.
Sorry for catching you mid-merge.

On 11/15/2013 3:42 PM, Keith Turner wrote:
> Josh,
>
> The conflict from the merge was not resolved correctly.  I was working on
> resolving this conflict but you pushed before I did.   I am really curious
> what happened, I want to make sure we are not dropping important changes
> when resolving conflicts during merge.  When merging 1.4 to 1.5 I saw the
> following conflict.   Did you see this conflict?
>
> <<<<<<<
> HEAD:examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
>      for (int i = 0; i < opts.num; i++) {
>
>        long rowid = (Math.abs(r.nextLong()) % (opts.max - opts.min)) +
> opts.min;
> =======
>      // Generate num unique row ids in the given range
>      HashSet<Long> rowids = new HashSet<Long>(num);
>      while (rowids.size() < num) {
>        rowids.add((Math.abs(r.nextLong()) % (max - min)) + min);
>      }
>      for (long rowid : rowids) {
>>>>>>>>
> origin/1.4.5-SNAPSHOT:src/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
>
> Keith
>
>
>

Re: [4/4] git commit: Merge branch '1.4.5-SNAPSHOT' into 1.5.1-SNAPSHOT

Posted by Keith Turner <ke...@deenlo.com>.
Josh,

The conflict from the merge was not resolved correctly. I was working on
resolving this conflict, but you pushed before I did. I am really curious
what happened; I want to make sure we are not dropping important changes
when resolving conflicts during a merge. When merging 1.4 into 1.5 I saw
the following conflict. Did you see this conflict?

<<<<<<<
HEAD:examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
    for (int i = 0; i < opts.num; i++) {

      long rowid = (Math.abs(r.nextLong()) % (opts.max - opts.min)) +
opts.min;
=======
    // Generate num unique row ids in the given range
    HashSet<Long> rowids = new HashSet<Long>(num);
    while (rowids.size() < num) {
      rowids.add((Math.abs(r.nextLong()) % (max - min)) + min);
    }
    for (long rowid : rowids) {
>>>>>>>
origin/1.4.5-SNAPSHOT:src/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
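
For reference, a resolution that keeps the fix from 1.4.5 (ACCUMULO-1892)
would take the 1.4.5 HashSet loop and adapt it to the 1.5 option names.
Below is a minimal sketch of what that merged loop could look like
(illustrative only; it assumes the surrounding main method from the 1.5
file, i.e. the existing opts, r, cv, and bw variables, and it is not the
code that was actually pushed):

    // Generate opts.num unique row ids in the given range, so duplicate
    // random values cannot silently reduce the number of rows written
    HashSet<Long> rowids = new HashSet<Long>(opts.num);
    while (rowids.size() < opts.num) {
      rowids.add((Math.abs(r.nextLong()) % (opts.max - opts.min)) + opts.min);
    }
    for (long rowid : rowids) {
      Mutation m = createMutation(rowid, opts.size, cv);
      bw.addMutation(m);
    }

Taking only the HEAD side, as the pushed merge did, keeps the plain for
loop and drops the uniqueness guarantee introduced in 1.4.5.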

Keith



On Fri, Nov 15, 2013 at 2:43 PM, <el...@apache.org> wrote:

> Merge branch '1.4.5-SNAPSHOT' into 1.5.1-SNAPSHOT
>
> Conflicts:
>
> examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
>
>
> Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
> Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/1261625b
> Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/1261625b
> Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/1261625b
>
> Branch: refs/heads/1.5.1-SNAPSHOT
> Commit: 1261625b30691d57473efed313f3baf841d791e6
> Parents: ac20fe0 a40a6d4
> Author: Josh Elser <el...@apache.org>
> Authored: Fri Nov 15 11:43:10 2013 -0800
> Committer: Josh Elser <el...@apache.org>
> Committed: Fri Nov 15 11:43:10 2013 -0800
>
> ----------------------------------------------------------------------
>  .../simple/client/RandomBatchWriter.java        |  1 +
>  .../minicluster/MiniAccumuloClusterGCTest.java  | 31 ++++++++++++++++----
>  2 files changed, 27 insertions(+), 5 deletions(-)
> ----------------------------------------------------------------------
>
>
>
> http://git-wip-us.apache.org/repos/asf/accumulo/blob/1261625b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
> ----------------------------------------------------------------------
> diff --cc
> examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
> index 886c4ba,0000000..f9acfd9
> mode 100644,000000..100644
> ---
> a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
> +++
> b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
> @@@ -1,169 -1,0 +1,170 @@@
>  +/*
>  + * Licensed to the Apache Software Foundation (ASF) under one or more
>  + * contributor license agreements.  See the NOTICE file distributed with
>  + * this work for additional information regarding copyright ownership.
>  + * The ASF licenses this file to You under the Apache License, Version
> 2.0
>  + * (the "License"); you may not use this file except in compliance with
>  + * the License.  You may obtain a copy of the License at
>  + *
>  + *     http://www.apache.org/licenses/LICENSE-2.0
>  + *
>  + * Unless required by applicable law or agreed to in writing, software
>  + * distributed under the License is distributed on an "AS IS" BASIS,
>  + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
> implied.
>  + * See the License for the specific language governing permissions and
>  + * limitations under the License.
>  + */
>  +package org.apache.accumulo.examples.simple.client;
>  +
>  +import java.util.HashMap;
>  +import java.util.HashSet;
>  +import java.util.Map.Entry;
>  +import java.util.Random;
>  +import java.util.Set;
>  +
>  +import org.apache.accumulo.core.cli.BatchWriterOpts;
>  +import org.apache.accumulo.core.cli.ClientOnRequiredTable;
>  +import org.apache.accumulo.core.client.AccumuloException;
>  +import org.apache.accumulo.core.client.AccumuloSecurityException;
>  +import org.apache.accumulo.core.client.BatchWriter;
>  +import org.apache.accumulo.core.client.Connector;
>  +import org.apache.accumulo.core.client.MutationsRejectedException;
>  +import org.apache.accumulo.core.client.TableNotFoundException;
>  +import org.apache.accumulo.core.client.security.SecurityErrorCode;
>  +import org.apache.accumulo.core.data.KeyExtent;
>  +import org.apache.accumulo.core.data.Mutation;
>  +import org.apache.accumulo.core.data.Value;
>  +import org.apache.accumulo.core.security.ColumnVisibility;
>  +import org.apache.hadoop.io.Text;
>  +
>  +import com.beust.jcommander.Parameter;
>  +
>  +/**
>  + * Simple example for writing random data to Accumulo. See
> docs/examples/README.batch for instructions.
>  + *
>  + * The rows of the entries will be randomly generated numbers between a
> specified min and max (prefixed by "row_"). The column families will be
> "foo" and column
>  + * qualifiers will be "1". The values will be random byte arrays of a
> specified size.
>  + */
>  +public class RandomBatchWriter {
>  +
>  +  /**
>  +   * Creates a random byte array of specified size using the specified
> seed.
>  +   *
>  +   * @param rowid
>  +   *          the seed to use for the random number generator
>  +   * @param dataSize
>  +   *          the size of the array
>  +   * @return a random byte array
>  +   */
>  +  public static byte[] createValue(long rowid, int dataSize) {
>  +    Random r = new Random(rowid);
>  +    byte value[] = new byte[dataSize];
>  +
>  +    r.nextBytes(value);
>  +
>  +    // transform to printable chars
>  +    for (int j = 0; j < value.length; j++) {
>  +      value[j] = (byte) (((0xff & value[j]) % 92) + ' ');
>  +    }
>  +
>  +    return value;
>  +  }
>  +
>  +  /**
>  +   * Creates a mutation on a specified row with column family "foo",
> column qualifier "1", specified visibility, and a random value of specified
> size.
>  +   *
>  +   * @param rowid
>  +   *          the row of the mutation
>  +   * @param dataSize
>  +   *          the size of the random value
>  +   * @param visibility
>  +   *          the visibility of the entry to insert
>  +   * @return a mutation
>  +   */
>  +  public static Mutation createMutation(long rowid, int dataSize,
> ColumnVisibility visibility) {
>  +    Text row = new Text(String.format("row_%010d", rowid));
>  +
>  +    Mutation m = new Mutation(row);
>  +
>  +    // create a random value that is a function of the
>  +    // row id for verification purposes
>  +    byte value[] = createValue(rowid, dataSize);
>  +
>  +    m.put(new Text("foo"), new Text("1"), visibility, new Value(value));
>  +
>  +    return m;
>  +  }
>  +
>  +  static class Opts extends ClientOnRequiredTable {
>  +    @Parameter(names="--num", required=true)
>  +    int num = 0;
>  +    @Parameter(names="--min")
>  +    long min = 0;
>  +    @Parameter(names="--max")
>  +    long max = Long.MAX_VALUE;
>  +    @Parameter(names="--size", required=true, description="size of the
> value to write")
>  +    int size = 0;
>  +    @Parameter(names="--vis", converter=VisibilityConverter.class)
>  +    ColumnVisibility visiblity = new ColumnVisibility("");
>  +    @Parameter(names="--seed", description="seed for pseudo-random
> number generator")
>  +    Long seed = null;
>  +  }
>  +
>  +  /**
>  +   * Writes a specified number of entries to Accumulo using a {@link
> BatchWriter}.
>  +   *
>  +   * @throws AccumuloException
>  +   * @throws AccumuloSecurityException
>  +   * @throws TableNotFoundException
>  +   */
>  +  public static void main(String[] args) throws AccumuloException,
> AccumuloSecurityException, TableNotFoundException {
>  +    Opts opts = new Opts();
>  +    BatchWriterOpts bwOpts = new BatchWriterOpts();
>  +    opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
>  +
>  +    Random r;
>  +    if (opts.seed == null)
>  +      r = new Random();
>  +    else {
>  +      r = new Random(opts.seed);
>  +    }
>  +
>  +    Connector connector = opts.getConnector();
>  +    BatchWriter bw = connector.createBatchWriter(opts.tableName,
> bwOpts.getBatchWriterConfig());
>  +
>  +    // reuse the ColumnVisibility object to improve performance
>  +    ColumnVisibility cv = opts.visiblity;
>  +
>  +    for (int i = 0; i < opts.num; i++) {
>  +
>  +      long rowid = (Math.abs(r.nextLong()) % (opts.max - opts.min)) +
> opts.min;
>  +
>  +      Mutation m = createMutation(rowid, opts.size, cv);
>  +
>  +      bw.addMutation(m);
>  +
>  +    }
>  +
>  +    try {
>  +      bw.close();
>  +    } catch (MutationsRejectedException e) {
>  +      if (e.getAuthorizationFailuresMap().size() > 0) {
>  +        HashMap<String,Set<SecurityErrorCode>> tables = new
> HashMap<String,Set<SecurityErrorCode>>();
>  +        for (Entry<KeyExtent,Set<SecurityErrorCode>> ke :
> e.getAuthorizationFailuresMap().entrySet()) {
>  +          Set<SecurityErrorCode> secCodes =
> tables.get(ke.getKey().getTableId().toString());
>  +          if (secCodes == null) {
>  +            secCodes = new HashSet<SecurityErrorCode>();
>  +            tables.put(ke.getKey().getTableId().toString(), secCodes);
>  +          }
>  +          secCodes.addAll(ke.getValue());
>  +        }
>  +        System.err.println("ERROR : Not authorized to write to tables :
> " + tables);
>  +      }
>  +
>  +      if (e.getConstraintViolationSummaries().size() > 0) {
>  +        System.err.println("ERROR : Constraint violations occurred : " +
> e.getConstraintViolationSummaries());
>  +      }
> ++      System.exit(1);
>  +    }
>  +  }
>  +}
>
>
> http://git-wip-us.apache.org/repos/asf/accumulo/blob/1261625b/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java
> ----------------------------------------------------------------------
> diff --cc
> minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java
> index a579397,0000000..a1f58f6
> mode 100644,000000..100644
> ---
> a/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java
> +++
> b/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java
> @@@ -1,129 -1,0 +1,150 @@@
>  +/*
>  + * Licensed to the Apache Software Foundation (ASF) under one or more
>  + * contributor license agreements.  See the NOTICE file distributed with
>  + * this work for additional information regarding copyright ownership.
>  + * The ASF licenses this file to You under the Apache License, Version
> 2.0
>  + * (the "License"); you may not use this file except in compliance with
>  + * the License.  You may obtain a copy of the License at
>  + *
>  + *     http://www.apache.org/licenses/LICENSE-2.0
>  + *
>  + * Unless required by applicable law or agreed to in writing, software
>  + * distributed under the License is distributed on an "AS IS" BASIS,
>  + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
> implied.
>  + * See the License for the specific language governing permissions and
>  + * limitations under the License.
>  + */
>  +package org.apache.accumulo.minicluster;
>  +
>  +import java.io.File;
>  +import java.util.Map;
>  +
>  +import org.apache.accumulo.core.client.BatchWriter;
>  +import org.apache.accumulo.core.client.BatchWriterConfig;
>  +import org.apache.accumulo.core.client.Connector;
>  +import org.apache.accumulo.core.client.ZooKeeperInstance;
>  +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
>  +import org.apache.accumulo.core.conf.Property;
>  +import org.apache.accumulo.core.data.Mutation;
>  +import org.apache.accumulo.server.util.PortUtils;
>  +import org.apache.commons.io.FileUtils;
>  +import org.apache.commons.io.filefilter.SuffixFileFilter;
>  +import org.apache.commons.io.filefilter.TrueFileFilter;
>  +import org.apache.log4j.Level;
>  +import org.apache.log4j.Logger;
> - import org.junit.AfterClass;
>  +import org.junit.Assert;
> - import org.junit.BeforeClass;
> ++import org.junit.Ignore;
>  +import org.junit.Test;
>  +import org.junit.rules.TemporaryFolder;
>  +
>  +import com.google.common.collect.ImmutableMap;
> ++import com.google.common.io.Files;
>  +
>  +/**
>  + *
>  + */
>  +public class MiniAccumuloClusterGCTest {
>  +
> ++  @Test
> ++  public void testGcConfig() throws Exception {
> ++    File f = Files.createTempDir();
> ++    f.deleteOnExit();
> ++    try {
> ++      MiniAccumuloConfig macConfig = new MiniAccumuloConfig(f, passwd);
> ++      macConfig.setNumTservers(1);
> ++
> ++      Assert.assertEquals(false, macConfig.shouldRunGC());
> ++
> ++      // Turn on the garbage collector
> ++      macConfig.runGC(true);
> ++
> ++      Assert.assertEquals(true, macConfig.shouldRunGC());
> ++    } finally {
> ++      if (null != f && f.exists()) {
> ++        f.delete();
> ++      }
> ++    }
> ++  }
> ++
> ++
>  +  private static TemporaryFolder tmpDir = new TemporaryFolder();
>  +  private static MiniAccumuloConfig macConfig;
>  +  private static MiniAccumuloCluster accumulo;
>  +  private static final String passwd = "password";
>  +
> -   @BeforeClass
>  +  public static void setupMiniCluster() throws Exception {
>  +    tmpDir.create();
>  +    Logger.getLogger("org.apache.zookeeper").setLevel(Level.ERROR);
>  +
>  +    macConfig = new MiniAccumuloConfig(tmpDir.getRoot(), passwd);
>  +    macConfig.setNumTservers(1);
>  +
>  +    // Turn on the garbage collector
>  +    macConfig.runGC(true);
>  +
>  +    String gcPort = Integer.toString(PortUtils.getRandomFreePort());
>  +
>  +    // And tweak the settings to make it run often
>  +    Map<String,String> config = ImmutableMap.of(Property.GC_CYCLE_DELAY.getKey(), "1s", Property.GC_CYCLE_START.getKey(), "0s", Property.GC_PORT.getKey(), gcPort);
>  +    macConfig.setSiteConfig(config);
>  +
>  +    accumulo = new MiniAccumuloCluster(macConfig);
>  +    accumulo.start();
>  +  }
>  +
> -   @AfterClass
>  +  public static void tearDownMiniCluster() throws Exception {
>  +    accumulo.stop();
>  +    tmpDir.delete();
>  +  }
>  +
> -   @Test(timeout = 20000)
> ++  // This test seems to be a little too unstable for a unit test
> ++  @Ignore
>  +  public void test() throws Exception {
>  +    ZooKeeperInstance inst = new ZooKeeperInstance(accumulo.getInstanceName(), accumulo.getZooKeepers());
>  +    Connector c = inst.getConnector("root", new PasswordToken(passwd));
>  +
>  +    final String table = "foobar";
>  +    c.tableOperations().create(table);
>  +
>  +    BatchWriter bw = null;
>  +
>  +    // Add some data
>  +    try {
>  +      bw = c.createBatchWriter(table, new BatchWriterConfig());
>  +      Mutation m = new Mutation("a");
>  +      for (int i = 0; i < 50; i++) {
>  +        m.put("colf", Integer.toString(i), "");
>  +      }
>  +
>  +      bw.addMutation(m);
>  +    } finally {
>  +      if (null != bw) {
>  +        bw.close();
>  +      }
>  +    }
>  +
>  +    final boolean flush = true, wait = true;
>  +
>  +    // Compact the tables to get some rfiles which we can gc
>  +    c.tableOperations().compact(table, null, null, flush, wait);
>  +    c.tableOperations().compact("!METADATA", null, null, flush, wait);
>  +
>  +    File accumuloDir = new File(tmpDir.getRoot().getAbsolutePath(), "accumulo");
>  +    File tables = new File(accumuloDir.getAbsolutePath(), "tables");
>  +
>  +    int fileCountAfterCompaction = FileUtils.listFiles(tables, new SuffixFileFilter(".rf"), TrueFileFilter.TRUE).size();
>  +
>  +    // Sleep for 4s to let the GC do its thing
>  +    for (int i = 1; i < 5; i++) {
>  +      Thread.sleep(1000);
>  +      int fileCountAfterGCWait = FileUtils.listFiles(tables, new SuffixFileFilter(".rf"), TrueFileFilter.TRUE).size();
>  +
>  +      if (fileCountAfterGCWait < fileCountAfterCompaction) {
>  +        return;
>  +      }
>  +    }
>  +
>  +    Assert.fail("Expected to find less files after compaction and pause for GC");
>  +  }
>  +
>  +}
>
>

[4/4] git commit: Merge branch '1.4.5-SNAPSHOT' into 1.5.1-SNAPSHOT

Posted by el...@apache.org.
Merge branch '1.4.5-SNAPSHOT' into 1.5.1-SNAPSHOT

Conflicts:
	examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/1261625b
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/1261625b
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/1261625b

Branch: refs/heads/1.5.1-SNAPSHOT
Commit: 1261625b30691d57473efed313f3baf841d791e6
Parents: ac20fe0 a40a6d4
Author: Josh Elser <el...@apache.org>
Authored: Fri Nov 15 11:43:10 2013 -0800
Committer: Josh Elser <el...@apache.org>
Committed: Fri Nov 15 11:43:10 2013 -0800

----------------------------------------------------------------------
 .../simple/client/RandomBatchWriter.java        |  1 +
 .../minicluster/MiniAccumuloClusterGCTest.java  | 31 ++++++++++++++++----
 2 files changed, 27 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/1261625b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
----------------------------------------------------------------------
diff --cc examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
index 886c4ba,0000000..f9acfd9
mode 100644,000000..100644
--- a/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
+++ b/examples/simple/src/main/java/org/apache/accumulo/examples/simple/client/RandomBatchWriter.java
@@@ -1,169 -1,0 +1,170 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.examples.simple.client;
 +
 +import java.util.HashMap;
 +import java.util.HashSet;
 +import java.util.Map.Entry;
 +import java.util.Random;
 +import java.util.Set;
 +
 +import org.apache.accumulo.core.cli.BatchWriterOpts;
 +import org.apache.accumulo.core.cli.ClientOnRequiredTable;
 +import org.apache.accumulo.core.client.AccumuloException;
 +import org.apache.accumulo.core.client.AccumuloSecurityException;
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.MutationsRejectedException;
 +import org.apache.accumulo.core.client.TableNotFoundException;
 +import org.apache.accumulo.core.client.security.SecurityErrorCode;
 +import org.apache.accumulo.core.data.KeyExtent;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.security.ColumnVisibility;
 +import org.apache.hadoop.io.Text;
 +
 +import com.beust.jcommander.Parameter;
 +
 +/**
 + * Simple example for writing random data to Accumulo. See docs/examples/README.batch for instructions.
 + * 
 + * The rows of the entries will be randomly generated numbers between a specified min and max (prefixed by "row_"). The column families will be "foo" and column
 + * qualifiers will be "1". The values will be random byte arrays of a specified size.
 + */
 +public class RandomBatchWriter {
 +  
 +  /**
 +   * Creates a random byte array of specified size using the specified seed.
 +   * 
 +   * @param rowid
 +   *          the seed to use for the random number generator
 +   * @param dataSize
 +   *          the size of the array
 +   * @return a random byte array
 +   */
 +  public static byte[] createValue(long rowid, int dataSize) {
 +    Random r = new Random(rowid);
 +    byte value[] = new byte[dataSize];
 +    
 +    r.nextBytes(value);
 +    
 +    // transform to printable chars
 +    for (int j = 0; j < value.length; j++) {
 +      value[j] = (byte) (((0xff & value[j]) % 92) + ' ');
 +    }
 +    
 +    return value;
 +  }
 +  
 +  /**
 +   * Creates a mutation on a specified row with column family "foo", column qualifier "1", specified visibility, and a random value of specified size.
 +   * 
 +   * @param rowid
 +   *          the row of the mutation
 +   * @param dataSize
 +   *          the size of the random value
 +   * @param visibility
 +   *          the visibility of the entry to insert
 +   * @return a mutation
 +   */
 +  public static Mutation createMutation(long rowid, int dataSize, ColumnVisibility visibility) {
 +    Text row = new Text(String.format("row_%010d", rowid));
 +    
 +    Mutation m = new Mutation(row);
 +    
 +    // create a random value that is a function of the
 +    // row id for verification purposes
 +    byte value[] = createValue(rowid, dataSize);
 +    
 +    m.put(new Text("foo"), new Text("1"), visibility, new Value(value));
 +    
 +    return m;
 +  }
 +  
 +  static class Opts extends ClientOnRequiredTable {
 +    @Parameter(names="--num", required=true)
 +    int num = 0;
 +    @Parameter(names="--min")
 +    long min = 0;
 +    @Parameter(names="--max")
 +    long max = Long.MAX_VALUE;
 +    @Parameter(names="--size", required=true, description="size of the value to write")
 +    int size = 0;
 +    @Parameter(names="--vis", converter=VisibilityConverter.class)
 +    ColumnVisibility visiblity = new ColumnVisibility("");
 +    @Parameter(names="--seed", description="seed for pseudo-random number generator")
 +    Long seed = null;
 +  }
 + 
 +  /**
 +   * Writes a specified number of entries to Accumulo using a {@link BatchWriter}.
 +   * 
 +   * @throws AccumuloException
 +   * @throws AccumuloSecurityException
 +   * @throws TableNotFoundException
 +   */
 +  public static void main(String[] args) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
 +    Opts opts = new Opts();
 +    BatchWriterOpts bwOpts = new BatchWriterOpts();
 +    opts.parseArgs(RandomBatchWriter.class.getName(), args, bwOpts);
 +    
 +    Random r;
 +    if (opts.seed == null)
 +      r = new Random();
 +    else {
 +      r = new Random(opts.seed);
 +    }
 +    
 +    Connector connector = opts.getConnector();
 +    BatchWriter bw = connector.createBatchWriter(opts.tableName, bwOpts.getBatchWriterConfig());
 +    
 +    // reuse the ColumnVisibility object to improve performance
 +    ColumnVisibility cv = opts.visiblity;
 +    
 +    for (int i = 0; i < opts.num; i++) {
 +      
 +      long rowid = (Math.abs(r.nextLong()) % (opts.max - opts.min)) + opts.min;
 +      
 +      Mutation m = createMutation(rowid, opts.size, cv);
 +      
 +      bw.addMutation(m);
 +      
 +    }
 +    
 +    try {
 +      bw.close();
 +    } catch (MutationsRejectedException e) {
 +      if (e.getAuthorizationFailuresMap().size() > 0) {
 +        HashMap<String,Set<SecurityErrorCode>> tables = new HashMap<String,Set<SecurityErrorCode>>();
 +        for (Entry<KeyExtent,Set<SecurityErrorCode>> ke : e.getAuthorizationFailuresMap().entrySet()) {
 +          Set<SecurityErrorCode> secCodes = tables.get(ke.getKey().getTableId().toString());
 +          if (secCodes == null) {
 +            secCodes = new HashSet<SecurityErrorCode>();
 +            tables.put(ke.getKey().getTableId().toString(), secCodes);
 +          }
 +          secCodes.addAll(ke.getValue());
 +        }
 +        System.err.println("ERROR : Not authorized to write to tables : " + tables);
 +      }
 +      
 +      if (e.getConstraintViolationSummaries().size() > 0) {
 +        System.err.println("ERROR : Constraint violations occurred : " + e.getConstraintViolationSummaries());
 +      }
++      System.exit(1);
 +    }
 +  }
 +}

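Since each value written above is a pure function of its row id (createValue seeds a Random with the rowid), the data can be verified after the fact by scanning the table and recomputing the expected bytes. The sketch below only illustrates that check and is not part of the commit; the class and method names (VerifyRandomBatch, verifyTable), the use of empty Authorizations, and the assumption that the --size used at write time is known are all illustrative.

    import java.util.Arrays;
    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.client.TableNotFoundException;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.accumulo.examples.simple.client.RandomBatchWriter;

    public class VerifyRandomBatch {
      /**
       * Scans the table and recomputes the expected value for every entry from its row id.
       * Returns false on the first mismatch.
       */
      public static boolean verifyTable(Connector connector, String table, int dataSize) throws TableNotFoundException {
        Scanner scanner = connector.createScanner(table, new Authorizations());
        for (Entry<Key,Value> entry : scanner) {
          // rows were written as String.format("row_%010d", rowid)
          String row = entry.getKey().getRow().toString();
          long rowid = Long.parseLong(row.substring("row_".length()));
          byte[] expected = RandomBatchWriter.createValue(rowid, dataSize);
          if (!Arrays.equals(expected, entry.getValue().get())) {
            return false;
          }
        }
        return true;
      }
    }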
http://git-wip-us.apache.org/repos/asf/accumulo/blob/1261625b/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java
----------------------------------------------------------------------
diff --cc minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java
index a579397,0000000..a1f58f6
mode 100644,000000..100644
--- a/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java
+++ b/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java
@@@ -1,129 -1,0 +1,150 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one or more
 + * contributor license agreements.  See the NOTICE file distributed with
 + * this work for additional information regarding copyright ownership.
 + * The ASF licenses this file to You under the Apache License, Version 2.0
 + * (the "License"); you may not use this file except in compliance with
 + * the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.accumulo.minicluster;
 +
 +import java.io.File;
 +import java.util.Map;
 +
 +import org.apache.accumulo.core.client.BatchWriter;
 +import org.apache.accumulo.core.client.BatchWriterConfig;
 +import org.apache.accumulo.core.client.Connector;
 +import org.apache.accumulo.core.client.ZooKeeperInstance;
 +import org.apache.accumulo.core.client.security.tokens.PasswordToken;
 +import org.apache.accumulo.core.conf.Property;
 +import org.apache.accumulo.core.data.Mutation;
 +import org.apache.accumulo.server.util.PortUtils;
 +import org.apache.commons.io.FileUtils;
 +import org.apache.commons.io.filefilter.SuffixFileFilter;
 +import org.apache.commons.io.filefilter.TrueFileFilter;
 +import org.apache.log4j.Level;
 +import org.apache.log4j.Logger;
- import org.junit.AfterClass;
 +import org.junit.Assert;
- import org.junit.BeforeClass;
++import org.junit.Ignore;
 +import org.junit.Test;
 +import org.junit.rules.TemporaryFolder;
 +
 +import com.google.common.collect.ImmutableMap;
++import com.google.common.io.Files;
 +
 +/**
 + * 
 + */
 +public class MiniAccumuloClusterGCTest {
 +  
++  @Test
++  public void testGcConfig() throws Exception {
++    File f = Files.createTempDir();
++    f.deleteOnExit();
++    try {
++      MiniAccumuloConfig macConfig = new MiniAccumuloConfig(f, passwd);
++      macConfig.setNumTservers(1);
++  
++      Assert.assertEquals(false, macConfig.shouldRunGC());
++      
++      // Turn on the garbage collector
++      macConfig.runGC(true);
++  
++      Assert.assertEquals(true, macConfig.shouldRunGC());
++    } finally {
++      if (null != f && f.exists()) {
++        f.delete();
++      }
++    }
++  }
++
++  
 +  private static TemporaryFolder tmpDir = new TemporaryFolder();
 +  private static MiniAccumuloConfig macConfig;
 +  private static MiniAccumuloCluster accumulo;
 +  private static final String passwd = "password";
 +  
-   @BeforeClass
 +  public static void setupMiniCluster() throws Exception {
 +    tmpDir.create();
 +    Logger.getLogger("org.apache.zookeeper").setLevel(Level.ERROR);
 +    
 +    macConfig = new MiniAccumuloConfig(tmpDir.getRoot(), passwd);
 +    macConfig.setNumTservers(1);
 +    
 +    // Turn on the garbage collector
 +    macConfig.runGC(true);
 +    
 +    String gcPort = Integer.toString(PortUtils.getRandomFreePort());
 +    
 +    // And tweak the settings to make it run often
 +    Map<String,String> config = ImmutableMap.of(Property.GC_CYCLE_DELAY.getKey(), "1s", Property.GC_CYCLE_START.getKey(), "0s", Property.GC_PORT.getKey(), gcPort);
 +    macConfig.setSiteConfig(config);
 +    
 +    accumulo = new MiniAccumuloCluster(macConfig);
 +    accumulo.start();
 +  }
 +  
-   @AfterClass
 +  public static void tearDownMiniCluster() throws Exception {
 +    accumulo.stop();
 +    tmpDir.delete();
 +  }
 +  
-   @Test(timeout = 20000)
++  // This test seems to be a little too unstable for a unit test
++  @Ignore
 +  public void test() throws Exception {
 +    ZooKeeperInstance inst = new ZooKeeperInstance(accumulo.getInstanceName(), accumulo.getZooKeepers());
 +    Connector c = inst.getConnector("root", new PasswordToken(passwd));
 +    
 +    final String table = "foobar";
 +    c.tableOperations().create(table);
 +    
 +    BatchWriter bw = null;
 +    
 +    // Add some data
 +    try {
 +      bw = c.createBatchWriter(table, new BatchWriterConfig());
 +      Mutation m = new Mutation("a");
 +      for (int i = 0; i < 50; i++) {
 +        m.put("colf", Integer.toString(i), "");
 +      }
 +      
 +      bw.addMutation(m);
 +    } finally {
 +      if (null != bw) {
 +        bw.close();
 +      }
 +    }
 +    
 +    final boolean flush = true, wait = true;
 +    
 +    // Compact the tables to get some rfiles which we can gc
 +    c.tableOperations().compact(table, null, null, flush, wait);
 +    c.tableOperations().compact("!METADATA", null, null, flush, wait);
 +    
 +    File accumuloDir = new File(tmpDir.getRoot().getAbsolutePath(), "accumulo");
 +    File tables = new File(accumuloDir.getAbsolutePath(), "tables");
 +    
 +    int fileCountAfterCompaction = FileUtils.listFiles(tables, new SuffixFileFilter(".rf"), TrueFileFilter.TRUE).size();
 +    
 +    // Sleep for 4s to let the GC do its thing
 +    for (int i = 1; i < 5; i++) {
 +      Thread.sleep(1000);
 +      int fileCountAfterGCWait = FileUtils.listFiles(tables, new SuffixFileFilter(".rf"), TrueFileFilter.TRUE).size();
 +
 +      if (fileCountAfterGCWait < fileCountAfterCompaction) {
 +        return;
 +      }
 +    }
 +    
 +    Assert.fail("Expected to find less files after compaction and pause for GC");
 +  }
 +  
 +}


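The ignored test above waits for the garbage collector by sleeping one second at a time and re-checking the rfile count, failing if nothing changes within four seconds. That wait-and-recheck pattern can be written once as a small helper; the sketch below is only an illustration of the technique, and the Condition interface and waitFor method are made-up names, not Accumulo or JUnit API.

    public class WaitFor {
      public interface Condition {
        boolean isMet() throws Exception;
      }

      /**
       * Polls the condition once per second until it holds or the timeout elapses.
       * Returns the final state of the condition.
       */
      public static boolean waitFor(Condition condition, long maxWaitMillis) throws Exception {
        long deadline = System.currentTimeMillis() + maxWaitMillis;
        while (System.currentTimeMillis() < deadline) {
          if (condition.isMet()) {
            return true;
          }
          Thread.sleep(1000);
        }
        return condition.isMet();
      }
    }

With such a helper, the GC assertion in the test reduces to one call that passes an anonymous Condition comparing the current rfile count against fileCountAfterCompaction with a 4000 ms timeout.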
[2/4] git commit: ACCUMULO-1890 Clean up the test to avoid spinning up a MAC

Posted by el...@apache.org.
ACCUMULO-1890 Clean up the test to avoid spinning up a MAC


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/7e23cf28
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/7e23cf28
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/7e23cf28

Branch: refs/heads/1.5.1-SNAPSHOT
Commit: 7e23cf2854387a3b27d460a174e12eb0f40bfe0d
Parents: 4119611
Author: Josh Elser <el...@apache.org>
Authored: Fri Nov 15 11:26:30 2013 -0800
Committer: Josh Elser <el...@apache.org>
Committed: Fri Nov 15 11:30:31 2013 -0800

----------------------------------------------------------------------
 .../minicluster/MiniAccumuloClusterGCTest.java  | 23 +++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/7e23cf28/src/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java
----------------------------------------------------------------------
diff --git a/src/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java b/src/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java
index b73523f..d58eb47 100644
--- a/src/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java
+++ b/src/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java
@@ -30,9 +30,8 @@ import org.apache.commons.io.filefilter.SuffixFileFilter;
 import org.apache.commons.io.filefilter.TrueFileFilter;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
-import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 
@@ -43,12 +42,26 @@ import com.google.common.collect.ImmutableMap;
  */
 public class MiniAccumuloClusterGCTest {
   
+  @Test
+  public void testGcConfig() throws Exception {
+
+    MiniAccumuloConfig macConfig = new MiniAccumuloConfig(tmpDir.getRoot(), passwd);
+    macConfig.setNumTservers(1);
+
+    Assert.assertEquals(false, macConfig.shouldRunGC());
+    
+    // Turn on the garbage collector
+    macConfig.runGC(true);
+
+    Assert.assertEquals(true, macConfig.shouldRunGC());
+  }
+
+  
   private static TemporaryFolder tmpDir = new TemporaryFolder();
   private static MiniAccumuloConfig macConfig;
   private static MiniAccumuloCluster accumulo;
   private static final String passwd = "password";
   
-  @BeforeClass
   public static void setupMiniCluster() throws Exception {
     tmpDir.create();
     Logger.getLogger("org.apache.zookeeper").setLevel(Level.ERROR);
@@ -69,13 +82,13 @@ public class MiniAccumuloClusterGCTest {
     accumulo.start();
   }
   
-  @AfterClass
   public static void tearDownMiniCluster() throws Exception {
     accumulo.stop();
     tmpDir.delete();
   }
   
-  @Test(timeout = 20000)
+  // This test seems to be a little too unstable for a unit test
+  @Ignore
   public void test() throws Exception {
     ZooKeeperInstance inst = new ZooKeeperInstance(accumulo.getInstanceName(), accumulo.getZooKeepers());
     Connector c = inst.getConnector("root", passwd);


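The follow-up commit below swaps tmpDir.getRoot() for a Guava temp directory, since the @BeforeClass setup that created the TemporaryFolder was removed in the commit above. Purely as an illustration of an alternative (not what the commits do), the same per-test directory handling can be had by keeping TemporaryFolder and wiring it in as a JUnit @Rule, which creates the directory before each test and removes it afterwards; the class and test names here are made up, while MiniAccumuloConfig and its runGC/shouldRunGC methods are the ones touched by this thread.

    package org.apache.accumulo.minicluster;

    import org.junit.Assert;
    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.TemporaryFolder;

    public class MiniAccumuloConfigGCRuleExample {
      // The rule creates a fresh directory before each test and deletes it afterwards,
      // so no deleteOnExit()/delete() bookkeeping is needed.
      @Rule
      public TemporaryFolder folder = new TemporaryFolder();

      @Test
      public void gcIsOffByDefault() throws Exception {
        MiniAccumuloConfig macConfig = new MiniAccumuloConfig(folder.getRoot(), "password");
        macConfig.setNumTservers(1);

        Assert.assertEquals(false, macConfig.shouldRunGC());

        // Turn on the garbage collector
        macConfig.runGC(true);

        Assert.assertEquals(true, macConfig.shouldRunGC());
      }
    }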
[3/4] git commit: ACCUMULO-1890 Forgot to re-add changes before commit

Posted by el...@apache.org.
ACCUMULO-1890 Forgot to re-add changes before commit


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/a40a6d42
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/a40a6d42
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/a40a6d42

Branch: refs/heads/1.5.1-SNAPSHOT
Commit: a40a6d423e42da45193a4ecf480a3dcbe43b88a8
Parents: 7e23cf2
Author: Josh Elser <el...@apache.org>
Authored: Fri Nov 15 11:37:33 2013 -0800
Committer: Josh Elser <el...@apache.org>
Committed: Fri Nov 15 11:37:33 2013 -0800

----------------------------------------------------------------------
 .../minicluster/MiniAccumuloClusterGCTest.java  | 28 +++++++++++++-------
 1 file changed, 18 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/a40a6d42/src/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java
----------------------------------------------------------------------
diff --git a/src/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java b/src/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java
index d58eb47..c32e719 100644
--- a/src/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java
+++ b/src/minicluster/src/test/java/org/apache/accumulo/minicluster/MiniAccumuloClusterGCTest.java
@@ -36,6 +36,7 @@ import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
 
 import com.google.common.collect.ImmutableMap;
+import com.google.common.io.Files;
 
 /**
  * 
@@ -44,16 +45,23 @@ public class MiniAccumuloClusterGCTest {
   
   @Test
   public void testGcConfig() throws Exception {
-
-    MiniAccumuloConfig macConfig = new MiniAccumuloConfig(tmpDir.getRoot(), passwd);
-    macConfig.setNumTservers(1);
-
-    Assert.assertEquals(false, macConfig.shouldRunGC());
-    
-    // Turn on the garbage collector
-    macConfig.runGC(true);
-
-    Assert.assertEquals(true, macConfig.shouldRunGC());
+    File f = Files.createTempDir();
+    f.deleteOnExit();
+    try {
+      MiniAccumuloConfig macConfig = new MiniAccumuloConfig(f, passwd);
+      macConfig.setNumTservers(1);
+  
+      Assert.assertEquals(false, macConfig.shouldRunGC());
+      
+      // Turn on the garbage collector
+      macConfig.runGC(true);
+  
+      Assert.assertEquals(true, macConfig.shouldRunGC());
+    } finally {
+      if (null != f && f.exists()) {
+        f.delete();
+      }
+    }
   }