Posted to commits@accumulo.apache.org by ct...@apache.org on 2014/12/06 02:06:30 UTC

[1/2] accumulo git commit: ACCUMULO-3178 Create example preferred volumes chooser

Repository: accumulo
Updated Branches:
  refs/heads/master 8031dcda6 -> ef909d5fb


ACCUMULO-3178 Create example preferred volumes chooser

Signed-off-by: Christopher Tubbs <ct...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/ef909d5f
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/ef909d5f
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/ef909d5f

Branch: refs/heads/master
Commit: ef909d5fb6ce0eb83d35ae1e702885fb9b98cf94
Parents: 3f44f8c
Author: Jenna Huston <je...@gmail.com>
Authored: Tue Oct 28 09:29:52 2014 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Fri Dec 5 20:04:22 2014 -0500

----------------------------------------------------------------------
 .../server/fs/PreferredVolumeChooser.java       |  80 ++++
 .../accumulo/server/fs/RandomVolumeChooser.java |   2 +-
 .../apache/accumulo/test/VolumeChooserIT.java   | 395 +++++++++++++++++++
 3 files changed, 476 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/ef909d5f/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java b/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java
new file mode 100644
index 0000000..7ed7bba
--- /dev/null
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/PreferredVolumeChooser.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server.fs;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.accumulo.core.conf.AccumuloConfiguration.PropertyFilter;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.conf.ServerConfigurationFactory;
+import org.apache.accumulo.server.conf.TableConfiguration;
+import org.apache.log4j.Logger;
+
+public class PreferredVolumeChooser extends RandomVolumeChooser implements VolumeChooser {
+  private static final Logger log = Logger.getLogger(PreferredVolumeChooser.class);
+
+  public static final String PREFERRED_VOLUMES_CUSTOM_KEY = Property.TABLE_ARBITRARY_PROP_PREFIX.getKey() + "preferredVolumes";
+
+  public PreferredVolumeChooser() {}
+
+  @Override
+  public String choose(VolumeChooserEnvironment env, String[] options) {
+    if (!env.hasTableId())
+      return super.choose(env, options);
+
+    // Get the current table's properties, and find the preferred volumes property
+    TableConfiguration config = new ServerConfigurationFactory(HdfsZooInstance.getInstance()).getTableConfiguration(env.getTableId());
+    PropertyFilter filter = new PropertyFilter() {
+      @Override
+      public boolean accept(String key) {
+        return PREFERRED_VOLUMES_CUSTOM_KEY.equals(key);
+      }
+    };
+    Map<String,String> props = new HashMap<>();
+    config.getProperties(props, filter);
+    if (props.isEmpty()) {
+      log.warn("No preferred volumes specified. Defaulting to randomly choosing from instance volumes");
+      return super.choose(env, options);
+    }
+    String volumes = props.get(PREFERRED_VOLUMES_CUSTOM_KEY);
+    log.trace("In custom chooser");
+    log.trace("Volumes: " + volumes);
+    log.trace("TableID: " + env.getTableId());
+
+    ArrayList<String> prefVol = new ArrayList<String>();
+    // Split the comma-separated preferred volumes property into a list
+    prefVol.addAll(Arrays.asList(volumes.split(",")));
+
+    // Keep only the preferred volumes that are also present in the given options array
+    prefVol.retainAll(Arrays.asList(options));
+
+    // If there are no preferred volumes left, then warn the user and choose randomly from the instance volumes
+    if (prefVol.isEmpty()) {
+      log.warn("Preferred volumes are not instance volumes. Defaulting to randomly choosing from instance volumes");
+      return super.choose(env, options);
+    }
+
+    // Randomly choose the volume from the preferred volumes
+    String choice = prefVol.get(random.nextInt(prefVol.size()));
+    log.trace("Choice = " + choice);
+    return choice;
+  }
+}
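
A minimal usage sketch for this chooser, assuming an existing Connector named "connector", a hypothetical table "ns.mytable", and hypothetical volume URIs (the property keys are the ones defined in this patch and in its parent commit):

    // Illustrative only: point one table at the PreferredVolumeChooser.
    connector.tableOperations().setProperty("ns.mytable",
        Property.TABLE_VOLUME_CHOOSER.getKey(),              // "table.volume.chooser"
        PreferredVolumeChooser.class.getName());
    connector.tableOperations().setProperty("ns.mytable",
        PreferredVolumeChooser.PREFERRED_VOLUMES_CUSTOM_KEY,  // "table.custom.preferredVolumes"
        "hdfs://nn1/accumulo,hdfs://nn2/accumulo");           // comma-separated preferred volumes

For the per-table setting to take effect, general.volume.chooser must point at the PerTableVolumeChooser introduced in the second commit below; VolumeChooserIT exercises exactly that combination.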

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ef909d5f/server/base/src/main/java/org/apache/accumulo/server/fs/RandomVolumeChooser.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/RandomVolumeChooser.java b/server/base/src/main/java/org/apache/accumulo/server/fs/RandomVolumeChooser.java
index 85d4e2b..f2eb211 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/RandomVolumeChooser.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/RandomVolumeChooser.java
@@ -19,7 +19,7 @@ package org.apache.accumulo.server.fs;
 import java.util.Random;
 
 public class RandomVolumeChooser implements VolumeChooser {
-  private static Random random = new Random();
+  protected static Random random = new Random();
 
   @Override
   public String choose(VolumeChooserEnvironment env, String[] options) {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/ef909d5f/test/src/test/java/org/apache/accumulo/test/VolumeChooserIT.java
----------------------------------------------------------------------
diff --git a/test/src/test/java/org/apache/accumulo/test/VolumeChooserIT.java b/test/src/test/java/org/apache/accumulo/test/VolumeChooserIT.java
new file mode 100644
index 0000000..8ca141b
--- /dev/null
+++ b/test/src/test/java/org/apache/accumulo/test/VolumeChooserIT.java
@@ -0,0 +1,395 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.accumulo.core.client.AccumuloException;
+import org.apache.accumulo.core.client.AccumuloSecurityException;
+import org.apache.accumulo.core.client.BatchWriter;
+import org.apache.accumulo.core.client.BatchWriterConfig;
+import org.apache.accumulo.core.client.Connector;
+import org.apache.accumulo.core.client.Scanner;
+import org.apache.accumulo.core.client.TableNotFoundException;
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.data.Key;
+import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
+import org.apache.accumulo.core.data.Value;
+import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
+import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
+import org.apache.accumulo.core.security.Authorizations;
+import org.apache.accumulo.minicluster.impl.MiniAccumuloConfigImpl;
+import org.apache.accumulo.server.fs.PerTableVolumeChooser;
+import org.apache.accumulo.server.fs.PreferredVolumeChooser;
+import org.apache.accumulo.server.fs.RandomVolumeChooser;
+import org.apache.accumulo.test.functional.ConfigurableMacIT;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.apache.hadoop.io.Text;
+import org.junit.Test;
+
+/**
+ * Integration test for the volume chooser implementations (PerTableVolumeChooser, PreferredVolumeChooser, RandomVolumeChooser).
+ */
+public class VolumeChooserIT extends ConfigurableMacIT {
+
+  private static final Text EMPTY = new Text();
+  private static final Value EMPTY_VALUE = new Value(new byte[] {});
+  private File volDirBase;
+  private Path v1, v2, v3, v4;
+  private String[] rows = "a,b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u,v,w,x,y,z".split(",");
+  private String namespace1;
+  private String namespace2;
+
+  @Override
+  protected int defaultTimeoutSeconds() {
+    return 30;
+  }
+
+  @Override
+  public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
+    // Get 2 tablet servers
+    cfg.setNumTservers(2);
+    namespace1 = "ns_" + getUniqueNames(2)[0];
+    namespace2 = "ns_" + getUniqueNames(2)[1];
+
+    // Set the general volume chooser to the PerTableVolumeChooser so that different choosers can be specified
+    Map<String,String> siteConfig = new HashMap<String,String>();
+    siteConfig.put(Property.GENERAL_VOLUME_CHOOSER.getKey(), PerTableVolumeChooser.class.getName());
+    cfg.setSiteConfig(siteConfig);
+
+    // Set up 4 different volume paths
+    File baseDir = cfg.getDir();
+    volDirBase = new File(baseDir, "volumes");
+    File v1f = new File(volDirBase, "v1");
+    File v2f = new File(volDirBase, "v2");
+    File v3f = new File(volDirBase, "v3");
+    File v4f = new File(volDirBase, "v4");
+    v1f.mkdir();
+    v2f.mkdir();
+    v4f.mkdir();
+    v1 = new Path("file://" + v1f.getAbsolutePath());
+    v2 = new Path("file://" + v2f.getAbsolutePath());
+    v3 = new Path("file://" + v3f.getAbsolutePath());
+    v4 = new Path("file://" + v4f.getAbsolutePath());
+
+    // Only add volumes 1, 2, and 4 to the list of instance volumes so that v3 is a volume that will not be in the options list when a chooser runs
+    cfg.setProperty(Property.INSTANCE_VOLUMES, v1.toString() + "," + v2.toString() + "," + v4.toString());
+
+    // use raw local file system so walogs sync and flush will work
+    hadoopCoreSite.set("fs.file.impl", RawLocalFileSystem.class.getName());
+
+    super.configure(cfg, hadoopCoreSite);
+
+  }
+
+  public void addSplits(Connector connector, String tableName) throws TableNotFoundException, AccumuloException, AccumuloSecurityException {
+    // Add 10 splits to the table
+    SortedSet<Text> partitions = new TreeSet<Text>();
+    for (String s : "b,e,g,j,l,o,q,t,v,y".split(","))
+      partitions.add(new Text(s));
+    connector.tableOperations().addSplits(tableName, partitions);
+  }
+
+  public void writeAndReadData(Connector connector, String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
+    // Write some data to the table
+    BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
+    for (String s : rows) {
+      Mutation m = new Mutation(new Text(s));
+      m.put(EMPTY, EMPTY, EMPTY_VALUE);
+      bw.addMutation(m);
+    }
+    bw.close();
+
+    // Write the data to disk, read it back
+    connector.tableOperations().flush(tableName, null, null, true);
+    Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
+    int i = 0;
+    for (Entry<Key,Value> entry : scanner) {
+      assertEquals("Data read is not data written", rows[i++], entry.getKey().getRow().toString());
+    }
+  }
+
+  public void verifyVolumes(Connector connector, String tableName, Range tableRange, String vol) throws TableNotFoundException {
+    // Verify the new files are written to the Volumes specified
+    ArrayList<String> volumes = new ArrayList<String>();
+    for (String s : vol.split(","))
+      volumes.add(s);
+
+    Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
+    scanner.setRange(tableRange);
+    scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
+    int fileCount = 0;
+    for (Entry<Key,Value> entry : scanner) {
+      boolean inVolume = false;
+      for (String volume : volumes) {
+        if (entry.getKey().getColumnQualifier().toString().contains(volume))
+          inVolume = true;
+      }
+      assertTrue("Data not written to the correct volumes", inVolume);
+      fileCount++;
+    }
+    assertEquals("Wrong number of files", 11, fileCount);
+  }
+
+  // Test that uses two tables with 10 split points each. They each use the PreferredVolumeChooser to choose volumes.
+  @Test
+  public void twoTablesPreferredVolumeChooser() throws Exception {
+    log.info("Starting twoTablesPreferredVolumeChooser");
+
+    // Create namespace
+    Connector connector = getConnector();
+    connector.namespaceOperations().create(namespace1);
+
+    // Set properties on the namespace
+    String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
+    String volume = PreferredVolumeChooser.class.getName();
+    connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
+
+    propertyName = "table.custom.preferredVolumes";
+    volume = v2.toString();
+    connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
+
+    // Create table1 on namespace1
+    String tableName = namespace1 + ".1";
+    connector.tableOperations().create(tableName);
+    String tableID = connector.tableOperations().tableIdMap().get(tableName);
+
+    // Add 10 splits to the table
+    addSplits(connector, tableName);
+    // Write some data to the table
+    writeAndReadData(connector, tableName);
+    // Verify the new files are written to the Volumes specified
+    verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), volume);
+
+    connector.namespaceOperations().create(namespace2);
+
+    // Set properties on the namespace
+    propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
+    volume = PreferredVolumeChooser.class.getName();
+    connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
+
+    propertyName = "table.custom.preferredVolumes";
+    volume = v1.toString();
+    connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
+
+    // Create table2 on namespace2
+    String tableName2 = namespace2 + ".1";
+
+    connector.tableOperations().create(tableName2);
+    String tableID2 = connector.tableOperations().tableIdMap().get(tableName2);
+
+    // Add 10 splits to the table
+    addSplits(connector, tableName2);
+    // Write some data to the table
+    writeAndReadData(connector, tableName2);
+    // Verify the new files are written to the Volumes specified
+    verifyVolumes(connector, tableName2, TabletsSection.getRange(tableID2), volume);
+  }
+
+  // Test that uses two tables with 10 split points each. They each use the RandomVolumeChooser to choose volumes.
+  @Test
+  public void twoTablesRandomVolumeChooser() throws Exception {
+    log.info("Starting twoTablesRandomVolumeChooser()");
+
+    // Create namespace
+    Connector connector = getConnector();
+    connector.namespaceOperations().create(namespace1);
+
+    // Set properties on the namespace
+    String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
+    String volume = RandomVolumeChooser.class.getName();
+    connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
+
+    // Create table1 on namespace1
+    String tableName = namespace1 + ".1";
+    connector.tableOperations().create(tableName);
+    String tableID = connector.tableOperations().tableIdMap().get(tableName);
+
+    // Add 10 splits to the table
+    addSplits(connector, tableName);
+    // Write some data to the table
+    writeAndReadData(connector, tableName);
+    // Verify the new files are written to the Volumes specified
+
+    verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
+
+    connector.namespaceOperations().create(namespace2);
+
+    // Set properties on the namespace
+    propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
+    volume = RandomVolumeChooser.class.getName();
+    connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
+
+    // Create table2 on namespace2
+    String tableName2 = namespace2 + ".1";
+    connector.tableOperations().create(tableName2);
+    String tableID2 = connector.tableOperations().tableIdMap().get(tableName2);
+
+    // Add 10 splits to the table
+    addSplits(connector, tableName2);
+    // Write some data to the table
+    writeAndReadData(connector, tableName2);
+    // Verify the new files are written to the Volumes specified
+    verifyVolumes(connector, tableName2, TabletsSection.getRange(tableID2), v1.toString() + "," + v2.toString() + "," + v4.toString());
+  }
+
+  // Test that uses two tables with 10 split points each. The first uses the RandomVolumeChooser and the second uses the
+  // PreferredVolumeChooser to choose volumes.
+  @Test
+  public void twoTablesDiffChoosers() throws Exception {
+    log.info("Starting twoTablesDiffChoosers");
+
+    // Create namespace
+    Connector connector = getConnector();
+    connector.namespaceOperations().create(namespace1);
+
+    // Set properties on the namespace
+    String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
+    String volume = RandomVolumeChooser.class.getName();
+    connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
+
+    // Create table1 on namespace1
+    String tableName = namespace1 + ".1";
+    connector.tableOperations().create(tableName);
+    String tableID = connector.tableOperations().tableIdMap().get(tableName);
+
+    // Add 10 splits to the table
+    addSplits(connector, tableName);
+    // Write some data to the table
+    writeAndReadData(connector, tableName);
+    // Verify the new files are written to the Volumes specified
+
+    verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
+
+    connector.namespaceOperations().create(namespace2);
+
+    // Set properties on the namespace
+    propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
+    volume = PreferredVolumeChooser.class.getName();
+    connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
+
+    propertyName = "table.custom.preferredVolumes";
+    volume = v1.toString();
+    connector.namespaceOperations().setProperty(namespace2, propertyName, volume);
+
+    // Create table2 on namespace2
+    String tableName2 = namespace2 + ".1";
+    connector.tableOperations().create(tableName2);
+    String tableID2 = connector.tableOperations().tableIdMap().get(tableName2);
+
+    // Add 10 splits to the table
+    addSplits(connector, tableName2);
+    // Write some data to the table
+    writeAndReadData(connector, tableName2);
+    // Verify the new files are written to the Volumes specified
+    verifyVolumes(connector, tableName2, TabletsSection.getRange(tableID2), volume);
+  }
+
+  // Test that uses one table with 10 split points. It uses the PreferredVolumeChooser, but no preferred volumes property is specified. This means that the
+  // volume is chosen randomly from all instance volumes.
+  @Test
+  public void missingVolumePreferredVolumeChooser() throws Exception {
+    log.info("Starting missingVolumePreferredVolumeChooser");
+
+    // Create namespace
+    Connector connector = getConnector();
+    connector.namespaceOperations().create(namespace1);
+
+    // Set properties on the namespace
+    String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
+    String volume = PreferredVolumeChooser.class.getName();
+    connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
+
+    // Create table1 on namespace1
+    String tableName = namespace1 + ".1";
+    connector.tableOperations().create(tableName);
+    String tableID = connector.tableOperations().tableIdMap().get(tableName);
+
+    // Add 10 splits to the table
+    addSplits(connector, tableName);
+    // Write some data to the table
+    writeAndReadData(connector, tableName);
+    // Verify the new files are written to the Volumes specified
+    verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
+  }
+
+  // Test that uses one table with 10 split points. It uses the PreferredVolumeChooser, but the preferred volume is not an instance volume. This means that the
+  // volume is chosen randomly from all instance volumes.
+  @Test
+  public void notInstancePreferredVolumeChooser() throws Exception {
+    log.info("Starting notInstancePreferredVolumeChooser");
+
+    // Create namespace
+    Connector connector = getConnector();
+    connector.namespaceOperations().create(namespace1);
+
+    // Set properties on the namespace
+    String propertyName = Property.TABLE_VOLUME_CHOOSER.getKey();
+    String volume = PreferredVolumeChooser.class.getName();
+    connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
+
+    propertyName = "table.custom.preferredVolumes";
+    volume = v3.toString();
+    connector.namespaceOperations().setProperty(namespace1, propertyName, volume);
+
+    // Create table1 on namespace1
+    String tableName = namespace1 + ".1";
+    connector.tableOperations().create(tableName);
+    String tableID = connector.tableOperations().tableIdMap().get(tableName);
+
+    // Add 10 splits to the table
+    addSplits(connector, tableName);
+    // Write some data to the table
+    writeAndReadData(connector, tableName);
+    // Verify the new files are written to the Volumes specified
+    verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
+  }
+
+  // Test that uses one table with 10 split points. It does not specify a chooser, so the volume is chosen randomly from all instance volumes.
+  @Test
+  public void chooserNotSpecified() throws Exception {
+    log.info("Starting chooserNotSpecified");
+
+    // Create a table
+    Connector connector = getConnector();
+    String tableName = getUniqueNames(2)[0];
+    connector.tableOperations().create(tableName);
+    String tableID = connector.tableOperations().tableIdMap().get(tableName);
+
+    // Add 10 splits to the table
+    addSplits(connector, tableName);
+    // Write some data to the table
+    writeAndReadData(connector, tableName);
+
+    // Verify the new files are written to the Volumes specified
+    verifyVolumes(connector, tableName, TabletsSection.getRange(tableID), v1.toString() + "," + v2.toString() + "," + v4.toString());
+  }
+
+}


[2/2] accumulo git commit: ACCUMULO-3177 Create a per table volume chooser

Posted by ct...@apache.org.
ACCUMULO-3177 Create a per table volume chooser

Signed-off-by: Christopher Tubbs <ct...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/3f44f8c1
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/3f44f8c1
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/3f44f8c1

Branch: refs/heads/master
Commit: 3f44f8c191941ecb57656eaa5ddd4177c71cbfe0
Parents: 8031dcd
Author: Jenna Huston <je...@gmail.com>
Authored: Mon Oct 27 14:06:32 2014 -0400
Committer: Christopher Tubbs <ct...@apache.org>
Committed: Fri Dec 5 20:04:22 2014 -0500

----------------------------------------------------------------------
 .../org/apache/accumulo/core/conf/Property.java |  4 +-
 .../server/fs/PerTableVolumeChooser.java        | 42 ++++++++++++++++++++
 .../accumulo/server/fs/RandomVolumeChooser.java |  7 ++--
 .../accumulo/server/fs/VolumeChooser.java       |  3 +-
 .../server/fs/VolumeChooserEnvironment.java     | 37 +++++++++++++++++
 .../accumulo/server/fs/VolumeManager.java       |  4 +-
 .../accumulo/server/fs/VolumeManagerImpl.java   |  5 ++-
 .../apache/accumulo/server/fs/VolumeUtil.java   |  4 +-
 .../apache/accumulo/server/init/Initialize.java | 37 ++++++++---------
 .../apache/accumulo/server/util/FileUtil.java   |  6 ++-
 .../accumulo/server/util/MetadataTableUtil.java |  6 ++-
 .../accumulo/server/util/RandomizeVolumes.java  |  4 +-
 .../accumulo/server/util/TabletOperations.java  |  4 +-
 .../java/org/apache/accumulo/master/Master.java |  3 +-
 .../accumulo/master/TabletGroupWatcher.java     |  3 +-
 .../accumulo/master/tableOps/CreateTable.java   |  4 +-
 .../accumulo/master/tableOps/ImportTable.java   |  4 +-
 .../master/tableOps/ImportTableTest.java        |  4 +-
 .../apache/accumulo/tserver/log/DfsLogger.java  |  4 +-
 .../tserver/TabletServerSyncCheckTest.java      |  3 +-
 .../apache/accumulo/test/FairVolumeChooser.java |  3 +-
 21 files changed, 145 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/core/src/main/java/org/apache/accumulo/core/conf/Property.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/accumulo/core/conf/Property.java b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
index cc7d548..4c2d0b4 100644
--- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@ -165,7 +165,7 @@ public enum Property {
   GENERAL_SIMPLETIMER_THREADPOOL_SIZE("general.server.simpletimer.threadpool.size", "1", PropertyType.COUNT, "The number of threads to use for "
       + "server-internal scheduled tasks"),
   @Experimental
-  GENERAL_VOLUME_CHOOSER("general.volume.chooser", "org.apache.accumulo.server.fs.RandomVolumeChooser", PropertyType.CLASSNAME,
+  GENERAL_VOLUME_CHOOSER("general.volume.chooser", "org.apache.accumulo.server.fs.PerTableVolumeChooser", PropertyType.CLASSNAME,
       "The class that will be used to select which volume will be used to create new files."),
   GENERAL_SECURITY_CREDENTIAL_PROVIDER_PATHS("general.security.credential.provider.paths", "", PropertyType.STRING, "Comma-separated list of paths to CredentialProviders"),
   GENERAL_LEGACY_METRICS("general.legacy.metrics", "false", PropertyType.BOOLEAN,
@@ -467,6 +467,8 @@ public enum Property {
   TABLE_REPLICATION_TARGET("table.replication.target.", null, PropertyType.PREFIX, "Enumerate a mapping of other systems which this table should " +
       "replicate their data to. The key suffix is the identifying cluster name and the value is an identifier for a location on the target system, " +
       "e.g. the ID of the table on the target to replicate to"),
+  TABLE_VOLUME_CHOOSER("table.volume.chooser", "org.apache.accumulo.server.fs.RandomVolumeChooser", PropertyType.CLASSNAME,
+      "The class that will be used to select which volume will be used to create new files for this table."),
 
   // VFS ClassLoader properties
   VFS_CLASSLOADER_SYSTEM_CLASSPATH_PROPERTY(AccumuloVFSClassLoader.VFS_CLASSLOADER_SYSTEM_CLASSPATH_PROPERTY, "", PropertyType.STRING,

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/base/src/main/java/org/apache/accumulo/server/fs/PerTableVolumeChooser.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/PerTableVolumeChooser.java b/server/base/src/main/java/org/apache/accumulo/server/fs/PerTableVolumeChooser.java
new file mode 100644
index 0000000..7a825c7
--- /dev/null
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/PerTableVolumeChooser.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server.fs;
+
+import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.server.client.HdfsZooInstance;
+import org.apache.accumulo.server.conf.ServerConfigurationFactory;
+import org.apache.accumulo.server.conf.TableConfiguration;
+
+public class PerTableVolumeChooser implements VolumeChooser {
+
+  private static final VolumeChooser fallbackVolumeChooser = new RandomVolumeChooser();
+
+  public PerTableVolumeChooser() {}
+
+  @Override
+  public String choose(VolumeChooserEnvironment env, String[] options) {
+    VolumeChooser chooser;
+    if (env.hasTableId()) {
+      TableConfiguration conf = new ServerConfigurationFactory(HdfsZooInstance.getInstance()).getTableConfiguration(env.getTableId());
+      chooser = Property.createTableInstanceFromPropertyName(conf, Property.TABLE_VOLUME_CHOOSER, VolumeChooser.class, fallbackVolumeChooser);
+    } else {
+      chooser = fallbackVolumeChooser;
+    }
+
+    return chooser.choose(env, options);
+  }
+}
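
With this class configured as general.volume.chooser, each table can pick its own chooser through the new table.volume.chooser property. A hedged sketch, assuming an existing Connector "connector" and two hypothetical tables; tables that set nothing fall back to the property's default, RandomVolumeChooser:

    // Illustrative only: two tables delegating to different choosers
    // (PreferredVolumeChooser comes from the first message above).
    connector.tableOperations().setProperty("logs",
        Property.TABLE_VOLUME_CHOOSER.getKey(), PreferredVolumeChooser.class.getName());
    connector.tableOperations().setProperty("scratch",
        Property.TABLE_VOLUME_CHOOSER.getKey(), RandomVolumeChooser.class.getName());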

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/base/src/main/java/org/apache/accumulo/server/fs/RandomVolumeChooser.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/RandomVolumeChooser.java b/server/base/src/main/java/org/apache/accumulo/server/fs/RandomVolumeChooser.java
index 2760b07..85d4e2b 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/RandomVolumeChooser.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/RandomVolumeChooser.java
@@ -19,11 +19,10 @@ package org.apache.accumulo.server.fs;
 import java.util.Random;
 
 public class RandomVolumeChooser implements VolumeChooser {
-  Random random = new Random();
-  
+  private static Random random = new Random();
+
   @Override
-  public String choose(String[] options) {
+  public String choose(VolumeChooserEnvironment env, String[] options) {
     return options[random.nextInt(options.length)];
   }
-
 }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeChooser.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeChooser.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeChooser.java
index 8713c97..f523057 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeChooser.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeChooser.java
@@ -16,7 +16,6 @@
  */
 package org.apache.accumulo.server.fs;
 
-
 public interface VolumeChooser {
-  String choose(String[] options);
+  String choose(VolumeChooserEnvironment env, String[] options);
 }
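
The interface now receives a VolumeChooserEnvironment alongside the candidate volumes. A minimal sketch of a custom implementation against the new signature (illustrative only, not part of this patch; the package and class name are hypothetical):

    package org.example.chooser;

    import org.apache.accumulo.server.fs.VolumeChooser;
    import org.apache.accumulo.server.fs.VolumeChooserEnvironment;

    // Always returns the first configured volume; useful only to show the new method shape.
    public class FirstVolumeChooser implements VolumeChooser {
      @Override
      public String choose(VolumeChooserEnvironment env, String[] options) {
        return options[0];
      }
    }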

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeChooserEnvironment.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeChooserEnvironment.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeChooserEnvironment.java
new file mode 100644
index 0000000..b6d27cb
--- /dev/null
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeChooserEnvironment.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.accumulo.server.fs;
+
+import com.google.common.base.Optional;
+
+public class VolumeChooserEnvironment {
+
+  private final Optional<String> tableId;
+
+  public VolumeChooserEnvironment(Optional<String> tableId) {
+    this.tableId = tableId;
+  }
+
+  public boolean hasTableId() {
+    return tableId.isPresent();
+  }
+
+  public String getTableId() {
+    return tableId.get();
+  }
+
+}
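
Callers elsewhere in this patch construct the environment in one of two ways, sketched here for clarity (the table id "2a" is purely illustrative):

    import com.google.common.base.Optional;
    import org.apache.accumulo.server.fs.VolumeChooserEnvironment;

    // Table-scoped request, e.g. when choosing a directory for a tablet of table id "2a".
    VolumeChooserEnvironment forTable = new VolumeChooserEnvironment(Optional.of("2a"));

    // Table-less request, as used for write-ahead logs and during initialization.
    VolumeChooserEnvironment noTable = new VolumeChooserEnvironment(Optional.<String> absent());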

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
index cbfdb5e..890651e 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManager.java
@@ -28,6 +28,8 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 
+import com.google.common.base.Optional;
+
 /**
  * A wrapper around multiple hadoop FileSystem objects, which are assumed to be different volumes. This also concentrates a bunch of meta-operations like
  * waiting for SAFE_MODE, and closing WALs.
@@ -156,7 +158,7 @@ public interface VolumeManager {
   ContentSummary getContentSummary(Path dir) throws IOException;
 
   // decide on which of the given locations to create a new file
-  String choose(String[] options);
+  String choose(Optional<String> tableId, String[] options);
 
   /**
    * Fetch the default Volume

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
index 37d5088..dc1be73 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeManagerImpl.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.util.Progressable;
 import org.apache.log4j.Logger;
 
+import com.google.common.base.Optional;
 import com.google.common.collect.HashMultimap;
 import com.google.common.collect.Multimap;
 
@@ -572,8 +573,8 @@ public class VolumeManagerImpl implements VolumeManager {
   }
 
   @Override
-  public String choose(String[] options) {
-    return chooser.choose(options);
+  public String choose(Optional<String> tableId, String[] options) {
+    return chooser.choose(new VolumeChooserEnvironment(tableId), options);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
index 6ebbe1e..877d01c 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
@@ -49,6 +49,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
+import com.google.common.base.Optional;
+
 /**
  * Utility methods for managing absolute URIs contained in Accumulo metadata.
  */
@@ -263,7 +265,7 @@ public class VolumeUtil {
       throw new IllegalArgumentException("Unexpected table dir " + dir);
     }
 
-    Path newDir = new Path(vm.choose(ServerConstants.getBaseUris()) + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + dir.getParent().getName()
+    Path newDir = new Path(vm.choose(Optional.of(extent.getTableId().toString()), ServerConstants.getBaseUris()) + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + dir.getParent().getName()
         + Path.SEPARATOR + dir.getName());
 
     log.info("Updating directory for " + extent + " from " + dir + " to " + newDir);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index 670c541..28bd63b 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@ -106,6 +106,7 @@ import org.apache.zookeeper.ZooDefs.Ids;
 
 import com.beust.jcommander.Parameter;
 import com.google.common.base.Joiner;
+import com.google.common.base.Optional;
 
 /**
  * This class is used to setup the directory structure and the root tablet to get an instance started
@@ -282,13 +283,13 @@ public class Initialize {
     return initialize(opts, instanceNamePath, fs);
   }
 
-  public static boolean initialize(Opts opts, String instanceNamePath, VolumeManager fs) {
+  private static boolean initialize(Opts opts, String instanceNamePath, VolumeManager fs) {
 
     UUID uuid = UUID.randomUUID();
     // the actual disk locations of the root table and tablets
     String[] configuredVolumes = VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance());
-    final String rootTabletDir = new Path(fs.choose(configuredVolumes) + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + RootTable.ID
-        + RootTable.ROOT_TABLET_LOCATION).toString();
+    final String rootTabletDir = new Path(fs.choose(Optional.<String> absent(), configuredVolumes) + Path.SEPARATOR + ServerConstants.TABLE_DIR
+        + Path.SEPARATOR + RootTable.ID + RootTable.ROOT_TABLET_LOCATION).toString();
 
     try {
       initZooKeeper(opts, uuid.toString(), instanceNamePath, rootTabletDir);
@@ -319,7 +320,8 @@ public class Initialize {
     }
 
     try {
-      initSecurity(opts, uuid.toString());
+      AccumuloServerContext context = new AccumuloServerContext(new ServerConfigurationFactory(HdfsZooInstance.getInstance()));
+      initSecurity(context, opts, uuid.toString());
     } catch (Exception e) {
       log.fatal("Failed to initialize security", e);
       return false;
@@ -356,13 +358,12 @@ public class Initialize {
     // initialize initial system tables config in zookeeper
     initSystemTablesConfig();
 
-    String tableMetadataTabletDir = fs.choose(ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + MetadataTable.ID
-        + TABLE_TABLETS_TABLET_DIR;
-    String replicationTableDefaultTabletDir = fs.choose(ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + ReplicationTable.ID
-        + Constants.DEFAULT_TABLET_LOCATION;
-
-    String defaultMetadataTabletDir = fs.choose(ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + MetadataTable.ID
-        + Constants.DEFAULT_TABLET_LOCATION;
+    String tableMetadataTabletDir = fs.choose(Optional.<String> absent(), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
+        + MetadataTable.ID + TABLE_TABLETS_TABLET_DIR;
+    String replicationTableDefaultTabletDir = fs.choose(Optional.<String> absent(), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
+        + ReplicationTable.ID + Constants.DEFAULT_TABLET_LOCATION;
+    String defaultMetadataTabletDir = fs.choose(Optional.<String> absent(), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR
+        + MetadataTable.ID + Constants.DEFAULT_TABLET_LOCATION;
 
     // create table and default tablets directories
     createDirectories(fs, rootTabletDir, tableMetadataTabletDir, defaultMetadataTabletDir, replicationTableDefaultTabletDir);
@@ -546,15 +547,8 @@ public class Initialize {
     return rootpass.getBytes(UTF_8);
   }
 
-  private static void initSecurity(Opts opts, String iid) throws AccumuloSecurityException, ThriftSecurityException {
-    AccumuloServerContext context = new AccumuloServerContext(new ServerConfigurationFactory(HdfsZooInstance.getInstance()) {
-      @Override
-      public synchronized AccumuloConfiguration getConfiguration() {
-        return getSiteConfiguration();
-      }
-    });
-    AuditedSecurityOperation.getInstance(context, true).initializeSecurity(context.rpcCreds(), DEFAULT_ROOT_USER,
-        opts.rootpass);
+  private static void initSecurity(AccumuloServerContext context, Opts opts, String iid) throws AccumuloSecurityException, ThriftSecurityException {
+    AuditedSecurityOperation.getInstance(context, true).initializeSecurity(context.rpcCreds(), DEFAULT_ROOT_USER, opts.rootpass);
   }
 
   public static void initSystemTablesConfig() throws IOException {
@@ -659,9 +653,10 @@ public class Initialize {
       VolumeManager fs = VolumeManagerImpl.get(acuConf);
 
       if (opts.resetSecurity) {
+        AccumuloServerContext context = new AccumuloServerContext(new ServerConfigurationFactory(HdfsZooInstance.getInstance()));
         if (isInitialized(fs)) {
           opts.rootpass = getRootPassword(opts);
-          initSecurity(opts, HdfsZooInstance.getInstance().getInstanceID());
+          initSecurity(context, opts, HdfsZooInstance.getInstance().getInstanceID());
         } else {
           log.fatal("Attempted to reset security on accumulo before it was initialized");
         }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
index aa37e35..103ba05 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/FileUtil.java
@@ -56,6 +56,8 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.log4j.Logger;
 
+import com.google.common.base.Optional;
+
 public class FileUtil {
   
   public static class FileInfo {
@@ -79,8 +81,8 @@ public class FileUtil {
   private static final Logger log = Logger.getLogger(FileUtil.class);
   
   private static Path createTmpDir(AccumuloConfiguration acuConf, VolumeManager fs) throws IOException {
-    String accumuloDir = fs.choose(ServerConstants.getBaseUris());
-    
+    String accumuloDir = fs.choose(Optional.<String>absent(), ServerConstants.getBaseUris());
+
     Path result = null;
     while (result == null) {
       result = new Path(accumuloDir + Path.SEPARATOR + "tmp/idxReduce_" + String.format("%09d", new Random().nextInt(Integer.MAX_VALUE)));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
index dd3355a..524abb0 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
@@ -95,6 +95,8 @@ import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 import org.apache.zookeeper.KeeperException;
 
+import com.google.common.base.Optional;
+
 /**
  * provides a reference to the metadata table for updates by tablet servers
  */
@@ -889,7 +891,7 @@ public class MetadataTableUtil {
       Key k = entry.getKey();
       Mutation m = new Mutation(k.getRow());
       m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
-      String dir = volumeManager.choose(ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + tableId + Path.SEPARATOR
+      String dir = volumeManager.choose(Optional.of(tableId), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + tableId + Path.SEPARATOR
           + new String(FastFormat.toZeroPaddedString(dirCount++, 8, 16, Constants.CLONE_PREFIX_BYTES));
       TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(dir.getBytes(UTF_8)));
 
@@ -981,7 +983,7 @@ public class MetadataTableUtil {
    * During an upgrade from 1.6 to 1.7, we need to add the replication table
    */
   public static void createReplicationTable(ClientContext context) throws IOException {
-    String dir = VolumeManagerImpl.get().choose(ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + ReplicationTable.ID
+    String dir = VolumeManagerImpl.get().choose(Optional.of(ReplicationTable.ID), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + ReplicationTable.ID
         + Constants.DEFAULT_TABLET_LOCATION;
 
     Mutation m = new Mutation(new Text(KeyExtent.getMetadataEntry(new Text(ReplicationTable.ID), null)));

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java b/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
index 82cc855..de360fe 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/RandomizeVolumes.java
@@ -47,6 +47,8 @@ import org.apache.accumulo.server.tables.TableManager;
 import org.apache.hadoop.fs.Path;
 import org.apache.log4j.Logger;
 
+import com.google.common.base.Optional;
+
 public class RandomizeVolumes {
   private static final Logger log = Logger.getLogger(RandomizeVolumes.class);
 
@@ -110,7 +112,7 @@ public class RandomizeVolumes {
       Key key = entry.getKey();
       Mutation m = new Mutation(key.getRow());
 
-      final String newLocation = vm.choose(ServerConstants.getBaseUris()) + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + tableId + Path.SEPARATOR + directory;
+      final String newLocation = vm.choose(Optional.of(tableId), ServerConstants.getBaseUris()) + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + tableId + Path.SEPARATOR + directory;
       m.put(key.getColumnFamily(), key.getColumnQualifier(), new Value(newLocation.getBytes(UTF_8)));
       if (log.isTraceEnabled()) {
         log.trace("Replacing " + oldLocation + " with " + newLocation);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/base/src/main/java/org/apache/accumulo/server/util/TabletOperations.java
----------------------------------------------------------------------
diff --git a/server/base/src/main/java/org/apache/accumulo/server/util/TabletOperations.java b/server/base/src/main/java/org/apache/accumulo/server/util/TabletOperations.java
index 2c9fe9c..c0e1a9b 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/util/TabletOperations.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/util/TabletOperations.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
+import com.google.common.base.Optional;
+
 public class TabletOperations {
   
   private static final Logger log = Logger.getLogger(TabletOperations.class);
@@ -38,7 +40,7 @@ public class TabletOperations {
     String lowDirectory;
     
     UniqueNameAllocator namer = UniqueNameAllocator.getInstance();
-    String volume = fs.choose(ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR;
+    String volume = fs.choose(Optional.of(tableId), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR;
     
     while (true) {
       try {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/master/src/main/java/org/apache/accumulo/master/Master.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/Master.java b/server/master/src/main/java/org/apache/accumulo/master/Master.java
index 6e81354..de00041 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/Master.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/Master.java
@@ -147,6 +147,7 @@ import org.apache.zookeeper.WatchedEvent;
 import org.apache.zookeeper.Watcher;
 import org.apache.zookeeper.data.Stat;
 
+import com.google.common.base.Optional;
 import com.google.common.collect.Iterables;
 
 /**
@@ -254,7 +255,7 @@ public class Master extends AccumuloServerContext implements LiveTServerSet.List
     if (!zoo.exists(dirZPath)) {
       Path oldPath = fs.getFullPath(FileType.TABLE, "/" + MetadataTable.ID + "/root_tablet");
       if (fs.exists(oldPath)) {
-        String newPath = fs.choose(ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + RootTable.ID;
+        String newPath = fs.choose(Optional.of(RootTable.ID), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + RootTable.ID;
         fs.mkdirs(new Path(newPath));
         if (!fs.rename(oldPath, new Path(newPath))) {
           throw new IOException("Failed to move root tablet from " + oldPath + " to " + newPath);

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
index 3d39891..93ed423 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/TabletGroupWatcher.java
@@ -83,6 +83,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.thrift.TException;
 
+import com.google.common.base.Optional;
 import com.google.common.collect.Iterators;
 
 class TabletGroupWatcher extends Daemon {
@@ -549,7 +550,7 @@ class TabletGroupWatcher extends Daemon {
       } else {
         // Recreate the default tablet to hold the end of the table
         Master.log.debug("Recreating the last tablet to point to " + extent.getPrevEndRow());
-        String tdir = master.getFileSystem().choose(ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + extent.getTableId()
+        String tdir = master.getFileSystem().choose(Optional.of(extent.getTableId().toString()), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + extent.getTableId()
             + Constants.DEFAULT_TABLET_LOCATION;
         MetadataTableUtil.addTablet(new KeyExtent(extent.getTableId(), null, extent.getPrevEndRow()), tdir, master, timeType, this.master.masterLock);
       }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
index 247645b..95c9f79 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/CreateTable.java
@@ -44,6 +44,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
+import com.google.common.base.Optional;
+
 class TableInfo implements Serializable {
 
   private static final long serialVersionUID = 1L;
@@ -175,7 +177,7 @@ class ChooseDir extends MasterRepo {
   @Override
   public Repo<Master> call(long tid, Master master) throws Exception {
     // Constants.DEFAULT_TABLET_LOCATION has a leading slash prepended to it so we don't need to add one here
-    tableInfo.dir = master.getFileSystem().choose(ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + tableInfo.tableId
+    tableInfo.dir = master.getFileSystem().choose(Optional.of(tableInfo.tableId), ServerConstants.getBaseUris()) + Constants.HDFS_TABLES_DIR + Path.SEPARATOR + tableInfo.tableId
         + Constants.DEFAULT_TABLET_LOCATION;
     return new CreateDir(tableInfo);
   }

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
----------------------------------------------------------------------
diff --git a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
index 26a6928..979954b 100644
--- a/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
+++ b/server/master/src/main/java/org/apache/accumulo/master/tableOps/ImportTable.java
@@ -72,6 +72,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.log4j.Logger;
 
+import com.google.common.base.Optional;
+
 /**
  *
  */
@@ -325,7 +327,7 @@ class PopulateMetadataTable extends MasterRepo {
    */
   protected String getClonedTabletDir(Master master, String[] tableDirs, String tabletDir) {
     // We can try to spread out the tablet dirs across all volumes
-    String tableDir = master.getFileSystem().choose(tableDirs);
+    String tableDir = master.getFileSystem().choose(Optional.of(tableInfo.tableId), tableDirs);
 
     // Build up a full hdfs://localhost:8020/accumulo/tables/$id/c-XXXXXXX
     return tableDir + "/" + tableInfo.tableId + "/" + tabletDir;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/master/src/test/java/org/apache/accumulo/master/tableOps/ImportTableTest.java
----------------------------------------------------------------------
diff --git a/server/master/src/test/java/org/apache/accumulo/master/tableOps/ImportTableTest.java b/server/master/src/test/java/org/apache/accumulo/master/tableOps/ImportTableTest.java
index 31f6bde..080e0af 100644
--- a/server/master/src/test/java/org/apache/accumulo/master/tableOps/ImportTableTest.java
+++ b/server/master/src/test/java/org/apache/accumulo/master/tableOps/ImportTableTest.java
@@ -22,6 +22,8 @@ import org.easymock.EasyMock;
 import org.junit.Assert;
 import org.junit.Test;
 
+import com.google.common.base.Optional;
+
 /**
  *
  */
@@ -41,7 +43,7 @@ public class ImportTableTest {
 
     EasyMock.expect(master.getFileSystem()).andReturn(volumeManager);
     // Choose the 2nd element
-    EasyMock.expect(volumeManager.choose(tableDirs)).andReturn(tableDirs[1]);
+    EasyMock.expect(volumeManager.choose(Optional.of(iti.tableId), tableDirs)).andReturn(tableDirs[1]);
 
     EasyMock.replay(master, volumeManager);
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
index b7d5b0e..6fd2624 100644
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/log/DfsLogger.java
@@ -43,6 +43,8 @@ import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.atomic.AtomicLong;
 
 import com.google.common.base.Joiner;
+import com.google.common.base.Optional;
+
 import org.apache.accumulo.core.client.Durability;
 import org.apache.accumulo.core.conf.AccumuloConfiguration;
 import org.apache.accumulo.core.conf.Property;
@@ -384,7 +386,7 @@ public class DfsLogger {
     log.debug("DfsLogger.open() begin");
     VolumeManager fs = conf.getFileSystem();
 
-    logPath = fs.choose(ServerConstants.getBaseUris()) + Path.SEPARATOR + ServerConstants.WAL_DIR + Path.SEPARATOR + logger + Path.SEPARATOR + filename;
+    logPath = fs.choose(Optional.<String> absent(), ServerConstants.getBaseUris()) + Path.SEPARATOR + ServerConstants.WAL_DIR + Path.SEPARATOR + logger + Path.SEPARATOR + filename;
 
     metaReference = toString();
     try {

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/server/tserver/src/test/java/org/apache/accumulo/tserver/TabletServerSyncCheckTest.java
----------------------------------------------------------------------
diff --git a/server/tserver/src/test/java/org/apache/accumulo/tserver/TabletServerSyncCheckTest.java b/server/tserver/src/test/java/org/apache/accumulo/tserver/TabletServerSyncCheckTest.java
index dad9a75..d35f07f 100644
--- a/server/tserver/src/test/java/org/apache/accumulo/tserver/TabletServerSyncCheckTest.java
+++ b/server/tserver/src/test/java/org/apache/accumulo/tserver/TabletServerSyncCheckTest.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.junit.Test;
 
+import com.google.common.base.Optional;
 import com.google.common.collect.ImmutableMap;
 
 public class TabletServerSyncCheckTest {
@@ -226,7 +227,7 @@ public class TabletServerSyncCheckTest {
     }
 
     @Override
-    public String choose(String[] options) {
+    public String choose(Optional<String> tableID, String[] options) {
       return null;
     }
 

http://git-wip-us.apache.org/repos/asf/accumulo/blob/3f44f8c1/test/src/main/java/org/apache/accumulo/test/FairVolumeChooser.java
----------------------------------------------------------------------
diff --git a/test/src/main/java/org/apache/accumulo/test/FairVolumeChooser.java b/test/src/main/java/org/apache/accumulo/test/FairVolumeChooser.java
index 9eb0c84..2325086 100644
--- a/test/src/main/java/org/apache/accumulo/test/FairVolumeChooser.java
+++ b/test/src/main/java/org/apache/accumulo/test/FairVolumeChooser.java
@@ -19,6 +19,7 @@ package org.apache.accumulo.test;
 import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.accumulo.server.fs.VolumeChooser;
+import org.apache.accumulo.server.fs.VolumeChooserEnvironment;
 
 /**
  * Try to assign some fairness to choosing Volumes. Intended for tests, not for production
@@ -28,7 +29,7 @@ public class FairVolumeChooser implements VolumeChooser {
   private final ConcurrentHashMap<Integer,Integer> optionLengthToLastChoice = new ConcurrentHashMap<Integer,Integer>();
 
   @Override
-  public String choose(String[] options) {
+  public String choose(VolumeChooserEnvironment env, String[] options) {
     int currentChoice;
     Integer lastChoice = optionLengthToLastChoice.get(options.length);
     if (null == lastChoice) {