You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@accumulo.apache.org by ct...@apache.org on 2012/12/13 21:56:10 UTC

svn commit: r1421521 - in /accumulo/trunk/core/src: main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/ test/java/org/apache/accumulo/core/client/mapreduce/lib/partition/

Author: ctubbsii
Date: Thu Dec 13 20:56:09 2012
New Revision: 1421521

URL: http://svn.apache.org/viewvc?rev=1421521&view=rev
Log:
ACCUMULO-769 Updated partitioners to accept Job, the only valid, mutable JobContext. This change is source-compatible with existing correct code, because Job is a JobContext, so no deprecation is needed; however, code that uses these methods may need to be recompiled, since binary compatibility is not preserved.

Modified:
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/KeyRangePartitioner.java
    accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
    accumulo/trunk/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitionerTest.java

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/KeyRangePartitioner.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/KeyRangePartitioner.java?rev=1421521&r1=1421520&r2=1421521&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/KeyRangePartitioner.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/KeyRangePartitioner.java Thu Dec 13 20:56:09 2012
@@ -20,7 +20,7 @@ import org.apache.accumulo.core.data.Key
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Partitioner;
 
 /**
@@ -47,14 +47,14 @@ public class KeyRangePartitioner extends
   /**
    * Sets the hdfs file name to use, containing a newline separated list of Base64 encoded split points that represent ranges for partitioning
    */
-  public static void setSplitFile(JobContext job, String file) {
+  public static void setSplitFile(Job job, String file) {
     RangePartitioner.setSplitFile(job, file);
   }
   
   /**
    * Sets the number of random sub-bins per range
    */
-  public static void setNumSubBins(JobContext job, int num) {
+  public static void setNumSubBins(Job job, int num) {
     RangePartitioner.setNumSubBins(job, num);
   }
 }

Modified: accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java?rev=1421521&r1=1421520&r2=1421521&view=diff
==============================================================================
--- accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java (original)
+++ accumulo/trunk/core/src/main/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitioner.java Thu Dec 13 20:56:09 2012
@@ -32,7 +32,7 @@ import org.apache.hadoop.filecache.Distr
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.Partitioner;
 
 /**
@@ -45,6 +45,7 @@ public class RangePartitioner extends Pa
   
   private Configuration conf;
   
+  @Override
   public int getPartition(Text key, Writable value, int numPartitions) {
     try {
       return findPartition(key, getCutPoints(), getNumSubBins());
@@ -117,7 +118,7 @@ public class RangePartitioner extends Pa
   /**
    * Sets the hdfs file name to use, containing a newline separated list of Base64 encoded split points that represent ranges for partitioning
    */
-  public static void setSplitFile(JobContext job, String file) {
+  public static void setSplitFile(Job job, String file) {
     URI uri = new Path(file).toUri();
     DistributedCache.addCacheFile(uri, job.getConfiguration());
     job.getConfiguration().set(CUTFILE_KEY, uri.getPath());
@@ -126,7 +127,7 @@ public class RangePartitioner extends Pa
   /**
    * Sets the number of random sub-bins per range
    */
-  public static void setNumSubBins(JobContext job, int num) {
+  public static void setNumSubBins(Job job, int num) {
     job.getConfiguration().setInt(NUM_SUBBINS, num);
   }
 }

Modified: accumulo/trunk/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitionerTest.java
URL: http://svn.apache.org/viewvc/accumulo/trunk/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitionerTest.java?rev=1421521&r1=1421520&r2=1421521&view=diff
==============================================================================
--- accumulo/trunk/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitionerTest.java (original)
+++ accumulo/trunk/core/src/test/java/org/apache/accumulo/core/client/mapreduce/lib/partition/RangePartitionerTest.java Thu Dec 13 20:56:09 2012
@@ -18,9 +18,10 @@ package org.apache.accumulo.core.client.
 
 import static org.junit.Assert.assertTrue;
 
-import org.apache.accumulo.core.util.ContextFactory;
+import java.io.IOException;
+
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.Job;
 import org.junit.Test;
 
 public class RangePartitionerTest {
@@ -28,7 +29,7 @@ public class RangePartitionerTest {
   private static Text[] cutArray = new Text[] {new Text("A"), new Text("B"), new Text("C")};
   
   @Test
-  public void testNoSubBins() {
+  public void testNoSubBins() throws IOException {
     for (int i = -2; i < 2; ++i) {
       checkExpectedBins(i, new String[] {"A", "B", "C"}, new int[] {0, 1, 2});
       checkExpectedBins(i, new String[] {"C", "A", "B"}, new int[] {2, 0, 1});
@@ -37,7 +38,7 @@ public class RangePartitionerTest {
   }
   
   @Test
-  public void testSubBins() {
+  public void testSubBins() throws IOException {
     checkExpectedRangeBins(2, new String[] {"A", "B", "C"}, new int[] {1, 3, 5});
     checkExpectedRangeBins(2, new String[] {"C", "A", "B"}, new int[] {5, 1, 3});
     checkExpectedRangeBins(2, new String[] {"", "AA", "BB", "CC"}, new int[] {1, 3, 5, 7});
@@ -51,15 +52,15 @@ public class RangePartitionerTest {
     checkExpectedRangeBins(10, new String[] {"", "AA", "BB", "CC"}, new int[] {9, 19, 29, 39});
   }
   
-  private RangePartitioner prepPartitioner(int numSubBins) {
-    JobContext job = ContextFactory.createJobContext();
+  private RangePartitioner prepPartitioner(int numSubBins) throws IOException {
+    Job job = new Job();
     RangePartitioner.setNumSubBins(job, numSubBins);
     RangePartitioner rp = new RangePartitioner();
     rp.setConf(job.getConfiguration());
     return rp;
   }
   
-  private void checkExpectedRangeBins(int numSubBins, String[] strings, int[] rangeEnds) {
+  private void checkExpectedRangeBins(int numSubBins, String[] strings, int[] rangeEnds) throws IOException {
     assertTrue(strings.length == rangeEnds.length);
     for (int i = 0; i < strings.length; ++i) {
       int endRange = rangeEnds[i];
@@ -70,7 +71,7 @@ public class RangePartitionerTest {
     }
   }
   
-  private void checkExpectedBins(int numSubBins, String[] strings, int[] bins) {
+  private void checkExpectedBins(int numSubBins, String[] strings, int[] bins) throws IOException {
     assertTrue(strings.length == bins.length);
     for (int i = 0; i < strings.length; ++i) {
       int bin = bins[i], part = prepPartitioner(numSubBins).findPartition(new Text(strings[i]), cutArray, numSubBins);