Posted to commits@accumulo.apache.org by ec...@apache.org on 2011/11/02 15:08:33 UTC

svn commit: r1196591 - in /incubator/accumulo/trunk: ./ bin/ conf/ docs/examples/ lib/ src/assemble/ src/examples/src/main/java/org/apache/accumulo/examples/dirlist/ src/server/src/main/c++/nativeMap/

Author: ecn
Date: Wed Nov  2 14:08:32 2011
New Revision: 1196591

URL: http://svn.apache.org/viewvc?rev=1196591&view=rev
Log:
ACCUMULO-86: merge 1.3 documentation and examples changes to trunk

Added:
    incubator/accumulo/trunk/CHANGES
      - copied unchanged from r1195622, incubator/accumulo/branches/1.3/CHANGES
    incubator/accumulo/trunk/src/assemble/build.sh
      - copied, changed from r1195622, incubator/accumulo/branches/1.3/src/assemble/build.sh
Modified:
    incubator/accumulo/trunk/   (props changed)
    incubator/accumulo/trunk/bin/tool.sh
    incubator/accumulo/trunk/conf/   (props changed)
    incubator/accumulo/trunk/docs/examples/README.bloom
    incubator/accumulo/trunk/docs/examples/README.bulkIngest
    incubator/accumulo/trunk/docs/examples/README.dirlist
    incubator/accumulo/trunk/docs/examples/README.filter
    incubator/accumulo/trunk/docs/examples/README.mapred
    incubator/accumulo/trunk/lib/   (props changed)
    incubator/accumulo/trunk/pom.xml
    incubator/accumulo/trunk/src/examples/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
    incubator/accumulo/trunk/src/server/src/main/c++/nativeMap/BlockAllocator.h

Propchange: incubator/accumulo/trunk/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Nov  2 14:08:32 2011
@@ -1 +1 @@
-/incubator/accumulo/branches/1.3:1190280,1190413,1190420,1190427,1190500
+/incubator/accumulo/branches/1.3:1190280,1190413,1190420,1190427,1190500,1195622,1195625,1195629,1195635,1196044,1196054,1196057,1196071-1196072,1196106

Modified: incubator/accumulo/trunk/bin/tool.sh
URL: http://svn.apache.org/viewvc/incubator/accumulo/trunk/bin/tool.sh?rev=1196591&r1=1196590&r2=1196591&view=diff
==============================================================================
--- incubator/accumulo/trunk/bin/tool.sh (original)
+++ incubator/accumulo/trunk/bin/tool.sh Wed Nov  2 14:08:32 2011
@@ -31,10 +31,10 @@ fi
 
 LIB=$ACCUMULO_HOME/lib
 
-ZOOKEEPER_CMD='ls -1 $ZOOKEEPER_HOME/*.jar | grep -v -e ".*javadoc.jar$" | grep -v -e ".*src.jar$" | grep -v -e ".*bin.jar$"'
-CORE_CMD='ls -1 $LIB/accumulo-core-*.jar | grep -v -e ".*javadoc.jar$" | grep -v -e ".*sources.jar$"'
-THRIFT_CMD='ls -1 $LIB/libthrift-*.jar | grep -v -e ".*javadoc.jar$" | grep -v -e ".*sources.jar$"'
-CLOUDTRACE_CMD='ls -1 $LIB/cloudtrace-*.jar | grep -v -e ".*javadoc.jar$" | grep -v -e ".*sources.jar$"'
+ZOOKEEPER_CMD='ls -1 $ZOOKEEPER_HOME/zookeeper-[0-9]*[^csn].jar '
+CORE_CMD='ls -1 $LIB/accumulo-core-*[^cs].jar'
+THRIFT_CMD='ls -1 $LIB/libthrift-*[^cs].jar'
+CLOUDTRACE_CMD='ls -1 $LIB/cloudtrace-*[^cs].jar'
 
 if [ `eval $ZOOKEEPER_CMD | wc -l` != "1" ] ; then
     echo "Not exactly one zookeeper jar in $ZOOKEEPER_HOME"

Propchange: incubator/accumulo/trunk/conf/
------------------------------------------------------------------------------
--- svn:ignore (original)
+++ svn:ignore Wed Nov  2 14:08:32 2011
@@ -3,3 +3,8 @@ masters
 slaves
 tracers
 test-*
+gc
+monitor
+accumulo-env.sh
+accumulo-site.xml
+accumulo_user_manual.pdf

Modified: incubator/accumulo/trunk/docs/examples/README.bloom
URL: http://svn.apache.org/viewvc/incubator/accumulo/trunk/docs/examples/README.bloom?rev=1196591&r1=1196590&r2=1196591&view=diff
==============================================================================
--- incubator/accumulo/trunk/docs/examples/README.bloom (original)
+++ incubator/accumulo/trunk/docs/examples/README.bloom Wed Nov  2 14:08:32 2011
@@ -22,7 +22,7 @@ initialized with the seed 7.
     [user@localhost bin]$ ../bin/accumulo org.apache.accumulo.examples.client.RandomBatchWriter -s 7 testInstance localhost root secret bloom_test 1000000 0 1000000000 50 2000000 60000 3 CV1
 
 Below the table is flushed, look at the monitor page and wait for the flush to
-complete.
+complete.  
 
    [user@localhost bin]$ ./accumulo shell -u root -p secret
     Shell - Accumulo Interactive Shell
@@ -35,6 +35,9 @@ complete.
     Flush of table bloom_test initiated...
     root@localhost> quit
 
+The flush will be finished when there are no entries in memory and the 
+number of minor compactions goes to zero. Refresh the page to see changes to the table.
+
 After the flush completes, 500 random queries are done against the table.  The
 same seed is used to generate the queries, therefore everything is found in the
 table.
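
For reference, the flush requested in the README.bloom example above can also be issued through the Java client API instead of the shell. The following is a minimal sketch, assuming the single-argument TableOperations.flush is available in this build and using the example's connection parameters as placeholders:

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.ZooKeeperInstance;

    public class FlushBloomTest {
      public static void main(String[] args) throws Exception {
        // Placeholder connection parameters; substitute your instance name,
        // zookeeper hosts, user, and password.
        Connector conn = new ZooKeeperInstance("testInstance", "localhost")
            .getConnector("root", "secret".getBytes());
        // Ask the tablet servers to flush the in-memory entries of bloom_test
        // to disk.  The call does not wait, so use the monitor page (entries in
        // memory and minor compactions both at zero) to tell when it finished.
        conn.tableOperations().flush("bloom_test");
      }
    }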

Modified: incubator/accumulo/trunk/docs/examples/README.bulkIngest
URL: http://svn.apache.org/viewvc/incubator/accumulo/trunk/docs/examples/README.bulkIngest?rev=1196591&r1=1196590&r2=1196591&view=diff
==============================================================================
--- incubator/accumulo/trunk/docs/examples/README.bulkIngest (original)
+++ incubator/accumulo/trunk/docs/examples/README.bulkIngest Wed Nov  2 14:08:32 2011
@@ -12,10 +12,11 @@ NOTE: You should replace "root" with you
 permissions on the table, and "secret" with that user's password.
 $ACCUMULO_HOME should also be replaced with the home directory where accumulo is stored.
 
-  bin/accumulo org.apache.accumulo.examples.mapreduce.bulk.SetupTable testInstance localhost root secret test_bulk row_00000333 row_00000666
-  bin/accumulo org.apache.accumulo.examples.mapreduce.bulk.GenerateTestData 0 1000 bulk/test_1.txt
-  
-  bin/tool.sh $ACCUMULO_HOME/lib/accumulo-examples-[0-9].[0-9].[0-9].jar org.apache.accumulo.examples.mapreduce.bulk.BulkIngestExample testInstance localhost root secret test_bulk bulk tmp/bulkWork
-  bin/accumulo org.apache.accumulo.examples.mapreduce.bulk.VerifyIngest testInstance localhost root secret test_bulk 0 1000
+    $ ./bin/accumulo org.apache.accumulo.examples.mapreduce.bulk.SetupTable instance zookeepers username password test_bulk row_00000333 row_00000666
+    $ ./bin/accumulo org.apache.accumulo.examples.mapreduce.bulk.GenerateTestData 0 1000 bulk/test_1.txt
+    
+    $ ./bin/tool.sh lib/accumulo-examples-*[^c].jar org.apache.accumulo.examples.mapreduce.bulk.BulkIngestExample instance zookeepers username password test_bulk bulk tmp/bulkWork
+    $ ./bin/accumulo org.apache.accumulo.examples.mapreduce.bulk.VerifyIngest instance zookeepers username password test_bulk 0 1000
+
 
 For a high level discussion of bulk ingest, see the docs dir.
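
The bulk ingest commands above boil down to a map/reduce job that writes sorted files, followed by handing a directory of those files to Accumulo. Below is a minimal sketch of that final hand-off, assuming a TableOperations.importDirectory(table, dir, failureDir, setTime) method in this version; the files/ and failures/ subdirectories under the README's tmp/bulkWork path are hypothetical placeholders:

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.ZooKeeperInstance;

    public class BulkImportSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder connection parameters.
        Connector conn = new ZooKeeperInstance("instance", "zookeepers")
            .getConnector("username", "password".getBytes());
        // Hand the directory of sorted files to Accumulo; files that cannot be
        // loaded are moved to the failures directory for inspection.
        conn.tableOperations().importDirectory("test_bulk",
            "tmp/bulkWork/files", "tmp/bulkWork/failures", false);
      }
    }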

Modified: incubator/accumulo/trunk/docs/examples/README.dirlist
URL: http://svn.apache.org/viewvc/incubator/accumulo/trunk/docs/examples/README.dirlist?rev=1196591&r1=1196590&r2=1196591&view=diff
==============================================================================
--- incubator/accumulo/trunk/docs/examples/README.dirlist (original)
+++ incubator/accumulo/trunk/docs/examples/README.dirlist Wed Nov  2 14:08:32 2011
@@ -31,17 +31,17 @@ To list the contents of specific directo
   bin/accumulo org.apache.accumulo.examples.dirlist.QueryUtil test-instance localhost root password dirTable auths /local/user1/workspace
 
 To perform searches on file or directory names, also use QueryUtil.java.  Search terms must contain no more than one wild card and cannot contain "/".
-Note these queries run on the indexTable table instead of the dirTable table.
+*Note* these queries run on the _dirindex_ table instead of the direxample table.
 
-  bin/accumulo org.apache.accumulo.examples.dirlist.QueryUtil test-instance localhost root password indexTable auths filename -search
-  bin/accumulo org.apache.accumulo.examples.dirlist.QueryUtil test-instance localhost root password indexTable auths filename* -search
-  bin/accumulo org.apache.accumulo.examples.dirlist.QueryUtil test-instance localhost root password indexTable auths *jar -search
-  bin/accumulo org.apache.accumulo.examples.dirlist.QueryUtil test-instance localhost root password indexTable auths filename*jar -search
+    $ ./bin/accumulo org.apache.accumulo.examples.dirlist.QueryUtil instance zookeepers username password dirindex exampleVis filename -search
+    $ ./bin/accumulo org.apache.accumulo.examples.dirlist.QueryUtil instance zookeepers username password dirindex exampleVis 'filename*' -search
+    $ ./bin/accumulo org.apache.accumulo.examples.dirlist.QueryUtil instance zookeepers username password dirindex exampleVis '*jar' -search
+    $ ./bin/accumulo org.apache.accumulo.examples.dirlist.QueryUtil instance zookeepers username password dirindex exampleVis filename*jar -search
 
 To count the number of direct children (directories and files) and descendants (children and children's descendents, directories and files), run the FileCountMR over the dirTable table.
 The results can be written back to the same table.
 
-  bin/tool.sh lib/accumulo-examples-*.jar org.apache.accumulo.examples.dirlist.FileCountMR test-instance localhost root password dirTable dirTable visibility visibility
+    $ ./bin/tool.sh lib/accumulo-examples-*[^c].jar org.apache.accumulo.examples.dirlist.FileCountMR instance zookeepers username password direxample direxample exampleVis exampleVis
 
 Alternatively, you can run FileCount.java which performs the same counts but is not a MapReduce.  FileCount will be faster for small data sets.
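
A search such as 'filename*' in the README.dirlist example above is served by a range scan over the index table. The sketch below shows only the generic Scanner and Range pattern, with made-up connection, authorization, and prefix values; it does not reproduce QueryUtil's actual index row layout:

    import java.util.Map.Entry;

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.client.ZooKeeperInstance;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Range;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.security.Authorizations;
    import org.apache.hadoop.io.Text;

    public class IndexScanSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder connection parameters and scan authorization.
        Connector conn = new ZooKeeperInstance("instance", "zookeepers")
            .getConnector("username", "password".getBytes());
        Scanner scanner = conn.createScanner("dirindex", new Authorizations("exampleVis"));
        // Return every row beginning with the prefix "filename"; closing the
        // range with the prefix plus a high character is a common idiom.
        scanner.setRange(new Range(new Text("filename"), new Text("filename\uffff")));
        for (Entry<Key,Value> entry : scanner)
          System.out.println(entry.getKey() + " -> " + entry.getValue());
      }
    }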
 

Modified: incubator/accumulo/trunk/docs/examples/README.filter
URL: http://svn.apache.org/viewvc/incubator/accumulo/trunk/docs/examples/README.filter?rev=1196591&r1=1196590&r2=1196591&view=diff
==============================================================================
--- incubator/accumulo/trunk/docs/examples/README.filter (original)
+++ incubator/accumulo/trunk/docs/examples/README.filter Wed Nov  2 14:08:32 2011
@@ -5,23 +5,22 @@ ones).  Filters implement the org.apache
 contains a method accept(Key k, Value v).  This method returns true if the key, 
 value pair are to be delivered and false if they are to be ignored.
 
-user@example> createtable filtertest
-user@example filtertest> setiter -t filtertest -scan -p 10 -n myfilter -filter
-FilteringIterator uses Filters to accept or reject key/value pairs
-----------> entering options: <filterPriorityNumber> <ageoff|regex|filterClass>
-----------> set org.apache.accumulo.core.iterators.FilteringIterator option (<name> <value>, hit enter to skip): 0 ageoff
-----------> set org.apache.accumulo.core.iterators.FilteringIterator option (<name> <value>, hit enter to skip): 
-AgeOffFilter removes entries with timestamps more than <ttl> milliseconds old
-----------> set org.apache.accumulo.core.iterators.filter.AgeOffFilter parameter currentTime, if set, use the given value as the absolute time in milliseconds as the current time of day: 
-----------> set org.apache.accumulo.core.iterators.filter.AgeOffFilter parameter ttl, time to live (milliseconds): 30000
-user@example filtertest> 
-
-user@example filtertest> scan
-user@example filtertest> insert foo a b c
-insert successful
-user@example filtertest> scan
-foo a:b []	c
-
+    username@instance> createtable filtertest
+    username@instance filtertest> setiter -t filtertest -scan -p 10 -n myfilter -filter
+    FilteringIterator uses Filters to accept or reject key/value pairs
+    ----------> entering options: <filterPriorityNumber> <ageoff|regex|filterClass>
+    ----------> set org.apache.accumulo.core.iterators.FilteringIterator option (<name> <value>, hit enter to skip): 0 ageoff
+    ----------> set org.apache.accumulo.core.iterators.FilteringIterator option (<name> <value>, hit enter to skip): 
+    AgeOffFilter removes entries with timestamps more than <ttl> milliseconds old
+    ----------> set org.apache.accumulo.core.iterators.filter.AgeOffFilter parameter currentTime, if set, use the given value as the absolute time in milliseconds as the current time of day: 
+    ----------> set org.apache.accumulo.core.iterators.filter.AgeOffFilter parameter ttl, time to live (milliseconds): 30000
+    username@instance filtertest> 
+    
+    username@instance filtertest> scan
+    username@instance filtertest> insert foo a b c
+    username@instance filtertest> scan
+    foo a:b []	c
+    
 ... wait 30 seconds ...
 
 user@example filtertest> scan
@@ -38,31 +37,47 @@ on the "minc" and "majc" scopes you can 
 happen automatically as a background operation on any table that is being 
 actively written to, but these are the commands to force compaction:
 
-user@example filtertest> flush -t filtertest
-08 11:13:55,745 [shell.Shell] INFO : Flush of table filtertest initiated...
-user@example filtertest> compact -t filtertest
-08 11:14:10,800 [shell.Shell] INFO : Compaction of table filtertest scheduled for 20110208111410EST
-user@example filtertest> 
+    username@instance filtertest> setiter -t filtertest -scan -minc -majc -p 10 -n myfilter -filter
+    FilteringIterator uses Filters to accept or reject key/value pairs
+    ----------> entering options: <filterPriorityNumber> <ageoff|regex|filterClass>
+    ----------> set org.apache.accumulo.core.iterators.FilteringIterator option (<name> <value>, hit enter to skip): 0 ageoff
+    ----------> set org.apache.accumulo.core.iterators.FilteringIterator option (<name> <value>, hit enter to skip): 
+    AgeOffFilter removes entries with timestamps more than <ttl> milliseconds old
+    ----------> set org.apache.accumulo.core.iterators.filter.AgeOffFilter parameter currentTime, if set, use the given value as the absolute time in milliseconds as the current time of day: 
+    ----------> set org.apache.accumulo.core.iterators.filter.AgeOffFilter parameter ttl, time to live (milliseconds): 30000
+    username@instance filtertest> 
+    
+    username@instance filtertest> flush -t filtertest
+    08 11:13:55,745 [shell.Shell] INFO : Flush of table filtertest initiated...
+    username@instance filtertest> compact -t filtertest
+    08 11:14:10,800 [shell.Shell] INFO : Compaction of table filtertest scheduled for 20110208111410EST
+    username@instance filtertest> 
 
 After the compaction runs, the newly created files will not contain any data that should be aged off, and the
 Accumulo garbage collector will remove the old files.
 
 To see the iterator settings for a table, use:
-user@example filtertest> config -t filtertest -f iterator
----------+------------------------------------------+----------------------------------------------------------
-SCOPE    | NAME                                     | VALUE
----------+------------------------------------------+----------------------------------------------------------
-table    | table.iterator.majc.vers................ | 20,org.apache.accumulo.core.iterators.VersioningIterator
-table    | table.iterator.majc.vers.opt.maxVersions | 1
-table    | table.iterator.minc.vers................ | 20,org.apache.accumulo.core.iterators.VersioningIterator
-table    | table.iterator.minc.vers.opt.maxVersions | 1
-table    | table.iterator.scan.myfilter............ | 10,org.apache.accumulo.core.iterators.FilteringIterator
-table    | table.iterator.scan.myfilter.opt.0...... | org.apache.accumulo.core.iterators.filter.AgeOffFilter
-table    | table.iterator.scan.myfilter.opt.0.ttl.. | 30000
-table    | table.iterator.scan.vers................ | 20,org.apache.accumulo.core.iterators.VersioningIterator
-table    | table.iterator.scan.vers.opt.maxVersions | 1
----------+------------------------------------------+----------------------------------------------------------
-user@example filtertest> 
+    username@instance filtertest> config -t filtertest -f iterator
+    ---------+------------------------------------------+----------------------------------------------------------
+    SCOPE    | NAME                                     | VALUE
+    ---------+------------------------------------------+----------------------------------------------------------
+    table    | table.iterator.majc.myfilter .............. | 10,org.apache.accumulo.core.iterators.FilteringIterator
+    table    | table.iterator.majc.myfilter.opt.0 ........ | org.apache.accumulo.core.iterators.filter.AgeOffFilter
+    table    | table.iterator.majc.myfilter.opt.0.ttl .... | 30000
+    table    | table.iterator.majc.vers .................. | 20,org.apache.accumulo.core.iterators.VersioningIterator
+    table    | table.iterator.majc.vers.opt.maxVersions .. | 1
+    table    | table.iterator.minc.myfilter .............. | 10,org.apache.accumulo.core.iterators.FilteringIterator
+    table    | table.iterator.minc.myfilter.opt.0 ........ | org.apache.accumulo.core.iterators.filter.AgeOffFilter
+    table    | table.iterator.minc.myfilter.opt.0.ttl .... | 30000
+    table    | table.iterator.minc.vers .................. | 20,org.apache.accumulo.core.iterators.VersioningIterator
+    table    | table.iterator.minc.vers.opt.maxVersions .. | 1
+    table    | table.iterator.scan.myfilter .............. | 10,org.apache.accumulo.core.iterators.FilteringIterator
+    table    | table.iterator.scan.myfilter.opt.0 ........ | org.apache.accumulo.core.iterators.filter.AgeOffFilter
+    table    | table.iterator.scan.myfilter.opt.0.ttl .... | 30000
+    table    | table.iterator.scan.vers .................. | 20,org.apache.accumulo.core.iterators.VersioningIterator
+    table    | table.iterator.scan.vers.opt.maxVersions .. | 1
+    ---------+------------------------------------------+----------------------------------------------------------
+    username@instance filtertest> 
 
 If you would like to apply multiple filters, this can be done using a single
 iterator. Just continue adding entries during the 
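
The interactive setiter session above ultimately just sets per-table properties, as the "config -t filtertest -f iterator" output shows. The same scan-scope age-off configuration can be applied through the Java API; here is a minimal sketch with placeholder connection parameters, reusing the property names and values from that output:

    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.ZooKeeperInstance;

    public class AgeOffSetupSketch {
      public static void main(String[] args) throws Exception {
        // Placeholder connection parameters.
        Connector conn = new ZooKeeperInstance("instance", "zookeepers")
            .getConnector("username", "password".getBytes());
        String table = "filtertest";
        // Mirror the scan-scope rows of the config output: a FilteringIterator
        // at priority 10 wrapping an AgeOffFilter with a 30 second ttl.
        conn.tableOperations().setProperty(table, "table.iterator.scan.myfilter",
            "10,org.apache.accumulo.core.iterators.FilteringIterator");
        conn.tableOperations().setProperty(table, "table.iterator.scan.myfilter.opt.0",
            "org.apache.accumulo.core.iterators.filter.AgeOffFilter");
        conn.tableOperations().setProperty(table, "table.iterator.scan.myfilter.opt.0.ttl",
            "30000");
      }
    }

Setting the matching minc and majc properties, as the transcript above does with -minc -majc, extends the same age-off behavior to compactions.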

Modified: incubator/accumulo/trunk/docs/examples/README.mapred
URL: http://svn.apache.org/viewvc/incubator/accumulo/trunk/docs/examples/README.mapred?rev=1196591&r1=1196590&r2=1196591&view=diff
==============================================================================
--- incubator/accumulo/trunk/docs/examples/README.mapred (original)
+++ incubator/accumulo/trunk/docs/examples/README.mapred Wed Nov  2 14:08:32 2011
@@ -2,6 +2,10 @@ This example uses mapreduce and accumulo
 set of documents.  This is accomplished using a map only map-reduce
 job and a accumulo table with aggregators.
 
+This example uses mapreduce and accumulo to compute word counts for a set of
+documents.  This is accomplished using a map-only mapreduce job and a
+accumulo table with aggregators.
+
 To run this example you will need a directory in HDFS containing text files.
 The accumulo readme will be used to show how to run this example.
 
@@ -26,29 +30,28 @@ for the column family count.
   
 After creating the table, run the word count map reduce job.
 
-  [user1@localhost accumulo]$ bin/tool.sh lib/accumulo-examples-*.jar org.apache.accumulo.examples.mapreduce.WordCount testInstance localhost /user/user1/wc wordCount
-
-  11/02/07 18:20:11 INFO input.FileInputFormat: Total input paths to process : 1
-  11/02/07 18:20:12 INFO mapred.JobClient: Running job: job_201102071740_0003
-  11/02/07 18:20:13 INFO mapred.JobClient:  map 0% reduce 0%
-  11/02/07 18:20:20 INFO mapred.JobClient:  map 100% reduce 0%
-  11/02/07 18:20:22 INFO mapred.JobClient: Job complete: job_201102071740_0003
-  11/02/07 18:20:22 INFO mapred.JobClient: Counters: 6
-  11/02/07 18:20:22 INFO mapred.JobClient:   Job Counters 
-  11/02/07 18:20:22 INFO mapred.JobClient:     Launched map tasks=1
-  11/02/07 18:20:22 INFO mapred.JobClient:     Data-local map tasks=1
-  11/02/07 18:20:22 INFO mapred.JobClient:   FileSystemCounters
-  11/02/07 18:20:22 INFO mapred.JobClient:     HDFS_BYTES_READ=10487
-  11/02/07 18:20:22 INFO mapred.JobClient:   Map-Reduce Framework
-  11/02/07 18:20:22 INFO mapred.JobClient:     Map input records=255
-  11/02/07 18:20:22 INFO mapred.JobClient:     Spilled Records=0
-  11/02/07 18:20:22 INFO mapred.JobClient:     Map output records=1452
-
+    $ bin/tool.sh lib/accumulo-examples-*[^c].jar org.apache.accumulo.examples.mapreduce.WordCount instance zookeepers /user/user1/wc wordCount -u username -p password
+    
+    11/02/07 18:20:11 INFO input.FileInputFormat: Total input paths to process : 1
+    11/02/07 18:20:12 INFO mapred.JobClient: Running job: job_201102071740_0003
+    11/02/07 18:20:13 INFO mapred.JobClient:  map 0% reduce 0%
+    11/02/07 18:20:20 INFO mapred.JobClient:  map 100% reduce 0%
+    11/02/07 18:20:22 INFO mapred.JobClient: Job complete: job_201102071740_0003
+    11/02/07 18:20:22 INFO mapred.JobClient: Counters: 6
+    11/02/07 18:20:22 INFO mapred.JobClient:   Job Counters 
+    11/02/07 18:20:22 INFO mapred.JobClient:     Launched map tasks=1
+    11/02/07 18:20:22 INFO mapred.JobClient:     Data-local map tasks=1
+    11/02/07 18:20:22 INFO mapred.JobClient:   FileSystemCounters
+    11/02/07 18:20:22 INFO mapred.JobClient:     HDFS_BYTES_READ=10487
+    11/02/07 18:20:22 INFO mapred.JobClient:   Map-Reduce Framework
+    11/02/07 18:20:22 INFO mapred.JobClient:     Map input records=255
+    11/02/07 18:20:22 INFO mapred.JobClient:     Spilled Records=0
+    11/02/07 18:20:22 INFO mapred.JobClient:     Map output records=1452
   
 After the map reduce job completes, query the accumulo table to see word
 counts.
 
-  [user1@localhost accumulo]$ ./bin/accumulo shell -u root -p secret
+  $ ./bin/accumulo shell -u root -p secret
   Shell - Accumulo Interactive Shell
   - version: 1.4.0-incubating-SNAPSHOT
   - instance id: 00000000-0000-0000-0000-000000000000
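
The WordCount job referenced in README.mapred above is map-only: each mapper emits one Mutation per word directly to the wordCount table, and the summing aggregator configured on the count column family combines the individual counts. Below is a stripped-down sketch of such a mapper; the class name, column qualifier, and value encoding are placeholders (the encoding must match whichever aggregator the table was created with), and the AccumuloOutputFormat job setup is omitted:

    import java.io.IOException;

    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapreduce.Mapper;

    public class WordCountSketch {
      // Map-only job: every word in an input line becomes a Mutation carrying a
      // count of one; the table's aggregator sums these at scan/compaction time.
      public static class MapClass extends Mapper<LongWritable,Text,Text,Mutation> {
        private static final Text OUTPUT_TABLE = new Text("wordCount");
        private static final Text COUNT_FAMILY = new Text("count");
        private static final Text QUALIFIER = new Text("total"); // placeholder qualifier

        @Override
        public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
          for (String word : value.toString().split("\\s+")) {
            if (word.isEmpty())
              continue;
            Mutation m = new Mutation(new Text(word));
            // "1".getBytes() assumes a string-based summing aggregator; a
            // LongSummation-style aggregator would need an encoded long instead.
            m.put(COUNT_FAMILY, QUALIFIER, new Value("1".getBytes()));
            context.write(OUTPUT_TABLE, m);
          }
        }
      }
    }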

Propchange: incubator/accumulo/trunk/lib/
------------------------------------------------------------------------------
--- svn:ignore (original)
+++ svn:ignore Wed Nov  2 14:08:32 2011
@@ -1 +1,2 @@
 native
+*.jar

Modified: incubator/accumulo/trunk/pom.xml
URL: http://svn.apache.org/viewvc/incubator/accumulo/trunk/pom.xml?rev=1196591&r1=1196590&r2=1196591&view=diff
==============================================================================
--- incubator/accumulo/trunk/pom.xml (original)
+++ incubator/accumulo/trunk/pom.xml Wed Nov  2 14:08:32 2011
@@ -190,8 +190,9 @@
             Accumulo is a large distributed structured store based on Google's
             BigTable design.
           </description>
-          <copyright>FIXME</copyright>
+          <copyright>2011 The Apache Software Foundation.</copyright>
           <url>http://incubator.apache.org/accumulo</url>
+          <needarch>true</needarch>
           <group>Utilities</group>
           <requires>
             <require>jdk</require>

Copied: incubator/accumulo/trunk/src/assemble/build.sh (from r1195622, incubator/accumulo/branches/1.3/src/assemble/build.sh)
URL: http://svn.apache.org/viewvc/incubator/accumulo/trunk/src/assemble/build.sh?p2=incubator/accumulo/trunk/src/assemble/build.sh&p1=incubator/accumulo/branches/1.3/src/assemble/build.sh&r1=1195622&r2=1196591&rev=1196591&view=diff
==============================================================================
--- incubator/accumulo/branches/1.3/src/assemble/build.sh (original)
+++ incubator/accumulo/trunk/src/assemble/build.sh Wed Nov  2 14:08:32 2011
@@ -1,5 +1,20 @@
 #! /bin/bash
 
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 loc=`dirname "$0"`
 loc=`cd "$loc/../.."; pwd`
 
@@ -23,7 +38,15 @@ runAt() {
   ( cd $1 ; echo in `pwd`; shift ; run $@ ) || fail 
 }
 
-run mvn -U -P distclean clean package javadoc:aggregate javadoc:jar source:jar
+run mvn -U -P distclean clean 
+mvn rat:check 
+COUNT=`grep '!????' target/rat.txt | wc -l`
+EXPECTED=19
+if [ "$COUNT" -ne $EXPECTED ]
+then
+   fail expected $EXPECTED files missing licenses, but saw "$COUNT"
+fi
+run mvn package javadoc:aggregate javadoc:jar source:jar
 runAt ./src/server/src/main/c++ make 
 run mvn package source:jar assembly:single
-run mvn -N rpm:rpm
+test -x /usr/bin/rpmbuild && run mvn -N rpm:rpm

Modified: incubator/accumulo/trunk/src/examples/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java
URL: http://svn.apache.org/viewvc/incubator/accumulo/trunk/src/examples/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java?rev=1196591&r1=1196590&r2=1196591&view=diff
==============================================================================
--- incubator/accumulo/trunk/src/examples/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java (original)
+++ incubator/accumulo/trunk/src/examples/src/main/java/org/apache/accumulo/examples/dirlist/Ingest.java Wed Nov  2 14:08:32 2011
@@ -18,6 +18,8 @@ package org.apache.accumulo.examples.dir
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import org.apache.accumulo.core.client.BatchWriter;
 import org.apache.accumulo.core.client.Connector;
@@ -25,6 +27,7 @@ import org.apache.accumulo.core.client.Z
 import org.apache.accumulo.core.data.Mutation;
 import org.apache.accumulo.core.data.Value;
 import org.apache.accumulo.core.iterators.aggregation.LongSummation;
+import org.apache.accumulo.core.security.Authorizations;
 import org.apache.accumulo.core.security.ColumnVisibility;
 import org.apache.accumulo.examples.filedata.FileDataIngest;
 import org.apache.hadoop.io.Text;
@@ -121,8 +124,8 @@ public class Ingest {
     String nameTable = args[4];
     String indexTable = args[5];
     String dataTable = args[6];
+    byte[] visibility = args[7].getBytes();
     ColumnVisibility colvis = new ColumnVisibility(args[7]);
-    int chunkSize = Integer.parseInt(args[8]);
     
     Connector conn = new ZooKeeperInstance(instance, zooKeepers).getConnector(user, pass.getBytes());
     if (!conn.tableOperations().exists(nameTable))
@@ -131,6 +134,17 @@ public class Ingest {
       conn.tableOperations().create(indexTable);
     if (!conn.tableOperations().exists(dataTable))
       conn.tableOperations().create(dataTable);
+    Authorizations auths = conn.securityOperations().getUserAuthorizations(user);
+    if (!auths.contains(visibility)) {
+      List<byte[]> copy = new ArrayList<byte[]>(auths.getAuthorizations());
+      copy.add(visibility);
+      try {
+        conn.securityOperations().changeUserAuthorizations(user, new Authorizations(copy));
+      } catch (Exception ex) {
+        System.out.println("Unable to add visiblity to user " + user + ": " + ex);
+        System.exit(1);
+      }
+    }
     BatchWriter dirBW = conn.createBatchWriter(nameTable, 50000000, 300000l, 4);
     BatchWriter indexBW = conn.createBatchWriter(indexTable, 50000000, 300000l, 4);
     BatchWriter dataBW = conn.createBatchWriter(dataTable, 50000000, 300000l, 4);

Modified: incubator/accumulo/trunk/src/server/src/main/c++/nativeMap/BlockAllocator.h
URL: http://svn.apache.org/viewvc/incubator/accumulo/trunk/src/server/src/main/c%2B%2B/nativeMap/BlockAllocator.h?rev=1196591&r1=1196590&r2=1196591&view=diff
==============================================================================
--- incubator/accumulo/trunk/src/server/src/main/c++/nativeMap/BlockAllocator.h (original)
+++ incubator/accumulo/trunk/src/server/src/main/c++/nativeMap/BlockAllocator.h Wed Nov  2 14:08:32 2011
@@ -25,6 +25,7 @@
 #include <string>
 #include <vector>
 #include <stdlib.h>
+#include <stddef.h>
 
 struct Block {
 	unsigned char *data;