Posted to commits@cassandra.apache.org by jb...@apache.org on 2009/03/30 17:24:30 UTC

svn commit: r760005 - in /incubator/cassandra/trunk/src/org/apache/cassandra: db/ dht/ io/ locator/ service/

Author: jbellis
Date: Mon Mar 30 15:24:23 2009
New Revision: 760005

URL: http://svn.apache.org/viewvc?rev=760005&view=rev
Log:
r/m unused code dealing with Ranges and tokens

Removed:
    incubator/cassandra/trunk/src/org/apache/cassandra/service/LocationInfoVerbHandler.java
    incubator/cassandra/trunk/src/org/apache/cassandra/service/TokenInfoVerbHandler.java
Modified:
    incubator/cassandra/trunk/src/org/apache/cassandra/db/ColumnFamilyStore.java
    incubator/cassandra/trunk/src/org/apache/cassandra/db/Memtable.java
    incubator/cassandra/trunk/src/org/apache/cassandra/db/MinorCompactionManager.java
    incubator/cassandra/trunk/src/org/apache/cassandra/db/Table.java
    incubator/cassandra/trunk/src/org/apache/cassandra/dht/BootStrapper.java
    incubator/cassandra/trunk/src/org/apache/cassandra/dht/Range.java
    incubator/cassandra/trunk/src/org/apache/cassandra/io/SSTable.java
    incubator/cassandra/trunk/src/org/apache/cassandra/io/SequenceFile.java
    incubator/cassandra/trunk/src/org/apache/cassandra/locator/TokenMetadata.java
    incubator/cassandra/trunk/src/org/apache/cassandra/service/StorageService.java

Modified: incubator/cassandra/trunk/src/org/apache/cassandra/db/ColumnFamilyStore.java
URL: http://svn.apache.org/viewvc/incubator/cassandra/trunk/src/org/apache/cassandra/db/ColumnFamilyStore.java?rev=760005&r1=760004&r2=760005&view=diff
==============================================================================
--- incubator/cassandra/trunk/src/org/apache/cassandra/db/ColumnFamilyStore.java (original)
+++ incubator/cassandra/trunk/src/org/apache/cassandra/db/ColumnFamilyStore.java Mon Mar 30 15:24:23 2009
@@ -47,8 +47,8 @@
 import org.apache.cassandra.io.SSTable;
 import org.apache.cassandra.io.SequenceFile;
 import org.apache.cassandra.net.EndPoint;
-import org.apache.cassandra.service.PartitionerType;
 import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.service.PartitionerType;
 import org.apache.cassandra.utils.BloomFilter;
 import org.apache.cassandra.utils.FileUtils;
 import org.apache.cassandra.utils.LogUtil;
@@ -81,7 +81,7 @@
     private ReentrantReadWriteLock lock_ = new ReentrantReadWriteLock(true);
 
     /* Flag indicates if a compaction is in process */
-    public AtomicBoolean isCompacting_ = new AtomicBoolean(false);
+    private AtomicBoolean isCompacting_ = new AtomicBoolean(false);
 
     ColumnFamilyStore(String table, String columnFamily) throws IOException
     {
@@ -131,7 +131,7 @@
             for (File file : files)
             {
                 String filename = file.getName();
-                if(((file.length() == 0) || (filename.indexOf("-" + SSTable.temporaryFile_) != -1) ) && (filename.indexOf(columnFamily_) != -1))
+                if(((file.length() == 0) || (filename.contains("-" + SSTable.temporaryFile_)) ) && (filename.contains(columnFamily_)))
                 {
                 	file.delete();
                 	continue;
@@ -140,7 +140,7 @@
                 String[] tblCfName = getTableAndColumnFamilyName(filename);
                 if (tblCfName[0].equals(table_)
                         && tblCfName[1].equals(columnFamily_)
-                        && filename.indexOf("-Data.db") != -1)
+                        && filename.contains("-Data.db"))
                 {
                     ssTables.add(file.getAbsoluteFile());
                 }
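
For context, String.contains(CharSequence) has been in the JDK since Java 5 and is specified in terms of indexOf, so hunks like the two above are pure readability changes. A minimal standalone illustration (the filename is made up):

    public class ContainsDemo
    {
        public static void main(String[] args)
        {
            String filename = "Table1-Standard1-42-Data.db";
            // The two tests are equivalent; contains() just says what it means.
            boolean viaIndexOf  = filename.indexOf("-Data.db") != -1;
            boolean viaContains = filename.contains("-Data.db");
            System.out.println(viaIndexOf == viaContains); // always true
        }
    }
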
@@ -178,7 +178,7 @@
      * disk and the total space occupied by the data files
      * associated with this Column Family.
     */
-    public String cfStats(String newLineSeparator, java.text.DecimalFormat df)
+    public String cfStats(String newLineSeparator)
     {
         StringBuilder sb = new StringBuilder();
         /*
@@ -261,7 +261,7 @@
     	if( ranges != null)
     		futurePtr = MinorCompactionManager.instance().submit(ColumnFamilyStore.this, ranges, target, fileList);
     	else
-    		MinorCompactionManager.instance().submitMajor(ColumnFamilyStore.this, ranges, skip);
+    		MinorCompactionManager.instance().submitMajor(ColumnFamilyStore.this, skip);
     	
         boolean result = true;
         try
@@ -334,8 +334,7 @@
     {
     	// Pseudo-increment so that we do not generate consecutive numbers
     	fileIndexGenerator_.incrementAndGet();
-        String name = table_ + "-" + columnFamily_ + "-" + fileIndexGenerator_.incrementAndGet();
-        return name;
+        return table_ + "-" + columnFamily_ + "-" + fileIndexGenerator_.incrementAndGet();
     }
 
     /*
@@ -345,8 +344,7 @@
     {
     	// Pseudo-increment so that we do not generate consecutive numbers
     	fileIndexGenerator_.incrementAndGet();
-        String name = table_ + "-" + columnFamily_ + "-" + SSTable.temporaryFile_ + "-" + fileIndexGenerator_.incrementAndGet() ;
-        return name;
+        return table_ + "-" + columnFamily_ + "-" + SSTable.temporaryFile_ + "-" + fileIndexGenerator_.incrementAndGet();
     }
 
     /*
@@ -367,9 +365,8 @@
     	lowestIndex = getIndexFromFileName(files.get(0));
    		
    		index = lowestIndex + 1 ;
-    	
-        String name = table_ + "-" + columnFamily_ + "-" + SSTable.temporaryFile_ + "-" + index ;
-        return name;
+
+        return table_ + "-" + columnFamily_ + "-" + SSTable.temporaryFile_ + "-" + index;
     }
 
     
@@ -387,14 +384,6 @@
     }
 
     /*
-     * This version is used when we forceflush.
-     */
-    void switchMemtable() throws IOException
-    {
-        memtable_.set( new Memtable(table_, columnFamily_) );
-    }
-
-    /*
      * This version is used only on start up when we are recovering from logs.
      * In the future we may want to parallelize the log processing for a table
      * by having a thread per log file present for recovery. Re-visit at that
@@ -413,7 +402,7 @@
         memtable_.get().forceflush(this);
     }
 
-    void forceFlushBinary() throws IOException
+    void forceFlushBinary()
     {
         BinaryMemtableManager.instance().submit(getColumnFamilyName(), binaryMemtable_.get());
         //binaryMemtable_.get().flush(true);
@@ -456,7 +445,15 @@
      */
     List<ColumnFamily> getColumnFamilies(String key, String columnFamilyColumn, IFilter filter) throws IOException
     {
-        List<ColumnFamily> columnFamilies = getMemoryColumnFamilies(key, columnFamilyColumn, filter);
+        List<ColumnFamily> columnFamilies1 = new ArrayList<ColumnFamily>();
+        /* Get the ColumnFamily from Memtable */
+        getColumnFamilyFromCurrentMemtable(key, columnFamilyColumn, filter, columnFamilies1);
+        if (columnFamilies1.size() == 0 || !filter.isDone())
+        {
+            /* Check if MemtableManager has any historical information */
+            MemtableManager.instance().getColumnFamily(key, columnFamily_, columnFamilyColumn, filter, columnFamilies1);
+        }
+        List<ColumnFamily> columnFamilies = columnFamilies1;
         if (columnFamilies.size() == 0 || !filter.isDone())
         {
             long start = System.currentTimeMillis();
@@ -466,24 +463,6 @@
         return columnFamilies;
     }
 
-    private List<ColumnFamily> getMemoryColumnFamilies(String key, String columnFamilyColumn, IFilter filter)
-    {
-        List<ColumnFamily> columnFamilies = new ArrayList<ColumnFamily>();
-        /* Get the ColumnFamily from Memtable */
-        getColumnFamilyFromCurrentMemtable(key, columnFamilyColumn, filter, columnFamilies);
-        if (columnFamilies.size() == 0 || !filter.isDone())
-        {
-            /* Check if MemtableManager has any historical information */
-            MemtableManager.instance().getColumnFamily(key, columnFamily_, columnFamilyColumn, filter, columnFamilies);
-        }
-        return columnFamilies;
-    }
-
-    public ColumnFamily getColumnFamilyFromMemory(String key, String columnFamilyColumn, IFilter filter)
-    {
-        return resolveAndRemoveDeleted(getMemoryColumnFamilies(key, columnFamilyColumn, filter));
-    }
-
     /**
      * Fetch from disk files, going in sorted order to be efficient.
      * This function exits as soon as the required data is found.
@@ -651,7 +630,7 @@
      * param @ filename - filename just flushed to disk
      * param @ bf - bloom filter which indicates the keys that are in this file.
     */
-    void storeLocation(String filename, BloomFilter bf) throws IOException
+    void storeLocation(String filename, BloomFilter bf)
     {
         boolean doCompaction = false;
         int ssTableSize = 0;
@@ -686,7 +665,7 @@
         }
     }
 
-    PriorityQueue<FileStruct> initializePriorityQueue(List<String> files, List<Range> ranges, int minBufferSize) throws IOException
+    PriorityQueue<FileStruct> initializePriorityQueue(List<String> files, List<Range> ranges, int minBufferSize)
     {
         PriorityQueue<FileStruct> pq = new PriorityQueue<FileStruct>();
         if (files.size() > 1 || (ranges != null &&  files.size() > 0))
@@ -721,8 +700,7 @@
             		{
             			logger_.warn("Unable to close file :" + file);
             		}
-                    continue;
-            	}
+                }
             }
         }
         return pq;
@@ -774,7 +752,7 @@
     /*
      * Break the files into buckets and then compact.
      */
-    void doCompaction()  throws IOException
+    void doCompaction()
     {
         isCompacting_.set(true);
         List<String> files = new ArrayList<String>(ssTables_);
@@ -816,7 +794,6 @@
         {
         	isCompacting_.set(false);
         }
-        return;
     }
 
     void doMajorCompaction(long skip)  throws IOException
@@ -824,18 +801,13 @@
     	doMajorCompactionInternal( skip );
     }
 
-    void doMajorCompaction()  throws IOException
-    {
-    	doMajorCompactionInternal( 0 );
-    }
-    
     /*
      * Compact all the files irrespective of the size.
      * skip : the amount, in GB, above which files are skipped;
      * all files greater than skip GB are skipped for this compaction,
      * except when skip is 0, in which case all files are taken.
      */
-    void doMajorCompactionInternal(long skip)  throws IOException
+    void doMajorCompactionInternal(long skip)
     {
         isCompacting_.set(true);
         List<String> filesInternal = new ArrayList<String>(ssTables_);
@@ -868,7 +840,6 @@
         {
         	isCompacting_.set(false);
         }
-        return ;
     }
 
     /*
@@ -906,41 +877,14 @@
     	return maxFile;
     }
 
-    Range getMaxRange( List<Range> ranges )
-    {
-    	Range maxRange = new Range( BigInteger.ZERO, BigInteger.ZERO );
-    	for( Range range : ranges)
-    	{
-    		if( range.left().compareTo(maxRange.left()) > 0 )
-    		{
-    			maxRange = range;
-    		}
-    	}
-    	return maxRange;
-    }
-
-    boolean isLoopAround ( List<Range> ranges )
-    {
-    	boolean isLoop = false;
-    	for( Range range : ranges)
-    	{
-    		if( range.left().compareTo(range.right()) > 0 )
-    		{
-    			isLoop = true;
-    			break;
-    		}
-    	}
-    	return isLoop;
-    }
-
-    boolean doAntiCompaction(List<Range> ranges, EndPoint target, List<String> fileList) throws IOException
+    boolean doAntiCompaction(List<Range> ranges, EndPoint target, List<String> fileList)
     {
         isCompacting_.set(true);
         List<String> files = new ArrayList<String>(ssTables_);
         boolean result = true;
         try
         {
-        	 result = doFileAntiCompaction(files, ranges, target, bufSize_, fileList, null);
+        	 result = doFileAntiCompaction(files, ranges, target, fileList, null);
         }
         catch ( Exception ex)
         {
@@ -996,7 +940,7 @@
      * and only keeps keys that this node is responsible for.
      * @throws IOException
      */
-    void doCleanupCompaction() throws IOException
+    void doCleanupCompaction()
     {
         isCompacting_.set(true);
         List<String> files = new ArrayList<String>(ssTables_);
@@ -1030,7 +974,7 @@
     	Map<EndPoint, List<Range>> endPointtoRangeMap = StorageService.instance().constructEndPointToRangesMap();
     	myRanges = endPointtoRangeMap.get(StorageService.getLocalStorageEndPoint());
     	List<BloomFilter> compactedBloomFilters = new ArrayList<BloomFilter>();
-        doFileAntiCompaction(files, myRanges, null, bufSize_, newFiles, compactedBloomFilters);
+        doFileAntiCompaction(files, myRanges, null, newFiles, compactedBloomFilters);
         logger_.debug("Original file : " + file + " of size " + new File(file).length());
         lock_.writeLock().lock();
         try
@@ -1061,12 +1005,11 @@
      * @param files
      * @param ranges
      * @param target
-     * @param minBufferSize
      * @param fileList
      * @return
      * @throws IOException
      */
-    boolean doFileAntiCompaction(List<String> files, List<Range> ranges, EndPoint target, int minBufferSize, List<String> fileList, List<BloomFilter> compactedBloomFilters) throws IOException
+    boolean doFileAntiCompaction(List<String> files, List<Range> ranges, EndPoint target, List<String> fileList, List<BloomFilter> compactedBloomFilters)
     {
     	boolean result = false;
         long startTime = System.currentTimeMillis();
@@ -1092,7 +1035,7 @@
 	                    + expectedRangeFileSize + "   is greater than the safe limit of the disk space available.");
 	            return result;
 	        }
-	        PriorityQueue<FileStruct> pq = initializePriorityQueue(files, ranges, minBufferSize);
+	        PriorityQueue<FileStruct> pq = initializePriorityQueue(files, ranges, ColumnFamilyStore.bufSize_);
 	        if (pq.size() > 0)
 	        {
 	            mergedFileName = getTempFileName();
@@ -1149,8 +1092,7 @@
 		                    	catch ( Exception ex)
 		                    	{
                                     logger_.warn(LogUtil.throwableToString(ex));
-		                            continue;
-		                    	}
+                                }
 		                    }
 		                    // Now after merging all crap append to the sstable
 		                    columnFamily = resolveAndRemoveDeleted(columnFamilies);
@@ -1235,8 +1177,7 @@
 	                    		// and it will be deleted after compaction.
                                 logger_.warn(LogUtil.throwableToString(ex));
 	                            filestruct.reader_.close();
-	                            continue;
-	                    	}
+                            }
 	                    }
 	                    lfs.clear();
 	                    lastkey = null;
@@ -1315,7 +1256,7 @@
      * to get the latest data.
      *
      */
-    void  doFileCompaction(List<String> files,  int minBufferSize) throws IOException
+    void  doFileCompaction(List<String> files,  int minBufferSize)
     {
     	String newfile = null;
         long startTime = System.currentTimeMillis();
@@ -1390,9 +1331,9 @@
 			                        columnFamilies.add(ColumnFamily.serializer().deserialize(filestruct.bufIn_));
 		                    	}
 		                    	catch ( Exception ex)
-		                    	{                                    		                    		
-		                            continue;
-		                    	}
+		                    	{
+                                    logger_.warn("error in filecompaction", ex);
+                                }
 		                    }
 		                    // Now after merging all crap append to the sstable
 		                    columnFamily = resolveAndRemoveDeleted(columnFamilies);
@@ -1448,8 +1389,7 @@
 	                    		// in any case we have read as far as possible from it
 	                    		// and it will be deleted after compaction.
 	                            filestruct.reader_.close();
-	                            continue;
-	                    	}
+                            }
 	                    }
 	                    lfs.clear();
 	                    lastkey = null;
@@ -1500,7 +1440,6 @@
         logger_.debug("Total bytes Read for compaction  ..." + totalBytesRead);
         logger_.debug("Total bytes written for compaction  ..."
                 + totalBytesWritten + "   Total keys read ..." + totalkeysRead);
-        return;
     }
 
     public boolean isSuper()

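With the getMemoryColumnFamilies() indirection inlined away, getColumnFamilies() now reads top to bottom as a three-tier lookup. A paraphrase of the resulting flow (the first two steps are verbatim from the hunk above; getColumnFamilyFromDisk is a stand-in name for the elided disk step):

    List<ColumnFamily> columnFamilies = new ArrayList<ColumnFamily>();
    /* 1. The current, writable memtable. */
    getColumnFamilyFromCurrentMemtable(key, columnFamilyColumn, filter, columnFamilies);
    /* 2. Memtables already switched out but not yet flushed to disk. */
    if (columnFamilies.size() == 0 || !filter.isDone())
        MemtableManager.instance().getColumnFamily(key, columnFamily_, columnFamilyColumn, filter, columnFamilies);
    /* 3. Only then the SSTables on disk (hypothetical helper name). */
    if (columnFamilies.size() == 0 || !filter.isDone())
        getColumnFamilyFromDisk(key, columnFamilyColumn, columnFamilies, filter);
    return columnFamilies;
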
Modified: incubator/cassandra/trunk/src/org/apache/cassandra/db/Memtable.java
URL: http://svn.apache.org/viewvc/incubator/cassandra/trunk/src/org/apache/cassandra/db/Memtable.java?rev=760005&r1=760004&r2=760005&view=diff
==============================================================================
--- incubator/cassandra/trunk/src/org/apache/cassandra/db/Memtable.java (original)
+++ incubator/cassandra/trunk/src/org/apache/cassandra/db/Memtable.java Mon Mar 30 15:24:23 2009
@@ -81,7 +81,6 @@
     private Map<String, ColumnFamily> columnFamilies_ = new HashMap<String, ColumnFamily>();
     /* Lock and Condition for notifying new clients about Memtable switches */
     Lock lock_ = new ReentrantLock();
-    Condition condition_;
 
     Memtable(String table, String cfName) throws IOException
     {
@@ -96,7 +95,6 @@
                     ));
         }
 
-        condition_ = lock_.newCondition();
         table_ = table;
         cfName_ = cfName;
         creationTime_ = System.currentTimeMillis();
@@ -197,13 +195,6 @@
         currentObjectCount_.addAndGet(newCount - oldCount);
     }
 
-    private boolean isLifetimeViolated()
-    {
-      /* Memtable lifetime in terms of milliseconds */
-      long lifetimeInMillis = DatabaseDescriptor.getMemtableLifetime() * 3600 * 1000;
-      return ( ( System.currentTimeMillis() - creationTime_ ) >= lifetimeInMillis );
-    }
-
     boolean isThresholdViolated(String key)
     {
     	boolean bVal = false;//isLifetimeViolated();
@@ -368,26 +359,6 @@
         return filter.filter(columnFamilyColumn, columnFamily);
     }
 
-    ColumnFamily get(String key, String cfName)
-    {
-    	printExecutorStats();
-    	Callable<ColumnFamily> call = new Getter(key, cfName);
-    	ColumnFamily cf = null;
-    	try
-    	{
-    		cf = apartments_.get(cfName_).submit(call).get();
-    	}
-    	catch ( ExecutionException ex )
-    	{
-    		logger_.debug(LogUtil.throwableToString(ex));
-    	}
-    	catch ( InterruptedException ex2 )
-    	{
-    		logger_.debug(LogUtil.throwableToString(ex2));
-    	}
-    	return cf;
-    }
-
     ColumnFamily get(String key, String cfName, IFilter filter)
     {
     	printExecutorStats();

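The removed Memtable.get() is still a nice illustration of the "apartment" pattern this class uses: every read hops onto a single-threaded executor dedicated to the column family, so the underlying map needs no locking of its own. A self-contained sketch of the pattern (names are illustrative, not Cassandra's):

    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class ApartmentDemo
    {
        // One thread owns the map; nobody else touches it directly.
        private final ExecutorService apartment = Executors.newSingleThreadExecutor();
        private final Map<String, String> columnFamilies = new HashMap<String, String>();

        public String get(final String key) throws Exception
        {
            // Hop onto the apartment thread and block for the answer,
            // as the removed get() did with its Getter callable.
            return apartment.submit(new Callable<String>()
            {
                public String call()
                {
                    return columnFamilies.get(key);
                }
            }).get();
        }
    }
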
Modified: incubator/cassandra/trunk/src/org/apache/cassandra/db/MinorCompactionManager.java
URL: http://svn.apache.org/viewvc/incubator/cassandra/trunk/src/org/apache/cassandra/db/MinorCompactionManager.java?rev=760005&r1=760004&r2=760005&view=diff
==============================================================================
--- incubator/cassandra/trunk/src/org/apache/cassandra/db/MinorCompactionManager.java (original)
+++ incubator/cassandra/trunk/src/org/apache/cassandra/db/MinorCompactionManager.java Mon Mar 30 15:24:23 2009
@@ -87,10 +87,6 @@
             	columnFamilyStore_.doCompaction();
                 logger_.debug("Finished compaction ..."+columnFamilyStore_.columnFamily_);
             }
-            catch (IOException e)
-            {
-                logger_.debug( LogUtil.throwableToString(e) );
-            }
             catch (Throwable th)
             {
                 logger_.error( LogUtil.throwableToString(th) );
@@ -122,16 +118,9 @@
         public Boolean call()
         {
         	boolean result = true;
-            try
-            {
-                logger_.debug("Started  compaction ..."+columnFamilyStore_.columnFamily_);
-                result = columnFamilyStore_.doAntiCompaction(ranges_, target_,fileList_);
-                logger_.debug("Finished compaction ..."+columnFamilyStore_.columnFamily_);
-            }
-            catch (IOException e)
-            {
-                logger_.debug( LogUtil.throwableToString(e) );
-            }
+            logger_.debug("Started  compaction ..."+columnFamilyStore_.columnFamily_);
+            result = columnFamilyStore_.doAntiCompaction(ranges_, target_,fileList_);
+            logger_.debug("Finished compaction ..."+columnFamilyStore_.columnFamily_);
             return result;
         }
     }
@@ -180,10 +169,6 @@
             	columnFamilyStore_.doCleanupCompaction();
                 logger_.debug("Finished compaction ..."+columnFamilyStore_.columnFamily_);
             }
-            catch (IOException e)
-            {
-                logger_.debug( LogUtil.throwableToString(e) );
-            }
             catch (Throwable th)
             {
                 logger_.error( LogUtil.throwableToString(th) );
@@ -223,14 +208,9 @@
     public Future<Boolean> submit(ColumnFamilyStore columnFamilyStore, List<Range> ranges, EndPoint target, List<String> fileList)
     {
         return compactor_.submit( new FileCompactor2(columnFamilyStore, ranges, target, fileList) );
-    } 
-
-    public Future<Boolean> submit(ColumnFamilyStore columnFamilyStore, List<Range> ranges)
-    {
-        return compactor_.submit( new FileCompactor2(columnFamilyStore, ranges) );
     }
 
-    public void  submitMajor(ColumnFamilyStore columnFamilyStore, List<Range> ranges, long skip)
+    public void  submitMajor(ColumnFamilyStore columnFamilyStore, long skip)
     {
         compactor_.submit( new OnDemandCompactor(columnFamilyStore, skip) );
     }

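Worth noting why the catch (IOException) blocks were deleted outright rather than left in defensively: once doCompaction() and friends stop declaring the checked exception, Java refuses to compile a catch clause for it. The surviving shape, per the hunks above:

    try
    {
        columnFamilyStore_.doCompaction();  // no longer declares "throws IOException"
        logger_.debug("Finished compaction ..." + columnFamilyStore_.columnFamily_);
    }
    // A "catch (IOException e)" clause here would now be a compile error:
    // javac rejects catching a checked exception the try body can never throw.
    // Catching Throwable (or Exception) remains legal.
    catch (Throwable th)
    {
        logger_.error(LogUtil.throwableToString(th));
    }
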
Modified: incubator/cassandra/trunk/src/org/apache/cassandra/db/Table.java
URL: http://svn.apache.org/viewvc/incubator/cassandra/trunk/src/org/apache/cassandra/db/Table.java?rev=760005&r1=760004&r2=760005&view=diff
==============================================================================
--- incubator/cassandra/trunk/src/org/apache/cassandra/db/Table.java (original)
+++ incubator/cassandra/trunk/src/org/apache/cassandra/db/Table.java Mon Mar 30 15:24:23 2009
@@ -488,7 +488,7 @@
         for ( String cfName : cfNames )
         {
             ColumnFamilyStore cfStore = columnFamilyStores_.get(cfName);
-            sb.append(cfStore.cfStats(newLineSeparator, df));
+            sb.append(cfStore.cfStats(newLineSeparator));
         }
         int newLength = sb.toString().length();
         
@@ -592,7 +592,7 @@
         {
             ColumnFamilyStore cfStore = columnFamilyStores_.get( columnFamily );
             if ( cfStore != null )
-                MinorCompactionManager.instance().submitMajor(cfStore, null, 0);
+                MinorCompactionManager.instance().submitMajor(cfStore, 0);
         }
     }
 
@@ -684,26 +684,6 @@
         dbAnalyticsSource_.updateReadStatistics(timeTaken);
         return row;
     }
-    
-    public Row getRowFromMemory(String key)
-    {
-        Row row = new Row(key);
-        Set<String> columnFamilies = tableMetadata_.getColumnFamilies();
-        long start = System.currentTimeMillis();
-        for ( String columnFamily : columnFamilies )
-        {
-            ColumnFamilyStore cfStore = columnFamilyStores_.get(columnFamily);
-            if ( cfStore != null )
-            {    
-                ColumnFamily cf = cfStore.getColumnFamilyFromMemory(key, columnFamily, new IdentityFilter());
-                if ( cf != null )
-                    row.addColumnFamily(cf);
-            }
-        }
-        long timeTaken = System.currentTimeMillis() - start;
-        dbAnalyticsSource_.updateReadStatistics(timeTaken);
-        return row;
-    }
 
 
     /**

Modified: incubator/cassandra/trunk/src/org/apache/cassandra/dht/BootStrapper.java
URL: http://svn.apache.org/viewvc/incubator/cassandra/trunk/src/org/apache/cassandra/dht/BootStrapper.java?rev=760005&r1=760004&r2=760005&view=diff
==============================================================================
--- incubator/cassandra/trunk/src/org/apache/cassandra/dht/BootStrapper.java (original)
+++ incubator/cassandra/trunk/src/org/apache/cassandra/dht/BootStrapper.java Mon Mar 30 15:24:23 2009
@@ -130,21 +130,5 @@
             logger_.debug( LogUtil.throwableToString(th) );
         }
     }
- 
-    private Range getMyOldRange()
-    {
-        Map<EndPoint, BigInteger> oldEndPointToTokenMap = tokenMetadata_.cloneEndPointTokenMap();
-        Map<BigInteger, EndPoint> oldTokenToEndPointMap = tokenMetadata_.cloneTokenEndPointMap();
 
-        oldEndPointToTokenMap.remove(targets_);
-        oldTokenToEndPointMap.remove(tokens_);
-
-        BigInteger myToken = oldEndPointToTokenMap.get(StorageService.getLocalStorageEndPoint());
-        List<BigInteger> allTokens = new ArrayList<BigInteger>(oldTokenToEndPointMap.keySet());
-        Collections.sort(allTokens);
-        int index = Collections.binarySearch(allTokens, myToken);
-        /* Calculate the lhs for the range */
-        BigInteger lhs = (index == 0) ? allTokens.get(allTokens.size() - 1) : allTokens.get( index - 1);
-        return new Range( lhs, myToken );
-    }
 }

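The removed getMyOldRange() is the standard predecessor computation on a consistent-hashing ring; the same pattern appears in StorageService.getMyRange(), removed further down. A self-contained worked example of the wrap-around case it handles:

    import java.math.BigInteger;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;

    public class PredecessorDemo
    {
        /* Left bound of the range owned by myToken: its predecessor on the ring. */
        static BigInteger leftBound(List<BigInteger> sortedTokens, BigInteger myToken)
        {
            int index = Collections.binarySearch(sortedTokens, myToken);
            // The smallest token wraps around: its predecessor is the largest token.
            return (index == 0) ? sortedTokens.get(sortedTokens.size() - 1)
                                : sortedTokens.get(index - 1);
        }

        public static void main(String[] args)
        {
            List<BigInteger> tokens = Arrays.asList(
                BigInteger.valueOf(10), BigInteger.valueOf(40), BigInteger.valueOf(70));
            System.out.println(leftBound(tokens, BigInteger.valueOf(40))); // 10 -> owns (10, 40]
            System.out.println(leftBound(tokens, BigInteger.valueOf(10))); // 70 -> wraps: (70, 10]
        }
    }
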
Modified: incubator/cassandra/trunk/src/org/apache/cassandra/dht/Range.java
URL: http://svn.apache.org/viewvc/incubator/cassandra/trunk/src/org/apache/cassandra/dht/Range.java?rev=760005&r1=760004&r2=760005&view=diff
==============================================================================
--- incubator/cassandra/trunk/src/org/apache/cassandra/dht/Range.java (original)
+++ incubator/cassandra/trunk/src/org/apache/cassandra/dht/Range.java Mon Mar 30 15:24:23 2009
@@ -95,45 +95,7 @@
     {
         return right_;
     }
-    
-    boolean isSplitRequired()
-    {
-        return ( left_.subtract(right_).signum() >= 0 );
-    }
-    
-    public boolean isSplitBy(BigInteger bi)
-    {
-        if ( left_.subtract(right_).signum() > 0 )
-        {
-            /* 
-             * left is greater than right we are wrapping around.
-             * So if the interval is [a,b) where a > b then we have
-             * 3 cases one of which holds for any given token k.
-             * (1) k > a -- return true
-             * (2) k < b -- return true
-             * (3) b < k < a -- return false
-            */
-            if ( bi.subtract(left_).signum() > 0 )
-                return true;
-            else if (right_.subtract(bi).signum() > 0 )
-                return true;
-            else
-                return false;
-        }
-        else if ( left_.subtract(right_).signum() < 0 )
-        {
-            /*
-             * This is the range [a, b) where a < b. 
-            */
-            return ( bi.subtract(left_).signum() > 0 && right_.subtract(bi).signum() > 0 );
-        }        
-        else
-        {
-            // should never be here.
-            return true;
-        }       
-    }
-    
+
     /**
      * Helps determine if a given point on the DHT ring is contained
      * in the range in question.
@@ -154,10 +116,7 @@
             */
             if ( bi.subtract(left_).signum() >= 0 )
                 return true;
-            else if (right_.subtract(bi).signum() > 0 )
-                return true;
-            else
-                return false;
+            else return right_.subtract(bi).signum() > 0;
         }
         else if ( left_.subtract(right_).signum() < 0 )
         {
@@ -171,58 +130,7 @@
     		return true;
     	}    	
     }
-    
-    /**
-     * Helps determine if a given range on the DHT ring is contained
-     * within the range associated with the <i>this</i> pointer.
-     * @param rhs rhs in question
-     * @return true if the point contains within the range else false.
-     */
-    public boolean contains(Range rhs)
-    {
-        /* 
-         * If (a, b] and (c, d} are not wrap arounds
-         * then return true if a <= c <= d <= b.
-         */
-        if ( !isWrapAround(this) && !isWrapAround(rhs) )
-        {
-            if ( rhs.left_.subtract(left_).signum() >= 0 && right_.subtract(rhs.right_).signum() >= 0 )
-                return true;
-            else
-                return false;
-        }
-        
-        /*
-         * If lhs is a wrap around and rhs is not then
-         * rhs.left >= lhs.left and rhs.right >= lhs.left.
-         */
-        if ( isWrapAround(this) && !isWrapAround(rhs) )
-        {
-            if ( rhs.left_.subtract(left_).signum() >= 0 && rhs.right_.subtract(right_).signum() >= 0 )
-                return true;
-            else
-                return false;
-        }
-        
-        /* 
-         * If lhs is not a wrap around and rhs is a wrap 
-         * around then we just return false.
-         */
-        if ( !isWrapAround(this) && isWrapAround(rhs) )
-            return false;        
-        
-        if( isWrapAround(this) && isWrapAround(rhs) )
-        {
-            if ( rhs.left_.subtract(left_).signum() >= 0 && right_.subtract(right_).signum() >= 0 )
-                return true;
-            else
-                return false;
-        }
-        
-        /* should never be here */
-        return false;
-    }
-    
+
     /**
      * Tells if the given range is a wrap around.
      * @param range
@@ -230,8 +138,7 @@
      */
     private boolean isWrapAround(Range range)
     {
-        boolean bVal = ( range.left_.subtract(range.right_).signum() > 0 ) ? true : false;
-        return bVal;
+        return range.left_.subtract(range.right_).signum() > 0;
     }
     
     public int compareTo(Range rhs)
@@ -254,10 +161,7 @@
         if ( !(o instanceof Range) )
             return false;
         Range rhs = (Range)o;
-        if ( left_.equals(rhs.left_) && right_.equals(rhs.right_) )
-            return true;
-        else
-            return false;
+        return left_.equals(rhs.left_) && right_.equals(rhs.right_);
     }
     
     public int hashCode()

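The contains(BigInteger) logic that survives this cleanup deserves a worked example, since the wrap-around case is the one people trip over. The sketch below mirrors the wrap branch shown in the hunk; the non-wrap branch's body is elided in the diff, so its version here is the textbook one and may differ at the boundaries:

    import java.math.BigInteger;

    public class WrapAroundDemo
    {
        static boolean contains(BigInteger left, BigInteger right, BigInteger bi)
        {
            if (left.compareTo(right) > 0)
            {
                // Wrap-around interval: membership means "at or past left, or before right".
                return bi.compareTo(left) >= 0 || right.compareTo(bi) > 0;
            }
            else if (left.compareTo(right) < 0)
            {
                // Ordinary interval (reconstructed; elided in the hunk above).
                return bi.compareTo(left) >= 0 && right.compareTo(bi) > 0;
            }
            return true; // left == right spans the whole ring
        }

        public static void main(String[] args)
        {
            BigInteger left = BigInteger.valueOf(100), right = BigInteger.valueOf(10);
            System.out.println(contains(left, right, BigInteger.valueOf(150))); // true
            System.out.println(contains(left, right, BigInteger.valueOf(5)));   // true
            System.out.println(contains(left, right, BigInteger.valueOf(50)));  // false
        }
    }
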
Modified: incubator/cassandra/trunk/src/org/apache/cassandra/io/SSTable.java
URL: http://svn.apache.org/viewvc/incubator/cassandra/trunk/src/org/apache/cassandra/io/SSTable.java?rev=760005&r1=760004&r2=760005&view=diff
==============================================================================
--- incubator/cassandra/trunk/src/org/apache/cassandra/io/SSTable.java (original)
+++ incubator/cassandra/trunk/src/org/apache/cassandra/io/SSTable.java Mon Mar 30 15:24:23 2009
@@ -189,11 +189,6 @@
             position_ = position;
         }
 
-        public String key()
-        {
-            return key_;
-        }
-
         public long position()
         {
             return position_;
@@ -592,11 +587,6 @@
         return getFile(dataFile_);
     }
 
-    public long lastModified()
-    {
-        return dataWriter_.lastModified();
-    }
-    
     /*
      * Seeks to the specified key on disk.
     */

Modified: incubator/cassandra/trunk/src/org/apache/cassandra/io/SequenceFile.java
URL: http://svn.apache.org/viewvc/incubator/cassandra/trunk/src/org/apache/cassandra/io/SequenceFile.java?rev=760005&r1=760004&r2=760005&view=diff
==============================================================================
--- incubator/cassandra/trunk/src/org/apache/cassandra/io/SequenceFile.java (original)
+++ incubator/cassandra/trunk/src/org/apache/cassandra/io/SequenceFile.java Mon Mar 30 15:24:23 2009
@@ -21,25 +21,19 @@
 import java.io.*;
 import java.lang.reflect.Method;
 import java.nio.ByteBuffer;
-import java.nio.CharBuffer;
 import java.nio.MappedByteBuffer;
 import java.nio.channels.FileChannel;
 import java.security.AccessController;
 import java.security.PrivilegedAction;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
-import java.util.SortedMap;
-import java.util.StringTokenizer;
-import java.util.TreeMap;
+
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.continuations.Suspendable;
 import org.apache.cassandra.db.RowMutation;
 import org.apache.cassandra.service.PartitionerType;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.BloomFilter;
-import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.LogUtil;
 import org.apache.log4j.Logger;
 
@@ -212,13 +206,11 @@
     }
 
     public static class BufferWriter extends Writer
-    {        
-        private int size_;
+    {
 
         BufferWriter(String filename, int size) throws IOException
         {
             super(filename, size);
-            size_ = size;
         }
         
         @Override
@@ -241,12 +233,10 @@
     
     public static class ChecksumWriter extends Writer
     {
-        private int size_;
 
         ChecksumWriter(String filename, int size) throws IOException
         {
             super(filename, size);
-            size_ = size;
         }
         
         @Override
@@ -1444,17 +1434,7 @@
     {
         return new BufferWriter(filename, size);
     }
-    
-    public static IFileWriter chksumWriter(String filename, int size) throws IOException
-    {
-        return new ChecksumWriter(filename, size);
-    }
 
-    public static IFileWriter concurrentWriter(String filename) throws IOException
-    {
-        return new ConcurrentWriter(filename);
-    }
-    
     public static IFileWriter fastWriter(String filename, int size) throws IOException
     {
         return new FastConcurrentWriter(filename, size);
@@ -1469,16 +1449,6 @@
     {
         return new BufferReader(filename, size);
     }
-    
-    public static IFileReader chksumReader(String filename, int size) throws IOException
-    {
-        return new ChecksumReader(filename, size);
-    }
-
-    public static boolean readBoolean(ByteBuffer buffer)
-    {
-        return ( buffer.get() == 1 ? true : false );
-    }
 
     /**
      * Efficiently writes a UTF8 string to the buffer.
@@ -1548,83 +1518,4 @@
         buffer.put(bytearr, 0, utflen + 2);
     }
 
-    /**
-     * Read a UTF8 string from a serialized buffer.
-     * @param buffer buffer from which a UTF8 string is read
-     * @return a Java String
-    */
-    protected static String readUTF(ByteBuffer in) throws IOException
-    {
-        int utflen = in.getShort();
-        byte[] bytearr = new byte[utflen];
-        char[] chararr = new char[utflen];
-
-        int c, char2, char3;
-        int count = 0;
-        int chararr_count = 0;
-
-        in.get(bytearr, 0, utflen);
-
-        while (count < utflen)
-        {
-            c = (int) bytearr[count] & 0xff;
-            if (c > 127)
-                break;
-            count++;
-            chararr[chararr_count++] = (char) c;
-        }
-
-        while (count < utflen)
-        {
-            c = (int) bytearr[count] & 0xff;
-            switch (c >> 4)
-            {
-            case 0:
-            case 1:
-            case 2:
-            case 3:
-            case 4:
-            case 5:
-            case 6:
-            case 7:
-                /* 0xxxxxxx */
-                count++;
-                chararr[chararr_count++] = (char) c;
-                break;
-            case 12:
-            case 13:
-                /* 110x xxxx 10xx xxxx */
-                count += 2;
-                if (count > utflen)
-                    throw new UTFDataFormatException(
-                    "malformed input: partial character at end");
-                char2 = (int) bytearr[count - 1];
-                if ((char2 & 0xC0) != 0x80)
-                    throw new UTFDataFormatException(
-                            "malformed input around byte " + count);
-                chararr[chararr_count++] = (char) (((c & 0x1F) << 6) | (char2 & 0x3F));
-                break;
-            case 14:
-                /* 1110 xxxx 10xx xxxx 10xx xxxx */
-                count += 3;
-                if (count > utflen)
-                    throw new UTFDataFormatException(
-                    "malformed input: partial character at end");
-                char2 = (int) bytearr[count - 2];
-                char3 = (int) bytearr[count - 1];
-                if (((char2 & 0xC0) != 0x80) || ((char3 & 0xC0) != 0x80))
-                    throw new UTFDataFormatException(
-                            "malformed input around byte " + (count - 1));
-                chararr[chararr_count++] = (char) (((c & 0x0F) << 12)
-                        | ((char2 & 0x3F) << 6) | ((char3 & 0x3F) << 0));
-                break;
-            default:
-                /* 10xx xxxx, 1111 xxxx */
-                throw new UTFDataFormatException("malformed input around byte "
-                        + count);
-            }
-        }
-        // The number of chars produced may be less than utflen
-        return new String(chararr, 0, chararr_count);
-    }
 }

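The hand-rolled readUTF() deleted above duplicated the modified-UTF-8 decoder already in the JDK. If a ByteBuffer-based reader is ever needed again, one hedged option is to re-frame the length-prefixed bytes and delegate to java.io.DataInputStream, which decodes exactly the format the surviving writeUTF() produces:

    import java.io.ByteArrayInputStream;
    import java.io.DataInputStream;
    import java.io.IOException;
    import java.nio.ByteBuffer;

    public class ReadUtfSketch
    {
        static String readUTF(ByteBuffer in) throws IOException
        {
            int utflen = in.getShort() & 0xFFFF;     // unsigned two-byte length prefix
            // Re-prepend the length so DataInputStream.readUTF sees the frame it expects.
            byte[] framed = new byte[utflen + 2];
            framed[0] = (byte) (utflen >> 8);
            framed[1] = (byte) utflen;
            in.get(framed, 2, utflen);
            return new DataInputStream(new ByteArrayInputStream(framed)).readUTF();
        }
    }
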
Modified: incubator/cassandra/trunk/src/org/apache/cassandra/locator/TokenMetadata.java
URL: http://svn.apache.org/viewvc/incubator/cassandra/trunk/src/org/apache/cassandra/locator/TokenMetadata.java?rev=760005&r1=760004&r2=760005&view=diff
==============================================================================
--- incubator/cassandra/trunk/src/org/apache/cassandra/locator/TokenMetadata.java (original)
+++ incubator/cassandra/trunk/src/org/apache/cassandra/locator/TokenMetadata.java Mon Mar 30 15:24:23 2009
@@ -39,29 +39,19 @@
 
 public class TokenMetadata
 {
-    private static ICompactSerializer<TokenMetadata> serializer_ = new TokenMetadataSerializer();
-    
-    public static ICompactSerializer<TokenMetadata> serializer()
-    {
-        return serializer_;
-    }
-    
-    /* Maintains token to endpoint map of every node in the cluster. */    
+    /* Maintains token to endpoint map of every node in the cluster. */
     private Map<BigInteger, EndPoint> tokenToEndPointMap_ = new HashMap<BigInteger, EndPoint>();    
     /* Maintains a reverse index of endpoint to token in the cluster. */
     private Map<EndPoint, BigInteger> endPointToTokenMap_ = new HashMap<EndPoint, BigInteger>();
     
     /* Use this lock for manipulating the token map */
-    private ReadWriteLock lock_ = new ReentrantReadWriteLock(true);
-    
-    /*
-     * For JAXB purposes. 
-    */
+    private final ReadWriteLock lock_ = new ReentrantReadWriteLock(true);
+
     public TokenMetadata()
     {
     }
-    
-    protected TokenMetadata(Map<BigInteger, EndPoint> tokenToEndPointMap, Map<EndPoint, BigInteger> endPointToTokenMap)
+
+    private TokenMetadata(Map<BigInteger, EndPoint> tokenToEndPointMap, Map<EndPoint, BigInteger> endPointToTokenMap)
     {
         tokenToEndPointMap_ = tokenToEndPointMap;
         endPointToTokenMap_ = endPointToTokenMap;
@@ -188,51 +178,3 @@
         return sb.toString();
     }
 }
-
-class TokenMetadataSerializer implements ICompactSerializer<TokenMetadata>
-{
-    public void serialize(TokenMetadata tkMetadata, DataOutputStream dos) throws IOException
-    {        
-        Map<BigInteger, EndPoint> tokenToEndPointMap = tkMetadata.cloneTokenEndPointMap();
-        Set<BigInteger> tokens = tokenToEndPointMap.keySet();
-        /* write the size */
-        dos.writeInt(tokens.size());        
-        for ( BigInteger token : tokens )
-        {
-            byte[] bytes = token.toByteArray();
-            /* Convert the BigInteger to byte[] and persist */
-            dos.writeInt(bytes.length);
-            dos.write(bytes); 
-            /* Write the endpoint out */
-            CompactEndPointSerializationHelper.serialize(tokenToEndPointMap.get(token), dos);
-        }
-    }
-    
-    public TokenMetadata deserialize(DataInputStream dis) throws IOException
-    {
-        TokenMetadata tkMetadata = null;
-        int size = dis.readInt();
-        
-        if ( size > 0 )
-        {
-            Map<BigInteger, EndPoint> tokenToEndPointMap = new HashMap<BigInteger, EndPoint>();
-            Map<EndPoint, BigInteger> endPointToTokenMap = new HashMap<EndPoint, BigInteger>();
-            
-            for ( int i = 0; i < size; ++i )
-            {
-                /* Read the byte[] and convert to BigInteger */
-                byte[] bytes = new byte[dis.readInt()];
-                dis.readFully(bytes);
-                BigInteger token = new BigInteger(bytes);
-                /* Read the endpoint out */
-                EndPoint endpoint = CompactEndPointSerializationHelper.deserialize(dis);
-                tokenToEndPointMap.put(token, endpoint);
-                endPointToTokenMap.put(endpoint, token);
-            }
-            
-            tkMetadata = new TokenMetadata( tokenToEndPointMap, endPointToTokenMap );
-        }
-        
-        return tkMetadata;
-    }
-}

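For anyone still holding serialized TokenMetadata from older builds: the deleted serializer wrote a simple length-prefixed layout, reconstructed here from the removed code:

    int32   token count
    repeated per token:
        int32   length of the token's byte form
        byte[]  token (BigInteger.toByteArray())
        endpoint, via CompactEndPointSerializationHelper
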
Modified: incubator/cassandra/trunk/src/org/apache/cassandra/service/StorageService.java
URL: http://svn.apache.org/viewvc/incubator/cassandra/trunk/src/org/apache/cassandra/service/StorageService.java?rev=760005&r1=760004&r2=760005&view=diff
==============================================================================
--- incubator/cassandra/trunk/src/org/apache/cassandra/service/StorageService.java (original)
+++ incubator/cassandra/trunk/src/org/apache/cassandra/service/StorageService.java Mon Mar 30 15:24:23 2009
@@ -128,16 +128,9 @@
     public final static String bootStrapInitiateVerbHandler_ = "BOOTSTRAP-INITIATE-VERB-HANDLER";
     public final static String bootStrapInitiateDoneVerbHandler_ = "BOOTSTRAP-INITIATE-DONE-VERB-HANDLER";
     public final static String bootStrapTerminateVerbHandler_ = "BOOTSTRAP-TERMINATE-VERB-HANDLER";
-    public final static String tokenInfoVerbHandler_ = "TOKEN-INFO-VERB-HANDLER";
-    public final static String locationInfoVerbHandler_ = "LOCATION-INFO-VERB-HANDLER";
     public final static String dataFileVerbHandler_ = "DATA-FILE-VERB-HANDLER";
     public final static String mbrshipCleanerVerbHandler_ = "MBRSHIP-CLEANER-VERB-HANDLER";
     public final static String bsMetadataVerbHandler_ = "BS-METADATA-VERB-HANDLER";
-    public final static String jobConfigurationVerbHandler_ = "JOB-CONFIGURATION-VERB-HANDLER";
-    public final static String taskMetricVerbHandler_ = "TASK-METRIC-VERB-HANDLER";
-    public final static String mapAssignmentVerbHandler_ = "MAP-ASSIGNMENT-VERB-HANDLER"; 
-    public final static String reduceAssignmentVerbHandler_ = "REDUCE-ASSIGNMENT-VERB-HANDLER";
-    public final static String mapCompletionVerbHandler_ = "MAP-COMPLETION-VERB-HANDLER";
     public final static String calloutDeployVerbHandler_ = "CALLOUT-DEPLOY-VERB-HANDLER";
     public final static String touchVerbHandler_ = "TOUCH-VERB-HANDLER";
     
@@ -327,8 +320,6 @@
         MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.bootStrapInitiateDoneVerbHandler_, new StorageService.BootstrapInitiateDoneVerbHandler());
         MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.bootStrapTerminateVerbHandler_, new StreamManager.BootstrapTerminateVerbHandler());
         MessagingService.getMessagingInstance().registerVerbHandlers(HttpConnection.httpRequestVerbHandler_, new HttpRequestVerbHandler(this) );
-        MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.tokenInfoVerbHandler_, new TokenInfoVerbHandler() );
-        MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.locationInfoVerbHandler_, new LocationInfoVerbHandler() );
         MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.dataFileVerbHandler_, new DataFileVerbHandler() );
         MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.mbrshipCleanerVerbHandler_, new MembershipCleanerVerbHandler() );
         MessagingService.getMessagingInstance().registerVerbHandlers(StorageService.bsMetadataVerbHandler_, new BootstrapMetadataVerbHandler() );        
@@ -696,77 +687,6 @@
         }
         return endPointToRangesMap;
     }
-    
-    /**
-     * Get the estimated disk space of the target endpoint in its
-     * primary range.
-     * @param target whose primary range we are interested in.
-     * @return disk space of the target in the primary range.
-     */
-    private double getDiskSpaceForPrimaryRange(EndPoint target)
-    {
-        double primaryDiskSpace = 0d;
-        Map<BigInteger, EndPoint> tokenToEndPointMap = tokenMetadata_.cloneTokenEndPointMap();
-        Set<BigInteger> tokens = tokenToEndPointMap.keySet();
-        Range[] allRanges = getAllRanges(tokens);
-        Arrays.sort(allRanges);
-        /* Mapping from Range to its ordered position on the ring */
-        Map<Range, Integer> rangeIndex = new HashMap<Range, Integer>();
-        for ( int i = 0; i < allRanges.length; ++i )
-        {
-            rangeIndex.put(allRanges[i], i);
-        }
-        /* Get the coefficients for the equations */
-        List<double[]> equations = new ArrayList<double[]>();
-        /* Get the endpoint to range map */
-        Map<EndPoint, List<Range>> endPointToRangesMap = constructEndPointToRangesMap();
-        Set<EndPoint> eps = endPointToRangesMap.keySet();
-        
-        for ( EndPoint ep : eps )
-        {
-            List<Range> ranges = endPointToRangesMap.get(ep);
-            double[] equation = new double[allRanges.length];
-            for ( Range range : ranges )
-            {                
-                int index = rangeIndex.get(range);
-                equation[index] = 1;
-            }
-            equations.add(equation);
-        }
-        double[][] coefficients = equations.toArray( new double[0][0] );
-        
-        /* Get the constants which are the aggregate disk space for each endpoint */
-        double[] constants = new double[allRanges.length];
-        int index = 0;
-        for ( EndPoint ep : eps )
-        {
-            /* reset the port back to control port */
-            ep.setPort(DatabaseDescriptor.getControlPort());
-            String lInfo = null;
-            if ( ep.equals(StorageService.udpAddr_) )
-                lInfo = getLoadInfo();
-            else                
-                lInfo = getLoadInfo(ep);
-            LoadInfo li = new LoadInfo(lInfo);
-            constants[index++] = FileUtils.stringToFileSize(li.diskSpace());
-        }
-        
-        RealMatrix matrix = new RealMatrixImpl(coefficients);
-        double[] solutions = matrix.solve(constants);
-        Range primaryRange = getPrimaryRangeForEndPoint(target);
-        primaryDiskSpace = solutions[rangeIndex.get(primaryRange)];
-        return primaryDiskSpace;
-    }
-    
-    /**
-     * This is very dangerous. This is used only on the client
-     * side to set up the client library. This is then used to
-     * find the appropriate nodes to route the key to.
-    */
-    public void setTokenMetadata(TokenMetadata tokenMetadata)
-    {
-        tokenMetadata_ = tokenMetadata;
-    }
 
     /**
      *  Called when there is a change in application state. In particular
@@ -840,17 +760,6 @@
         }
     }
 
-    public static BigInteger generateRandomToken()
-    {
-	    byte[] randomBytes = new byte[24];
-	    Random random = new Random();
-	    for ( int i = 0 ; i < 24 ; i++)
-	    {
-	    randomBytes[i] = (byte)(31 + random.nextInt(256 - 31));
-	    }
-	    return hash(new String(randomBytes));
-    }
-
     /**
      * Get the count of primary keys from the sampler.
     */
@@ -870,37 +779,6 @@
         LoadInfo li = storageLoadBalancer_.getLoad(ep);
         return ( li == null ) ? "N/A" : li.toString();
     }
-    
-    /**
-     * Get the endpoint that has the largest primary count.
-     * @return
-     */
-    EndPoint getEndPointWithLargestPrimaryCount()
-    {
-        Set<EndPoint> allMbrs = Gossiper.instance().getAllMembers();
-        Map<LoadInfo, EndPoint> loadInfoToEndPointMap = new HashMap<LoadInfo, EndPoint>();
-        List<LoadInfo> lInfos = new ArrayList<LoadInfo>();
-        
-        for ( EndPoint mbr : allMbrs )
-        {
-            mbr.setPort(DatabaseDescriptor.getStoragePort());
-            LoadInfo li = null;
-            if ( mbr.equals(StorageService.tcpAddr_) )
-            {
-                li = new LoadInfo( getLoadInfo() );
-                lInfos.add( li );
-            }
-            else
-            {
-                li = storageLoadBalancer_.getLoad(mbr);
-                lInfos.add( li );
-            }
-            loadInfoToEndPointMap.put(li, mbr);
-        }
-        
-        Collections.sort(lInfos, new LoadInfo.DiskSpaceComparator());
-        return loadInfoToEndPointMap.get( lInfos.get(lInfos.size() - 1) );
-    }
 
     /*
      * This method updates the token on disk and modifies the cached
@@ -1178,35 +1056,6 @@
     }
 
     /**
-     * This method returns the range handled by this node.
-     */
-    public Range getMyRange()
-    {
-        BigInteger myToken = tokenMetadata_.getToken(StorageService.tcpAddr_);
-        Map<BigInteger, EndPoint> tokenToEndPointMap = tokenMetadata_.cloneTokenEndPointMap();
-        List<BigInteger> allTokens = new ArrayList<BigInteger>(tokenToEndPointMap.keySet());
-        Collections.sort(allTokens);
-        int index = Collections.binarySearch(allTokens, myToken);
-        /* Calculate the lhs for the range */
-        BigInteger lhs = (index == 0) ? allTokens.get(allTokens.size() - 1) : allTokens.get( index - 1);
-        return new Range( lhs, myToken );
-    }
-    
-    /**
-     * Get the primary for the given range. Use the replica placement
-     * strategies to determine which are the replicas. The first replica
-     * in the list is the primary.
-     * 
-     * @param range on the ring.
-     * @return endpoint responsible for the range.
-     */
-    public EndPoint getPrimaryStorageEndPointForRange(Range range)
-    {
-        EndPoint[] replicas = nodePicker_.getStorageEndPoints(range.left());
-        return replicas[0];
-    }
-    
-    /**
      * Get the primary range for the specified endpoint.
      * @param ep endpoint we are interested in.
      * @return range for the specified endpoint.
@@ -1274,20 +1123,6 @@
     }
 
     /**
-     * Get all ranges that span the ring given a set
-     * of endpoints.
-    */
-    public Range[] getPrimaryRangesForEndPoints(Set<EndPoint> endpoints)
-    {
-        List<Range> allRanges = new ArrayList<Range>();
-        for ( EndPoint endpoint : endpoints )
-        {
-            allRanges.add( getPrimaryRangeForEndPoint( endpoint) );
-        }
-        return allRanges.toArray(new Range[0]);
-    }
-    
-    /**
      * This method returns the endpoint that is responsible for storing the
      * specified key.
      *
@@ -1335,63 +1170,7 @@
         EndPoint endpoint = getPrimary(key);
         return StorageService.tcpAddr_.equals(endpoint);
     }
-    
-    /**
-     * This method determines whether the target endpoint is the
-     * primary for the given key.
-     * @param key
-     * @param target the target enpoint 
-     * @return true if the local endpoint is the primary replica.
-    */
-    public boolean isPrimary(String key, EndPoint target)
-    {
-        EndPoint endpoint = getPrimary(key);
-        return target.equals(endpoint);
-    }
-    
-    /**
-     * This method determines whether the local endpoint is the
-     * seondary replica for the given key.
-     * @param key
-     * @return true if the local endpoint is the secondary replica.
-     */
-    public boolean isSecondary(String key)
-    {
-        EndPoint[] topN = getNStorageEndPoint(key);
-        if ( topN.length < DatabaseDescriptor.getReplicationFactor() )
-            return false;
-        return topN[1].equals(StorageService.tcpAddr_);
-    }
-    
-    /**
-     * This method determines whether the local endpoint is the
-     * seondary replica for the given key.
-     * @param key
-     * @return true if the local endpoint is the tertiary replica.
-     */
-    public boolean isTertiary(String key)
-    {
-        EndPoint[] topN = getNStorageEndPoint(key);
-        if ( topN.length < DatabaseDescriptor.getReplicationFactor() )
-            return false;
-        return topN[2].equals(StorageService.tcpAddr_);
-    }
-    
-    /**
-     * This method determines if the local endpoint is
-     * in the topN of N nodes passed in.
-    */
-    public boolean isInTopN(String key)
-    {
-    	EndPoint[] topN = getNStorageEndPoint(key);
-        for ( EndPoint ep : topN )
-        {
-            if ( ep.equals( StorageService.tcpAddr_ ) )
-                return true;
-        }
-        return false;
-    }
-    
+
     /**
      * This method returns the N endpoints that are responsible for storing the
      * specified key i.e for replication.
@@ -1447,21 +1226,6 @@
 
     /**
      * This method returns the N endpoints that are responsible for storing the
-     * specified key i.e for replication. But it makes sure that the N endpoints
-     * that are returned are live as reported by the FD. It returns the hint information
-     * if some nodes in the top N are not live.
-     *
-     * param @ key - key for which we need to find the endpoint return value -
-     * the endpoint responsible for this key
-     */
-    public Map<EndPoint, EndPoint> getNHintedStorageEndPoint(String key)
-    {
-        BigInteger token = hash(key);
-        return nodePicker_.getHintedStorageEndPoints(token);
-    }
-
-    /**
-     * This method returns the N endpoints that are responsible for storing the
      * specified token i.e for replication.
      *
      * param @ token - position on the ring
@@ -1485,19 +1249,6 @@
     }
 
     /**
-     * This method returns the N endpoints that are responsible for storing the
-     * specified key i.e for replication. But it makes sure that the N endpoints
-     * that are returned are live as reported by the FD. It returns the hint information
-     * if some nodes in the top N are not live.
-     *
-     * param @ token - position on the ring
-     */
-    public Map<EndPoint, EndPoint> getNHintedStorageEndPoint(BigInteger token)
-    {
-        return nodePicker_.getHintedStorageEndPoints(token);
-    }
-    
-    /**
      * This function finds the most suitable endpoint given a key.
     * It checks for locality and liveness.
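
Most of the methods deleted from StorageService were thin views over one core operation that survives: hash the key to a point on the ring and take the first token at or after it, wrapping to the smallest token when none exists. A self-contained sketch of that successor lookup, with a TreeMap standing in for TokenMetadata's sorted token list (illustrative only):

    import java.math.BigInteger;
    import java.util.Map;
    import java.util.TreeMap;

    public class SuccessorDemo
    {
        /* Endpoint owning the first token clockwise of `point`, wrapping past the top. */
        static String primaryFor(TreeMap<BigInteger, String> ring, BigInteger point)
        {
            Map.Entry<BigInteger, String> e = ring.ceilingEntry(point);
            return (e != null) ? e.getValue() : ring.firstEntry().getValue();
        }

        public static void main(String[] args)
        {
            TreeMap<BigInteger, String> ring = new TreeMap<BigInteger, String>();
            ring.put(BigInteger.valueOf(10), "A");
            ring.put(BigInteger.valueOf(40), "B");
            ring.put(BigInteger.valueOf(70), "C");
            System.out.println(primaryFor(ring, BigInteger.valueOf(25))); // B
            System.out.println(primaryFor(ring, BigInteger.valueOf(90))); // A (wraps around)
        }
    }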
      */