Posted to commits@cassandra.apache.org by jb...@apache.org on 2012/06/27 09:20:19 UTC

[1/3] git commit: merge from 1.1

Updated Branches:
  refs/heads/cassandra-1.1 98dc41393 -> 0627c8aae
  refs/heads/trunk 681e2dea7 -> 9f867ea4c


merge from 1.1


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/9f867ea4
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/9f867ea4
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/9f867ea4

Branch: refs/heads/trunk
Commit: 9f867ea4c20b40e2bcd07fb43dc888bd3601474a
Parents: 681e2de 0627c8a
Author: Jonathan Ellis <jb...@apache.org>
Authored: Wed Jun 27 02:18:55 2012 -0500
Committer: Jonathan Ellis <jb...@apache.org>
Committed: Wed Jun 27 02:20:01 2012 -0500

----------------------------------------------------------------------
 CHANGES.txt                                        |    4 +-
 .../apache/cassandra/cache/AutoSavingCache.java    |  144 +++++++--------
 .../org/apache/cassandra/db/ColumnFamilyStore.java |  128 ++++++++------
 .../unit/org/apache/cassandra/db/RowCacheTest.java |    2 +-
 4 files changed, 142 insertions(+), 136 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/9f867ea4/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index dbc90b9,4c2b836..c2fecb2
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,32 -1,5 +1,32 @@@
 +1.2-dev
 + * add UseCondCardMark XX jvm settings on jdk 1.7 (CASSANDRA-4366)
 + * split up rpc timeout by operation type (CASSANDRA-2819)
 + * rewrite key cache save/load to use only sequential i/o (CASSANDRA-3762)
 + * update MS protocol with a version handshake + broadcast address id
 +   (CASSANDRA-4311)
 + * multithreaded hint replay (CASSANDRA-4189)
 + * add inter-node message compression (CASSANDRA-3127)
 + * remove COPP (CASSANDRA-2479)
 + * Track tombstone expiration and compact when tombstone content is
 +   higher than a configurable threshold, default 20% (CASSANDRA-3442)
 + * update MurmurHash to version 3 (CASSANDRA-2975)
 + * (CLI) track elapsed time for `delete' operation (CASSANDRA-4060)
 + * (CLI) jline version is bumped to 1.0 to properly support
 +   'delete' key function (CASSANDRA-4132)
-  * Save IndexSummary into new SSTable 'Summary' component (CASSANDRA-2392)
++ * Save IndexSummary into new SSTable 'Summary' component (CASSANDRA-2392, 4289)
 + * Add support for range tombstones (CASSANDRA-3708)
 + * Improve MessagingService efficiency (CASSANDRA-3617)
 + * Avoid ID conflicts from concurrent schema changes (CASSANDRA-3794)
 + * Set thrift HSHA server thread limit to unlimited by default (CASSANDRA-4277)
 + * Avoids double serialization of CF id in RowMutation messages
 +   (CASSANDRA-4293)
-  * fix Summary component and caches to use correct partitioner (CASSANDRA-4289)
 + * stream compressed sstables directly with java nio (CASSANDRA-4297)
 + * Support multiple ranges in SliceQueryFilter (CASSANDRA-3885)
 + * Add column metadata to system column families (CASSANDRA-4018)
 +
 +
  1.1.2
+  * Use correct partitioner when saving + loading caches (CASSANDRA-4331)
   * Check schema before trying to export sstable (CASSANDRA-2760)
   * Raise a meaningful exception instead of NPE when PFS encounters
     an unconfigured node + no default (CASSANDRA-4349)

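The two cache entries above (CASSANDRA-4331 for 1.1.2, and CASSANDRA-4289 folded into the Summary entry for 1.2-dev) target the same problem the diffs below address: saved caches persist raw key bytes, so on load each key has to be re-decorated with the node's configured partitioner rather than whatever partitioner a caller happened to pass in. A minimal standalone sketch of why the partitioner choice matters, using the 1.1-era dht classes; the class and method names are real, but this snippet is illustrative and is not part of the commit:

    import java.nio.ByteBuffer;

    import org.apache.cassandra.db.DecoratedKey;
    import org.apache.cassandra.dht.ByteOrderedPartitioner;
    import org.apache.cassandra.dht.IPartitioner;
    import org.apache.cassandra.dht.RandomPartitioner;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class PartitionerMismatchSketch
    {
        public static void main(String[] args)
        {
            // Raw key bytes, as they would be stored in a saved cache file.
            ByteBuffer rawKey = ByteBufferUtil.bytes("user:42");

            // Raw types, matching the 1.1-era usage in the diffs below.
            IPartitioner configured = new RandomPartitioner();   // tokens are MD5-based BigIntegers
            IPartitioner other = new ByteOrderedPartitioner();   // tokens are the key bytes themselves

            // The same bytes decorate to completely different tokens, so keys decorated
            // with the wrong partitioner never line up with the live sstables.
            DecoratedKey good = configured.decorateKey(rawKey);
            DecoratedKey bad = other.decorateKey(rawKey);
            System.out.println(good.token + " vs " + bad.token);
        }
    }
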
http://git-wip-us.apache.org/repos/asf/cassandra/blob/9f867ea4/src/java/org/apache/cassandra/cache/AutoSavingCache.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/cache/AutoSavingCache.java
index 41b8f5d,004e46e..7eed2a0
--- a/src/java/org/apache/cassandra/cache/AutoSavingCache.java
+++ b/src/java/org/apache/cassandra/cache/AutoSavingCache.java
@@@ -34,7 -35,8 +35,7 @@@ import org.apache.cassandra.config.Data
  import org.apache.cassandra.db.compaction.CompactionInfo;
  import org.apache.cassandra.db.compaction.CompactionManager;
  import org.apache.cassandra.db.compaction.OperationType;
- import org.apache.cassandra.db.ColumnFamilyStore;
+ import org.apache.cassandra.db.DecoratedKey;
 -import org.apache.cassandra.dht.IPartitioner;
  import org.apache.cassandra.io.util.FileUtils;
  import org.apache.cassandra.io.util.SequentialWriter;
  import org.apache.cassandra.service.CacheService;
@@@ -96,11 -93,10 +92,10 @@@ public class AutoSavingCache<K extends 
          }
      }
  
-     public int loadSaved(ColumnFamilyStore cfs)
 -    public Set<DecoratedKey> readSaved(String ksName, String cfName, IPartitioner partitioner)
++    public Set<DecoratedKey> readSaved(String ksName, String cfName)
      {
-         int count = 0;
-         long start = System.currentTimeMillis();
-         File path = getCachePath(cfs.table.name, cfs.columnFamily, null);
+         File path = getCachePath(ksName, cfName);
+         Set<DecoratedKey> keys = new TreeSet<DecoratedKey>();
          if (path.exists())
          {
              DataInputStream in = null;
@@@ -137,10 -108,26 +107,26 @@@
                  in = new DataInputStream(new BufferedInputStream(new FileInputStream(path)));
                  while (in.available() > 0)
                  {
-                     Pair<K, V> entry = cacheLoader.deserialize(in, cfs);
-                     put(entry.left, entry.right);
-                     count++;
+                     int size = in.readInt();
+                     byte[] bytes = new byte[size];
+                     in.readFully(bytes);
+                     ByteBuffer buffer = ByteBuffer.wrap(bytes);
+                     DecoratedKey key;
+                     try
+                     {
 -                        key = partitioner.decorateKey(buffer);
++                        key = StorageService.getPartitioner().decorateKey(buffer);
+                     }
+                     catch (Exception e)
+                     {
+                         logger.info(String.format("unable to read entry #%s from saved cache %s; skipping remaining entries",
+                                 keys.size(), path.getAbsolutePath()), e);
+                         break;
+                     }
+                     keys.add(key);
                  }
+                 if (logger.isDebugEnabled())
+                     logger.debug(String.format("completed reading (%d ms; %d keys) saved cache %s",
+                             System.currentTimeMillis() - start, keys.size(), path));
              }
              catch (Exception e)
              {

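The rewritten loop above (part of the sequential-I/O key cache rework, CASSANDRA-3762) treats the saved cache file as a flat stream of length-prefixed key blobs: a 4-byte length, then that many key bytes, repeated until the stream runs out. A minimal round-trip sketch of that framing using only java.io; the write side here is inferred from the read loop and is not code from this commit:

    import java.io.BufferedInputStream;
    import java.io.BufferedOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.util.ArrayList;
    import java.util.List;

    public class LengthPrefixedKeysSketch
    {
        // Assumed write side: each key is framed as a 4-byte big-endian length plus the raw bytes.
        static void writeKeys(File path, List<byte[]> keys) throws IOException
        {
            DataOutputStream out = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(path)));
            try
            {
                for (byte[] key : keys)
                {
                    out.writeInt(key.length);
                    out.write(key);
                }
            }
            finally
            {
                out.close();
            }
        }

        // Mirror of the readSaved loop above: read frames until the stream is exhausted.
        static List<ByteBuffer> readKeys(File path) throws IOException
        {
            List<ByteBuffer> keys = new ArrayList<ByteBuffer>();
            DataInputStream in = new DataInputStream(new BufferedInputStream(new FileInputStream(path)));
            try
            {
                while (in.available() > 0)
                {
                    int size = in.readInt();
                    byte[] bytes = new byte[size];
                    in.readFully(bytes);
                    keys.add(ByteBuffer.wrap(bytes));
                }
            }
            finally
            {
                in.close();
            }
            return keys;
        }
    }
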
http://git-wip-us.apache.org/repos/asf/cassandra/blob/9f867ea4/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/ColumnFamilyStore.java
index dc2459b,5c7e3b2..582c097
--- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
@@@ -222,10 -226,12 +226,12 @@@ public class ColumnFamilyStore implemen
  
          // scan for sstables corresponding to this cf and load them
          data = new DataTracker(this);
+         Set<DecoratedKey> savedKeys = caching == Caching.NONE || caching == Caching.ROWS_ONLY
+                                        ? Collections.<DecoratedKey>emptySet()
 -                                       : CacheService.instance.keyCache.readSaved(table.name, columnFamily, partitioner);
++                                       : CacheService.instance.keyCache.readSaved(table.name, columnFamily);
+ 
          Directories.SSTableLister sstables = directories.sstableLister().skipCompacted(true).skipTemporary(true);
-         data.addInitialSSTables(SSTableReader.batchOpen(sstables.list().entrySet(), data, metadata, this.partitioner));
-         if (caching == Caching.ALL || caching == Caching.KEYS_ONLY)
-             CacheService.instance.keyCache.loadSaved(this);
+         data.addInitialSSTables(SSTableReader.batchOpen(sstables.list().entrySet(), savedKeys, data, metadata, this.partitioner));
  
          // compaction strategy should be created after the CFS has been prepared
          this.compactionStrategy = metadata.createCompactionStrategyInstance(this);
@@@ -402,7 -408,19 +408,19 @@@
  
          long start = System.currentTimeMillis();
  
-         int cachedRowsRead = CacheService.instance.rowCache.loadSaved(this);
+         AutoSavingCache<RowCacheKey, IRowCacheEntry> rowCache = CacheService.instance.rowCache;
+ 
+         // results are sorted on read (via treeset) because there are few reads and many writes and reads only happen at startup
+         int cachedRowsRead = 0;
 -        for (DecoratedKey key : rowCache.readSaved(table.name, columnFamily, partitioner))
++        for (DecoratedKey key : rowCache.readSaved(table.name, columnFamily))
+         {
+             ColumnFamily data = getTopLevelColumns(QueryFilter.getIdentityFilter(key, new QueryPath(columnFamily)),
+                                                    Integer.MIN_VALUE,
+                                                    true);
+             CacheService.instance.rowCache.put(new RowCacheKey(metadata.cfId, key), data);
+             cachedRowsRead++;
+         }
+ 
          if (cachedRowsRead > 0)
              logger.info(String.format("completed loading (%d ms; %d keys) row cache for %s.%s",
                          System.currentTimeMillis() - start,

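The "sorted on read" comment in the hunk above is the reason readSaved collects keys in a TreeSet: DecoratedKey compares by token, so iterating the returned set visits rows in token order, the order the partitioner lays rows out in the sstables, and the per-key reads during row cache warm-up run mostly sequentially instead of seeking randomly. Since cache saves are frequent and reads only happen at startup, the sort cost is paid on the read path. A tiny standalone demonstration that a TreeSet of decorated keys iterates in token order regardless of insertion order; the class name and sample keys are invented for illustration:

    import java.util.TreeSet;

    import org.apache.cassandra.db.DecoratedKey;
    import org.apache.cassandra.dht.IPartitioner;
    import org.apache.cassandra.dht.RandomPartitioner;
    import org.apache.cassandra.utils.ByteBufferUtil;

    public class SortedWarmupSketch
    {
        public static void main(String[] args) throws Exception
        {
            IPartitioner p = new RandomPartitioner();
            TreeSet<DecoratedKey> keys = new TreeSet<DecoratedKey>();

            // Insertion order is arbitrary; the TreeSet keeps the keys ordered by token,
            // i.e. the order rows are laid out on disk under this partitioner.
            keys.add(p.decorateKey(ByteBufferUtil.bytes("zebra")));
            keys.add(p.decorateKey(ByteBufferUtil.bytes("apple")));
            keys.add(p.decorateKey(ByteBufferUtil.bytes("mango")));

            for (DecoratedKey key : keys)
                System.out.println(key.token + " -> " + ByteBufferUtil.string(key.key));
        }
    }
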
http://git-wip-us.apache.org/repos/asf/cassandra/blob/9f867ea4/test/unit/org/apache/cassandra/db/RowCacheTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/db/RowCacheTest.java
index 99b8cbc,7535968..e6b4578
--- a/test/unit/org/apache/cassandra/db/RowCacheTest.java
+++ b/test/unit/org/apache/cassandra/db/RowCacheTest.java
@@@ -153,6 -153,6 +153,6 @@@ public class RowCacheTest extends Schem
          // empty the cache again to make sure values came from disk
          CacheService.instance.invalidateRowCache();
          assert CacheService.instance.rowCache.size() == 0;
-         assert CacheService.instance.rowCache.loadSaved(store) == (keysToSave == Integer.MAX_VALUE ? totalKeys : keysToSave);
 -        assert CacheService.instance.rowCache.readSaved(KEYSPACE, COLUMN_FAMILY, cfs.partitioner).size() == (keysToSave == Integer.MAX_VALUE ? totalKeys : keysToSave);
++        assert CacheService.instance.rowCache.readSaved(KEYSPACE, COLUMN_FAMILY).size() == (keysToSave == Integer.MAX_VALUE ? totalKeys : keysToSave);
      }
  }