You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@cassandra.apache.org by sa...@apache.org on 2016/03/09 16:05:16 UTC
[1/6] cassandra git commit: Replace recursion with iteration in
CompositesSearcher
Repository: cassandra
Updated Branches:
refs/heads/cassandra-3.0 f6af142f1 -> d0b7a02b2
refs/heads/cassandra-3.5 96792f638 -> a38a4e5e5
refs/heads/trunk 9841cb72b -> 30784fd29
Replace recursion with iteration in CompositesSearcher
Patch by Sam Tunnicliffe; reviewed by Stefania Alborghetti for
CASSANDRA-11304
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/d0b7a02b
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/d0b7a02b
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/d0b7a02b
Branch: refs/heads/cassandra-3.0
Commit: d0b7a02b230f65a3ba2665a8327b6e6cc585dde9
Parents: f6af142
Author: Sam Tunnicliffe <sa...@beobal.com>
Authored: Tue Mar 8 12:21:14 2016 +0000
Committer: Sam Tunnicliffe <sa...@beobal.com>
Committed: Wed Mar 9 14:42:13 2016 +0000
----------------------------------------------------------------------
CHANGES.txt | 1 +
.../internal/composites/CompositesSearcher.java | 115 ++++++++++---------
2 files changed, 60 insertions(+), 56 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/d0b7a02b/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 384e7ea..e6c40aa 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
3.0.5
+ * Remove recursive call from CompositesSearcher (CASSANDRA-11304)
* Fix filtering on non-primary key columns for queries without index (CASSANDRA-6377)
* Fix sstableloader fail when using materialized view (CASSANDRA-11275)
Merged from 2.2:
http://git-wip-us.apache.org/repos/asf/cassandra/blob/d0b7a02b/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java b/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
index 765ae4d..135839b 100644
--- a/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
+++ b/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
@@ -95,70 +95,73 @@ public class CompositesSearcher extends CassandraIndexSearcher
private boolean prepareNext()
{
- if (next != null)
- return true;
-
- if (nextEntry == null)
+ while (true)
{
- if (!indexHits.hasNext())
- return false;
+ if (next != null)
+ return true;
- nextEntry = index.decodeEntry(indexKey, indexHits.next());
- }
+ if (nextEntry == null)
+ {
+ if (!indexHits.hasNext())
+ return false;
- // Gather all index hits belonging to the same partition and query the data for those hits.
- // TODO: it's much more efficient to do 1 read for all hits to the same partition than doing
- // 1 read per index hit. However, this basically mean materializing all hits for a partition
- // in memory so we should consider adding some paging mechanism. However, index hits should
- // be relatively small so it's much better than the previous code that was materializing all
- // *data* for a given partition.
- BTreeSet.Builder<Clustering> clusterings = BTreeSet.builder(index.baseCfs.getComparator());
- List<IndexEntry> entries = new ArrayList<>();
- DecoratedKey partitionKey = index.baseCfs.decorateKey(nextEntry.indexedKey);
-
- while (nextEntry != null && partitionKey.getKey().equals(nextEntry.indexedKey))
- {
- // We're queried a slice of the index, but some hits may not match some of the clustering column constraints
- if (isMatchingEntry(partitionKey, nextEntry, command))
+ nextEntry = index.decodeEntry(indexKey, indexHits.next());
+ }
+
+ // Gather all index hits belonging to the same partition and query the data for those hits.
+ // TODO: it's much more efficient to do 1 read for all hits to the same partition than doing
+ // 1 read per index hit. However, this basically means materializing all hits for a partition
+ // in memory so we should consider adding some paging mechanism. However, index hits should
+ // be relatively small so it's much better than the previous code that was materializing all
+ // *data* for a given partition.
+ BTreeSet.Builder<Clustering> clusterings = BTreeSet.builder(index.baseCfs.getComparator());
+ List<IndexEntry> entries = new ArrayList<>();
+ DecoratedKey partitionKey = index.baseCfs.decorateKey(nextEntry.indexedKey);
+
+ while (nextEntry != null && partitionKey.getKey().equals(nextEntry.indexedKey))
{
- clusterings.add(nextEntry.indexedEntryClustering);
- entries.add(nextEntry);
+ // We've queried a slice of the index, but some hits may not match some of the clustering column constraints
+ if (isMatchingEntry(partitionKey, nextEntry, command))
+ {
+ clusterings.add(nextEntry.indexedEntryClustering);
+ entries.add(nextEntry);
+ }
+
+ nextEntry = indexHits.hasNext() ? index.decodeEntry(indexKey, indexHits.next()) : null;
}
- nextEntry = indexHits.hasNext() ? index.decodeEntry(indexKey, indexHits.next()) : null;
- }
+ // Because we've eliminated entries that don't match the clustering columns, it's possible we added nothing
+ if (clusterings.isEmpty())
+ continue;
+
+ // Query the gathered index hits. We still need to filter stale hits from the resulting query.
+ ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(clusterings.build(), false);
+ SinglePartitionReadCommand dataCmd = SinglePartitionReadCommand.create(index.baseCfs.metadata,
+ command.nowInSec(),
+ command.columnFilter(),
+ command.rowFilter(),
+ DataLimits.NONE,
+ partitionKey,
+ filter);
+ @SuppressWarnings("resource") // We close right away if empty, and if it's assigned to next it will be closed either
+ // by the next caller of next, or through closing this iterator if that comes before.
+ UnfilteredRowIterator dataIter =
+ filterStaleEntries(dataCmd.queryMemtableAndDisk(index.baseCfs,
+ orderGroup.baseReadOpOrderGroup()),
+ indexKey.getKey(),
+ entries,
+ orderGroup.writeOpOrderGroup(),
+ command.nowInSec());
+
+ if (dataIter.isEmpty())
+ {
+ dataIter.close();
+ continue;
+ }
- // Because we've eliminated entries that don't match the clustering columns, it's possible we added nothing
- if (clusterings.isEmpty())
- return prepareNext();
-
- // Query the gathered index hits. We still need to filter stale hits from the resulting query.
- ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(clusterings.build(), false);
- SinglePartitionReadCommand dataCmd = SinglePartitionReadCommand.create(index.baseCfs.metadata,
- command.nowInSec(),
- command.columnFilter(),
- command.rowFilter(),
- DataLimits.NONE,
- partitionKey,
- filter);
- @SuppressWarnings("resource") // We close right away if empty, and if it's assign to next it will be called either
- // by the next caller of next, or through closing this iterator is this come before.
- UnfilteredRowIterator dataIter = filterStaleEntries(dataCmd.queryMemtableAndDisk(index.baseCfs,
- orderGroup.baseReadOpOrderGroup()),
- indexKey.getKey(),
- entries,
- orderGroup.writeOpOrderGroup(),
- command.nowInSec());
-
-
- if (dataIter.isEmpty())
- {
- dataIter.close();
- return prepareNext();
+ next = dataIter;
+ return true;
}
-
- next = dataIter;
- return true;
}
public void remove()
[2/6] cassandra git commit: Replace recursion with iteration in
CompositesSearcher
Posted by sa...@apache.org.
Replace recursion with iteration in CompositesSearcher
Patch by Sam Tunnicliffe; reviewed by Stefania Alborghetti for
CASSANDRA-11304
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/d0b7a02b
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/d0b7a02b
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/d0b7a02b
Branch: refs/heads/cassandra-3.5
Commit: d0b7a02b230f65a3ba2665a8327b6e6cc585dde9
Parents: f6af142
Author: Sam Tunnicliffe <sa...@beobal.com>
Authored: Tue Mar 8 12:21:14 2016 +0000
Committer: Sam Tunnicliffe <sa...@beobal.com>
Committed: Wed Mar 9 14:42:13 2016 +0000
----------------------------------------------------------------------
CHANGES.txt | 1 +
.../internal/composites/CompositesSearcher.java | 115 ++++++++++---------
2 files changed, 60 insertions(+), 56 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/d0b7a02b/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 384e7ea..e6c40aa 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
3.0.5
+ * Remove recursive call from CompositesSearcher (CASSANDRA-11304)
* Fix filtering on non-primary key columns for queries without index (CASSANDRA-6377)
* Fix sstableloader fail when using materialized view (CASSANDRA-11275)
Merged from 2.2:
http://git-wip-us.apache.org/repos/asf/cassandra/blob/d0b7a02b/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java b/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
index 765ae4d..135839b 100644
--- a/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
+++ b/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
@@ -95,70 +95,73 @@ public class CompositesSearcher extends CassandraIndexSearcher
private boolean prepareNext()
{
- if (next != null)
- return true;
-
- if (nextEntry == null)
+ while (true)
{
- if (!indexHits.hasNext())
- return false;
+ if (next != null)
+ return true;
- nextEntry = index.decodeEntry(indexKey, indexHits.next());
- }
+ if (nextEntry == null)
+ {
+ if (!indexHits.hasNext())
+ return false;
- // Gather all index hits belonging to the same partition and query the data for those hits.
- // TODO: it's much more efficient to do 1 read for all hits to the same partition than doing
- // 1 read per index hit. However, this basically mean materializing all hits for a partition
- // in memory so we should consider adding some paging mechanism. However, index hits should
- // be relatively small so it's much better than the previous code that was materializing all
- // *data* for a given partition.
- BTreeSet.Builder<Clustering> clusterings = BTreeSet.builder(index.baseCfs.getComparator());
- List<IndexEntry> entries = new ArrayList<>();
- DecoratedKey partitionKey = index.baseCfs.decorateKey(nextEntry.indexedKey);
-
- while (nextEntry != null && partitionKey.getKey().equals(nextEntry.indexedKey))
- {
- // We're queried a slice of the index, but some hits may not match some of the clustering column constraints
- if (isMatchingEntry(partitionKey, nextEntry, command))
+ nextEntry = index.decodeEntry(indexKey, indexHits.next());
+ }
+
+ // Gather all index hits belonging to the same partition and query the data for those hits.
+ // TODO: it's much more efficient to do 1 read for all hits to the same partition than doing
+ // 1 read per index hit. However, this basically means materializing all hits for a partition
+ // in memory so we should consider adding some paging mechanism. However, index hits should
+ // be relatively small so it's much better than the previous code that was materializing all
+ // *data* for a given partition.
+ BTreeSet.Builder<Clustering> clusterings = BTreeSet.builder(index.baseCfs.getComparator());
+ List<IndexEntry> entries = new ArrayList<>();
+ DecoratedKey partitionKey = index.baseCfs.decorateKey(nextEntry.indexedKey);
+
+ while (nextEntry != null && partitionKey.getKey().equals(nextEntry.indexedKey))
{
- clusterings.add(nextEntry.indexedEntryClustering);
- entries.add(nextEntry);
+ // We've queried a slice of the index, but some hits may not match some of the clustering column constraints
+ if (isMatchingEntry(partitionKey, nextEntry, command))
+ {
+ clusterings.add(nextEntry.indexedEntryClustering);
+ entries.add(nextEntry);
+ }
+
+ nextEntry = indexHits.hasNext() ? index.decodeEntry(indexKey, indexHits.next()) : null;
}
- nextEntry = indexHits.hasNext() ? index.decodeEntry(indexKey, indexHits.next()) : null;
- }
+ // Because we've eliminated entries that don't match the clustering columns, it's possible we added nothing
+ if (clusterings.isEmpty())
+ continue;
+
+ // Query the gathered index hits. We still need to filter stale hits from the resulting query.
+ ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(clusterings.build(), false);
+ SinglePartitionReadCommand dataCmd = SinglePartitionReadCommand.create(index.baseCfs.metadata,
+ command.nowInSec(),
+ command.columnFilter(),
+ command.rowFilter(),
+ DataLimits.NONE,
+ partitionKey,
+ filter);
+ @SuppressWarnings("resource") // We close right away if empty, and if it's assigned to next it will be closed either
+ // by the next caller of next, or through closing this iterator if that comes before.
+ UnfilteredRowIterator dataIter =
+ filterStaleEntries(dataCmd.queryMemtableAndDisk(index.baseCfs,
+ orderGroup.baseReadOpOrderGroup()),
+ indexKey.getKey(),
+ entries,
+ orderGroup.writeOpOrderGroup(),
+ command.nowInSec());
+
+ if (dataIter.isEmpty())
+ {
+ dataIter.close();
+ continue;
+ }
- // Because we've eliminated entries that don't match the clustering columns, it's possible we added nothing
- if (clusterings.isEmpty())
- return prepareNext();
-
- // Query the gathered index hits. We still need to filter stale hits from the resulting query.
- ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(clusterings.build(), false);
- SinglePartitionReadCommand dataCmd = SinglePartitionReadCommand.create(index.baseCfs.metadata,
- command.nowInSec(),
- command.columnFilter(),
- command.rowFilter(),
- DataLimits.NONE,
- partitionKey,
- filter);
- @SuppressWarnings("resource") // We close right away if empty, and if it's assign to next it will be called either
- // by the next caller of next, or through closing this iterator is this come before.
- UnfilteredRowIterator dataIter = filterStaleEntries(dataCmd.queryMemtableAndDisk(index.baseCfs,
- orderGroup.baseReadOpOrderGroup()),
- indexKey.getKey(),
- entries,
- orderGroup.writeOpOrderGroup(),
- command.nowInSec());
-
-
- if (dataIter.isEmpty())
- {
- dataIter.close();
- return prepareNext();
+ next = dataIter;
+ return true;
}
-
- next = dataIter;
- return true;
}
public void remove()
[6/6] cassandra git commit: Merge branch 'cassandra-3.5' into trunk
Posted by sa...@apache.org.
Merge branch 'cassandra-3.5' into trunk
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/30784fd2
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/30784fd2
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/30784fd2
Branch: refs/heads/trunk
Commit: 30784fd294d268deb302662319fba4dcd76fa3e0
Parents: 9841cb7 a38a4e5
Author: Sam Tunnicliffe <sa...@beobal.com>
Authored: Wed Mar 9 14:58:19 2016 +0000
Committer: Sam Tunnicliffe <sa...@beobal.com>
Committed: Wed Mar 9 14:58:19 2016 +0000
----------------------------------------------------------------------
----------------------------------------------------------------------
[4/6] cassandra git commit: Merge branch 'cassandra-3.0' into
cassandra-3.5
Posted by sa...@apache.org.
Merge branch 'cassandra-3.0' into cassandra-3.5
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/a38a4e5e
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/a38a4e5e
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/a38a4e5e
Branch: refs/heads/trunk
Commit: a38a4e5e548465efe99558951b5ea1271b604c1b
Parents: 96792f6 d0b7a02
Author: Sam Tunnicliffe <sa...@beobal.com>
Authored: Wed Mar 9 14:49:26 2016 +0000
Committer: Sam Tunnicliffe <sa...@beobal.com>
Committed: Wed Mar 9 14:49:26 2016 +0000
----------------------------------------------------------------------
----------------------------------------------------------------------
[5/6] cassandra git commit: Merge branch 'cassandra-3.0' into
cassandra-3.5
Posted by sa...@apache.org.
Merge branch 'cassandra-3.0' into cassandra-3.5
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/a38a4e5e
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/a38a4e5e
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/a38a4e5e
Branch: refs/heads/cassandra-3.5
Commit: a38a4e5e548465efe99558951b5ea1271b604c1b
Parents: 96792f6 d0b7a02
Author: Sam Tunnicliffe <sa...@beobal.com>
Authored: Wed Mar 9 14:49:26 2016 +0000
Committer: Sam Tunnicliffe <sa...@beobal.com>
Committed: Wed Mar 9 14:49:26 2016 +0000
----------------------------------------------------------------------
----------------------------------------------------------------------
[3/6] cassandra git commit: Replace recursion with iteration in
CompositesSearcher
Posted by sa...@apache.org.
Replace recursion with iteration in CompositesSearcher
Patch by Sam Tunnicliffe; reviewed by Stefania Alborghetti for
CASSANDRA-11304
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/d0b7a02b
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/d0b7a02b
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/d0b7a02b
Branch: refs/heads/trunk
Commit: d0b7a02b230f65a3ba2665a8327b6e6cc585dde9
Parents: f6af142
Author: Sam Tunnicliffe <sa...@beobal.com>
Authored: Tue Mar 8 12:21:14 2016 +0000
Committer: Sam Tunnicliffe <sa...@beobal.com>
Committed: Wed Mar 9 14:42:13 2016 +0000
----------------------------------------------------------------------
CHANGES.txt | 1 +
.../internal/composites/CompositesSearcher.java | 115 ++++++++++---------
2 files changed, 60 insertions(+), 56 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/cassandra/blob/d0b7a02b/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 384e7ea..e6c40aa 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
3.0.5
+ * Remove recursive call from CompositesSearcher (CASSANDRA-11304)
* Fix filtering on non-primary key columns for queries without index (CASSANDRA-6377)
* Fix sstableloader fail when using materialized view (CASSANDRA-11275)
Merged from 2.2:
http://git-wip-us.apache.org/repos/asf/cassandra/blob/d0b7a02b/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java b/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
index 765ae4d..135839b 100644
--- a/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
+++ b/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
@@ -95,70 +95,73 @@ public class CompositesSearcher extends CassandraIndexSearcher
private boolean prepareNext()
{
- if (next != null)
- return true;
-
- if (nextEntry == null)
+ while (true)
{
- if (!indexHits.hasNext())
- return false;
+ if (next != null)
+ return true;
- nextEntry = index.decodeEntry(indexKey, indexHits.next());
- }
+ if (nextEntry == null)
+ {
+ if (!indexHits.hasNext())
+ return false;
- // Gather all index hits belonging to the same partition and query the data for those hits.
- // TODO: it's much more efficient to do 1 read for all hits to the same partition than doing
- // 1 read per index hit. However, this basically mean materializing all hits for a partition
- // in memory so we should consider adding some paging mechanism. However, index hits should
- // be relatively small so it's much better than the previous code that was materializing all
- // *data* for a given partition.
- BTreeSet.Builder<Clustering> clusterings = BTreeSet.builder(index.baseCfs.getComparator());
- List<IndexEntry> entries = new ArrayList<>();
- DecoratedKey partitionKey = index.baseCfs.decorateKey(nextEntry.indexedKey);
-
- while (nextEntry != null && partitionKey.getKey().equals(nextEntry.indexedKey))
- {
- // We're queried a slice of the index, but some hits may not match some of the clustering column constraints
- if (isMatchingEntry(partitionKey, nextEntry, command))
+ nextEntry = index.decodeEntry(indexKey, indexHits.next());
+ }
+
+ // Gather all index hits belonging to the same partition and query the data for those hits.
+ // TODO: it's much more efficient to do 1 read for all hits to the same partition than doing
+ // 1 read per index hit. However, this basically means materializing all hits for a partition
+ // in memory so we should consider adding some paging mechanism. However, index hits should
+ // be relatively small so it's much better than the previous code that was materializing all
+ // *data* for a given partition.
+ BTreeSet.Builder<Clustering> clusterings = BTreeSet.builder(index.baseCfs.getComparator());
+ List<IndexEntry> entries = new ArrayList<>();
+ DecoratedKey partitionKey = index.baseCfs.decorateKey(nextEntry.indexedKey);
+
+ while (nextEntry != null && partitionKey.getKey().equals(nextEntry.indexedKey))
{
- clusterings.add(nextEntry.indexedEntryClustering);
- entries.add(nextEntry);
+ // We've queried a slice of the index, but some hits may not match some of the clustering column constraints
+ if (isMatchingEntry(partitionKey, nextEntry, command))
+ {
+ clusterings.add(nextEntry.indexedEntryClustering);
+ entries.add(nextEntry);
+ }
+
+ nextEntry = indexHits.hasNext() ? index.decodeEntry(indexKey, indexHits.next()) : null;
}
- nextEntry = indexHits.hasNext() ? index.decodeEntry(indexKey, indexHits.next()) : null;
- }
+ // Because we've eliminated entries that don't match the clustering columns, it's possible we added nothing
+ if (clusterings.isEmpty())
+ continue;
+
+ // Query the gathered index hits. We still need to filter stale hits from the resulting query.
+ ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(clusterings.build(), false);
+ SinglePartitionReadCommand dataCmd = SinglePartitionReadCommand.create(index.baseCfs.metadata,
+ command.nowInSec(),
+ command.columnFilter(),
+ command.rowFilter(),
+ DataLimits.NONE,
+ partitionKey,
+ filter);
+ @SuppressWarnings("resource") // We close right away if empty, and if it's assigned to next it will be closed either
+ // by the next caller of next, or through closing this iterator if that comes before.
+ UnfilteredRowIterator dataIter =
+ filterStaleEntries(dataCmd.queryMemtableAndDisk(index.baseCfs,
+ orderGroup.baseReadOpOrderGroup()),
+ indexKey.getKey(),
+ entries,
+ orderGroup.writeOpOrderGroup(),
+ command.nowInSec());
+
+ if (dataIter.isEmpty())
+ {
+ dataIter.close();
+ continue;
+ }
- // Because we've eliminated entries that don't match the clustering columns, it's possible we added nothing
- if (clusterings.isEmpty())
- return prepareNext();
-
- // Query the gathered index hits. We still need to filter stale hits from the resulting query.
- ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(clusterings.build(), false);
- SinglePartitionReadCommand dataCmd = SinglePartitionReadCommand.create(index.baseCfs.metadata,
- command.nowInSec(),
- command.columnFilter(),
- command.rowFilter(),
- DataLimits.NONE,
- partitionKey,
- filter);
- @SuppressWarnings("resource") // We close right away if empty, and if it's assign to next it will be called either
- // by the next caller of next, or through closing this iterator is this come before.
- UnfilteredRowIterator dataIter = filterStaleEntries(dataCmd.queryMemtableAndDisk(index.baseCfs,
- orderGroup.baseReadOpOrderGroup()),
- indexKey.getKey(),
- entries,
- orderGroup.writeOpOrderGroup(),
- command.nowInSec());
-
-
- if (dataIter.isEmpty())
- {
- dataIter.close();
- return prepareNext();
+ next = dataIter;
+ return true;
}
-
- next = dataIter;
- return true;
}
public void remove()