Posted to commits@jena.apache.org by an...@apache.org on 2017/10/03 19:34:05 UTC

[19/65] [abbrv] jena git commit: JENA-1397: Rename java packages

http://git-wip-us.apache.org/repos/asf/jena/blob/3d456654/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/StoreParamsConst.java
----------------------------------------------------------------------
diff --git a/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/StoreParamsConst.java b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/StoreParamsConst.java
new file mode 100644
index 0000000..e191ba3
--- /dev/null
+++ b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/StoreParamsConst.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.tdb2.setup;
+
+import org.apache.jena.dboe.base.block.FileMode;
+import org.apache.jena.dboe.sys.Names;
+import org.apache.jena.tdb2.sys.SystemTDB;
+
+public class StoreParamsConst {
+    // StoreParams are built with a StoreParamsBuilder.
+    // Initial values are the system defaults.
+    
+    /** Database and query configuration */ 
+    // Key names are the base names - encode/decode may add a prefix.
+    
+    public static final String   fFileMode             = "file_mode" ;
+    public static final FileMode fileMode              = SystemTDB.fileMode() ;
+    
+    public static final String   fBlockReadCacheSize   = "block_read_cache_size" ;
+    public static final int      blockReadCacheSize    = SystemTDB.BlockReadCacheSize ;
+    
+    public static final String   fBlockWriteCacheSize  = "block_write_cache_size" ;
+    public static final int      blockWriteCacheSize   = SystemTDB.BlockWriteCacheSize ;
+    
+    public static final String   fNode2NodeIdCacheSize = "node2nodeid_cache_size" ;
+    public static final int      Node2NodeIdCacheSize  = SystemTDB.Node2NodeIdCacheSize ;
+    
+    public static final String   fNodeId2NodeCacheSize = "nodeid2node_cache_size" ;
+    public static final int      NodeId2NodeCacheSize  = SystemTDB.NodeId2NodeCacheSize ;
+    
+    public static final String   fNodeMissCacheSize    = "node_miss_cache_size" ;
+    public static final int      NodeMissCacheSize     = SystemTDB.NodeMissCacheSize ;
+    
+    /** Database layout - ignored after a database is created */
+    public static final String   fBlockSize            = "block_size" ;
+    public static final int      blockSize             = SystemTDB.BlockSize ;
+    
+    public static final String   fNodeTableBaseName    = "nodetable" ;
+    public static final String   nodeTableBaseName     = Names.nodeTableBaseName ;
+    
+    public static final String   fPrimaryIndexTriples  = "triple_index_primary" ;
+    public static final String   primaryIndexTriples   = Names.primaryIndexTriples ;
+    
+    public static final String   fTripleIndexes        = "triple_indexes" ;
+    public static final String[] tripleIndexes         = Names.tripleIndexes ;
+    
+    public static final String   fPrimaryIndexQuads    = "quad_index_primary" ;
+    public static final String   primaryIndexQuads     = Names.primaryIndexQuads ;
+    
+    public static final String   fQuadIndexes          = "quad_indexes" ;
+    public static final String[] quadIndexes           = Names.quadIndexes ;
+    
+    public static final String   fPrefixTableBaseName  = "prefixtable" ;
+    public static final String   prefixTableBaseName   = Names.prefixTableBaseName ;
+   
+    public static final String   fPrimaryIndexPrefix   = "prefix_index_primary" ;
+    public static final String   primaryIndexPrefix    = Names.primaryIndexPrefix ;
+    
+    public static final String   fPrefixIndexes        = "prefix_indexes" ;
+    public static final String[] prefixIndexes         = Names.prefixIndexes ;
+
+    // Must be after the constants above to get initialization order right
+    // because StoreParamsBuilder uses these constants.
+     
+    /** The system default parameters for on-disk databases. */
+    static StoreParams dftStoreParams = StoreParams.builder().build() ;
+
+    /** The system default parameters for in-memory databases. */
+    static StoreParams dftMemStoreParams = StoreParams.builder()
+        .fileMode(FileMode.direct)
+        // Small block caches, mainly so it behaves like a direct on-disk database.  
+        .blockReadCacheSize(10)
+        .blockWriteCacheSize(10)
+        .node2NodeIdCacheSize(10000)
+        .nodeId2NodeCacheSize(10000)
+        .nodeMissCacheSize(100)
+        .build() ;
+    
+    /** The "small store" parameters. */
+    static StoreParams smallStoreParams = StoreParams.builder()
+        .fileMode(FileMode.direct)
+        .blockReadCacheSize(100)
+        .blockWriteCacheSize(100)
+        .node2NodeIdCacheSize(10000)
+        .nodeId2NodeCacheSize(10000)
+        .nodeMissCacheSize(100)
+        .build() ;
+}
+
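
For context, the constants above are the defaults consumed by StoreParamsBuilder.
A minimal sketch of how an application might build its own StoreParams from these
defaults (the builder setters are those shown in dftMemStoreParams above; the
FileMode.mapped value and the cache sizes are illustrative assumptions only):

    // Sketch, not part of the commit: override only run-to-run (dynamic) settings.
    StoreParams custom = StoreParams.builder()
        .fileMode(FileMode.mapped)         // assumed value; SystemTDB.fileMode() is the system default
        .node2NodeIdCacheSize(100_000)
        .nodeId2NodeCacheSize(500_000)
        .nodeMissCacheSize(1_000)
        .build() ;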

http://git-wip-us.apache.org/repos/asf/jena/blob/3d456654/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/StoreParamsDynamic.java
----------------------------------------------------------------------
diff --git a/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/StoreParamsDynamic.java b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/StoreParamsDynamic.java
new file mode 100644
index 0000000..5939acc
--- /dev/null
+++ b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/StoreParamsDynamic.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.tdb2.setup;
+
+import org.apache.jena.dboe.base.block.FileMode;
+
+/** Store parameters that can be adjusted after a store has been created,
+ *  and given different values when the JVM attaches to a store area. 
+ *  (They are still fixed for any given database once created in a JVM.) 
+ */
+
+public interface StoreParamsDynamic {
+    
+    /** Store-wide file access mode */ 
+    public FileMode getFileMode() ;
+    public boolean isSetFileMode() ;
+    
+    /** Block read cache (note: mapped files do not have a block cache) */
+    public Integer getBlockReadCacheSize() ;
+    public boolean isSetBlockReadCacheSize() ;
+
+    /** Block write cache (note: mapped files do not have a block cache) */
+    public Integer getBlockWriteCacheSize() ;
+    public boolean isSetBlockWriteCacheSize() ;
+    
+    /** Node cache for Node->NodeId. */
+    public Integer getNode2NodeIdCacheSize() ;
+    public boolean isSetNode2NodeIdCacheSize() ;
+    
+    /** Node cache for NodeId->Node. Important for SPARQL results. */
+    public Integer getNodeId2NodeCacheSize() ;
+    public boolean isSetNodeId2NodeCacheSize() ;
+
+    /** Node cache for recording known misses */
+    public Integer getNodeMissCacheSize() ;
+    public boolean isSetNodeMissCacheSize() ;
+}
+
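
The interface above names the parameters that may differ per JVM attach, each with a
paired isSetX/getX method. A small hedged sketch of caller-side use (effectiveReadCache
and dftValue are hypothetical names, not part of this commit):

    // Read a dynamic parameter, falling back to a caller-side default when unset.
    static int effectiveReadCache(StoreParamsDynamic p, int dftValue) {
        return p.isSetBlockReadCacheSize() ? p.getBlockReadCacheSize() : dftValue ;
    }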

http://git-wip-us.apache.org/repos/asf/jena/blob/3d456654/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/StoreParamsFactory.java
----------------------------------------------------------------------
diff --git a/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/StoreParamsFactory.java b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/StoreParamsFactory.java
new file mode 100644
index 0000000..741f247
--- /dev/null
+++ b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/StoreParamsFactory.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.tdb2.setup;
+
+import org.apache.jena.dboe.base.file.Location;
+import org.apache.jena.dboe.sys.Names;
+import org.apache.jena.tdb2.TDB2Factory;
+
+public class StoreParamsFactory {
+    /** Choose the StoreParams.  This is the policy applied when creating or reattaching to a database.
+     *  (extracted and put here to keep the size of DatasetBuildStd down.)
+     * <p>
+     * If the location has parameters in a <tt>tdb.cfg</tt> file, use them, as modified by any
+     * application-supplied parameters.
+     * <p>
+     * Otherwise, if this is a new database, use the application-provided
+     * parameters; if there are no application-provided
+     * parameters, use the system default parameters.
+     * Write the parameters used to the location in <tt>tdb.cfg</tt>.
+     * <p>If this is an existing database and there are no location-recorded parameters,
+     * use system default parameters, modified by application parameters.
+     * <p>
+     * Notes:
+     * <ul>
+     * <li><i>Modification</i> involves setting any of the parameters that can vary from run to run.
+     * These are the cache sizes and the file mode.
+     * <li><i>Block size</i>: it is critical that this is set correctly. Silent corruption
+     * of a database may occur if it is changed.  At the moment, it is not possible to provide
+     * a complete check of block size.
+     * </ul>
+     * <p>
+     * Do not edit store parameters recorded at a location after the database has been created.
+     * Only the dynamic parameters can be safely changed, and that is better done through the application
+     * providing some parameters in the {@link TDB2Factory} call.
+     * <p>
+     * In particular, do not change file names, indexing choices or block size;
+     * otherwise, the database may be permanently and irrecoverably corrupted.
+     * You have been warned.
+     * 
+     * @param location The place where the database is or will be.
+     * @param isNew  Whether the database is being created or whether there is an existing database.
+     * @param pApp   Application-provided store parameters.
+     * @param pLoc   Store parameters found at the location.
+     * @param pDft   System default store parameters.
+     * @return       StoreParams
+     * 
+     * @see StoreParams
+     * @see StoreParamsDynamic
+     */
+    public static StoreParams decideStoreParams(Location location, boolean isNew, StoreParams pApp, StoreParams pLoc, StoreParams pDft) {
+        StoreParams p = null ;
+        if ( pLoc != null ) {
+            // pLoc so use it, modify by pApp.
+            // Covers new and reconnect cases.
+            p = pLoc ;
+            if ( pApp != null )
+                p = StoreParamsBuilder.modify(pLoc, pApp) ;
+            return p ;
+        }
+        // No pLoc.
+        // Use pApp if available.  Write to location if new.
+        if ( pApp != null ) {
+            if ( isNew ) {
+                if ( ! location.isMem() ) {
+                    String filename = location.getPath(Names.TDB_CONFIG_FILE) ;
+                    StoreParamsCodec.write(filename, pApp) ;
+                }
+                return pApp ;
+            }
+            // Not new : pLoc is implicitly pDft.
+            return StoreParamsBuilder.modify(pDft, pApp) ;
+        }
+        // no pLoc, no pApp
+        return pDft ;
+    }
+
+}
+
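
The javadoc above describes the precedence: location-recorded parameters win, then
application parameters, then system defaults. A sketch of a call, assuming
Location.create(String) from the dboe base module and a new on-disk database:

    Location loc = Location.create("DB1") ;
    StoreParams appParams = StoreParams.builder().blockReadCacheSize(200).build() ;
    StoreParams params = StoreParamsFactory.decideStoreParams(
        loc,
        true,                               // isNew: the database is being created
        appParams,                          // application-supplied parameters
        StoreParamsCodec.read(loc),         // parameters recorded at the location, if any
        StoreParams.getDftStoreParams()) ;  // system defaults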

http://git-wip-us.apache.org/repos/asf/jena/blob/3d456654/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/TDBBuilder.java
----------------------------------------------------------------------
diff --git a/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/TDBBuilder.java b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/TDBBuilder.java
new file mode 100644
index 0000000..9b8c694
--- /dev/null
+++ b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/TDBBuilder.java
@@ -0,0 +1,291 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.tdb2.setup;
+
+import java.io.File ;
+import java.io.FileFilter ;
+import java.util.UUID ;
+
+import org.apache.jena.atlas.lib.tuple.TupleMap ;
+import org.apache.jena.dboe.DBOpEnvException;
+import org.apache.jena.dboe.base.file.*;
+import org.apache.jena.dboe.base.record.RecordFactory;
+import org.apache.jena.dboe.index.Index;
+import org.apache.jena.dboe.index.RangeIndex;
+import org.apache.jena.dboe.sys.Names;
+import org.apache.jena.dboe.trans.bplustree.BPlusTree;
+import org.apache.jena.dboe.trans.bplustree.BPlusTreeFactory;
+import org.apache.jena.dboe.trans.data.TransBinaryDataFile;
+import org.apache.jena.dboe.transaction.txn.ComponentId;
+import org.apache.jena.dboe.transaction.txn.TransactionCoordinator;
+import org.apache.jena.dboe.transaction.txn.TransactionalBase;
+import org.apache.jena.dboe.transaction.txn.TransactionalSystem;
+import org.apache.jena.dboe.transaction.txn.journal.Journal;
+import org.apache.jena.sparql.engine.main.QC ;
+import org.apache.jena.sparql.engine.optimizer.reorder.ReorderLib ;
+import org.apache.jena.tdb2.TDBException;
+import org.apache.jena.tdb2.solver.OpExecutorTDB1;
+import org.apache.jena.tdb2.store.*;
+import org.apache.jena.tdb2.store.nodetable.NodeTable;
+import org.apache.jena.tdb2.store.nodetable.NodeTableCache;
+import org.apache.jena.tdb2.store.nodetable.NodeTableInline;
+import org.apache.jena.tdb2.store.nodetable.NodeTableTRDF;
+import org.apache.jena.tdb2.store.nodetupletable.NodeTupleTable;
+import org.apache.jena.tdb2.store.nodetupletable.NodeTupleTableConcrete;
+import org.apache.jena.tdb2.store.tupletable.TupleIndex;
+import org.apache.jena.tdb2.store.tupletable.TupleIndexRecord;
+import org.apache.jena.tdb2.sys.SystemTDB;
+import org.slf4j.Logger ;
+import org.slf4j.LoggerFactory ;
+
+// Taken from TDB2Builder
+// Converted to statics.
+
+/** Build TDB2 databases.
+ * <p>
+ * <b>Do not call these operations directly - use StoreConnection.</b>
+ */
+public class TDBBuilder {
+    private Logger log = LoggerFactory.getLogger(TDBBuilder.class) ;
+    
+    public static DatasetGraphTxn build(Location location) {
+        return build(location, StoreParams.getDftStoreParams()) ;
+    }
+
+    public static DatasetGraphTxn build(Location location, StoreParams appParams) {
+        StoreParams locParams = StoreParamsCodec.read(location) ;
+        StoreParams dftParams = StoreParams.getDftStoreParams() ;
+        // This can write the chosen parameters if necessary (new database, appParams != null, locParams == null)
+        boolean newArea = isNewDatabaseArea(location) ;
+        StoreParams params = StoreParamsFactory.decideStoreParams(location, newArea, appParams, locParams, dftParams) ;
+        return create(location, params).build$() ; 
+    }
+
+    private DatasetGraphTxn build$() {
+        NodeTable nodeTable = buildNodeTable(params.getNodeTableBaseName()) ;
+        
+        TripleTable tripleTable = buildTripleTable(nodeTable) ;
+        QuadTable quadTable = buildQuadTable(nodeTable) ;
+        
+        NodeTable nodeTablePrefixes = buildNodeTable(params.getPrefixTableBaseName()) ;
+        DatasetPrefixesTDB prefixes = buildPrefixTable(nodeTablePrefixes) ;
+        
+        TransactionalSystem trans = new TransactionalBase(txnCoord) ;
+        DatasetGraphTxn dsg = new DatasetGraphTDB(trans, 
+                                                  tripleTable, quadTable, prefixes, 
+                                                  ReorderLib.fixed(), location, params) ;
+        QC.setFactory(dsg.getContext(), OpExecutorTDB1.OpExecFactoryTDB) ;
+        txnCoord.start() ;
+        return dsg ;
+    }
+
+    public static TransactionCoordinator buildTransactionCoordinator(Location location) {
+        Journal journal = Journal.create(location) ;
+        TransactionCoordinator txnCoord = new TransactionCoordinator(journal) ;
+        return txnCoord ;
+    }
+
+    public static String choosePrimaryForIndex(StoreParams params, String index) {
+        String primary3 = params.getPrimaryIndexTriples() ;
+        String primary4 = params.getPrimaryIndexQuads() ;
+        
+        if ( index.length() == primary3.length() )
+            return primary3 ;
+        if ( index.length() == primary4.length() )
+            return primary4 ;
+        throw new DBOpEnvException("Can't find primary for '"+index+"'") ;
+    }
+
+    // ---- Object starts
+    final Location location ;
+    final StoreParams params ;
+    final ComponentIdMgr componentIdMgr ;
+    final TransactionCoordinator txnCoord ;
+
+    private TDBBuilder(TransactionCoordinator txnCoord, Location location, StoreParams params, ComponentIdMgr componentIdMgr) {
+        this.txnCoord = txnCoord ;
+        this.location = location ;
+        this.params = params ;
+        this.componentIdMgr = componentIdMgr ;
+    }
+
+    public Location getLocation()               { return location ; }
+    public StoreParams getParams()              { return params ; }
+    public TransactionCoordinator getTxnCoord() { return txnCoord ; }
+
+    public static TDBBuilder create(Location location) {
+        return create(location, StoreParams.getDftStoreParams()) ; 
+    }
+    
+    public static TDBBuilder create(Location location, StoreParams params) {
+        TransactionCoordinator txnCoord = buildTransactionCoordinator(location) ;
+        return new TDBBuilder(txnCoord, location, params, new ComponentIdMgr(UUID.randomUUID())) ;
+    }
+
+    public static TDBBuilder create(TransactionCoordinator txnCoord, Location location, StoreParams params) {
+        return new TDBBuilder(txnCoord, location, params, new ComponentIdMgr(UUID.randomUUID())) ;
+    }
+
+    /** Look at a directory and see if it is a new area */
+    private static boolean isNewDatabaseArea(Location location) {
+        if ( location.isMem() )
+            return true ;
+        File d = new File(location.getDirectoryPath()) ;
+        if ( !d.exists() )
+            return true ;
+        FileFilter ff = fileFilterNewDB ;
+        File[] entries = d.listFiles(ff) ;
+        return entries.length == 0 ;
+    }
+    
+    /** FileFilter that skips "..", ".", "tdb.lock" and "tdb.cfg" */
+    private static FileFilter fileFilterNewDB = (pathname)->{
+        String fn = pathname.getName() ;
+        if ( fn.equals(".") || fn.equals("..") )
+            return false ;
+        if ( pathname.isDirectory() )
+            return true ;
+        if ( fn.equals(Names.TDB_LOCK_FILE) )
+            return false ;
+        if ( fn.equals(Names.TDB_CONFIG_FILE) )
+            return false ;
+        
+        return true ;
+    } ;
+    
+    public TripleTable buildTripleTable(NodeTable nodeTable) {    
+        String primary = params.getPrimaryIndexTriples() ;
+        String[] indexes = params.getTripleIndexes() ;
+
+        // Validation checks - common index forms.  
+        if ( indexes.length != 3 && indexes.length != 2 )
+            error(log, "Wrong number of triple table indexes: "+String.join(",", indexes)) ;
+        log.debug("Triple table: "+primary+" :: "+String.join(",", indexes)) ;
+
+        TupleIndex tripleIndexes[] = makeTupleIndexes(primary, indexes) ;
+
+        if ( tripleIndexes.length != indexes.length )
+            error(log, "Wrong number of triple table tuples indexes: "+tripleIndexes.length) ;
+        TripleTable tripleTable = new TripleTable(tripleIndexes, nodeTable) ;
+        return tripleTable ;
+    }
+
+    public QuadTable buildQuadTable(NodeTable nodeTable) {    
+        String primary = params.getPrimaryIndexQuads() ;
+        String[] indexes = params.getQuadIndexes() ;
+
+        // Validation checks - common index forms.  
+        if ( indexes.length != 6 && indexes.length != 4 )
+            error(log, "Wrong number of quad table indexes: "+String.join(",", indexes)) ;
+        log.debug("Quad table: "+primary+" :: "+String.join(",", indexes)) ;
+
+        TupleIndex quadIndexes[] = makeTupleIndexes(primary, indexes) ;
+
+        if ( quadIndexes.length != indexes.length )
+            error(log, "Wrong number of quad table tuples indexes: "+quadIndexes.length) ;
+        QuadTable quadTable = new QuadTable(quadIndexes, nodeTable) ;
+        return quadTable ;
+    }
+
+    public DatasetPrefixesTDB buildPrefixTable(NodeTable prefixNodes) {
+        String primary = params.getPrimaryIndexPrefix() ;
+        String[] indexes = params.getPrefixIndexes() ;
+
+        TupleIndex prefixIndexes[] = makeTupleIndexes(primary, indexes) ;
+        if ( prefixIndexes.length != 1 )
+            error(log, "Wrong number of triple table tuples indexes: "+prefixIndexes.length) ;
+
+        // No cache - the prefix mapping is a cache
+        //NodeTable prefixNodes = makeNodeTable(location, pnNode2Id, pnId2Node, -1, -1, -1)  ;
+        NodeTupleTable prefixTable = new NodeTupleTableConcrete(primary.length(),
+                                                                prefixIndexes,
+                                                                prefixNodes) ;
+        DatasetPrefixesTDB prefixes = new DatasetPrefixesTDB(prefixTable) ; 
+        log.debug("Prefixes: "+primary+" :: "+String.join(",", indexes)) ;
+        return prefixes ;
+    }
+
+    // ---- Build structures
+
+    public TupleIndex[] makeTupleIndexes(String primary, String[] indexNames) {
+        int indexRecordLen = primary.length()*SystemTDB.SizeOfNodeId ;
+        TupleIndex indexes[] = new TupleIndex[indexNames.length] ;
+        for (int i = 0 ; i < indexes.length ; i++) {
+            String indexName = indexNames[i] ;
+            String indexLabel = indexNames[i] ;
+            indexes[i] = buildTupleIndex(primary, indexName, indexLabel) ;
+        }
+        return indexes ;
+    }
+
+    public TupleIndex buildTupleIndex(String primary, String index, String name) {
+        TupleMap cmap = TupleMap.create(primary, index) ;
+        RecordFactory rf = new RecordFactory(SystemTDB.SizeOfNodeId * cmap.length(), 0) ;
+        RangeIndex rIdx = buildRangeIndex(rf, index) ;
+        TupleIndex tIdx = new TupleIndexRecord(primary.length(), cmap, index, rf, rIdx) ;
+        return tIdx ;
+    }
+    
+    public RangeIndex buildRangeIndex(RecordFactory recordFactory, String name) {
+        ComponentId cid = componentIdMgr.getComponentId(name) ;
+        FileSet fs = new FileSet(location, name) ;
+        BPlusTree bpt = BPlusTreeFactory.createBPTree(cid, fs, recordFactory) ;
+        txnCoord.add(bpt) ;
+        return bpt ;
+    }
+    
+    public NodeTable buildNodeTable(String name) {
+        NodeTable nodeTable = buildBaseNodeTable(name) ;
+        nodeTable = NodeTableCache.create(nodeTable, params) ;
+        nodeTable = NodeTableInline.create(nodeTable) ;
+        return nodeTable ;
+    }
+
+    public NodeTable buildBaseNodeTable(String name) {
+        RecordFactory recordFactory = new RecordFactory(SystemTDB.LenNodeHash, SystemTDB.SizeOfNodeId) ;
+        Index index = buildRangeIndex(recordFactory, name) ;
+        
+        String dataname = name+"-data" ; 
+        TransBinaryDataFile transBinFile = buildBinaryDataFile(dataname) ;
+        txnCoord.add(transBinFile) ;
+        return new NodeTableTRDF(index, transBinFile) ;
+    }
+    
+    public TransBinaryDataFile buildBinaryDataFile(String name) {
+        ComponentId cid = componentIdMgr.getComponentId(name) ;
+        FileSet fs = new FileSet(location, name) ; 
+        BinaryDataFile binFile = FileFactory.createBinaryDataFile(fs, Names.extObjNodeData) ;
+        BufferChannel pState = FileFactory.createBufferChannel(fs, Names.extBdfState) ;
+        // ComponentId mgt.
+        TransBinaryDataFile transBinFile = new TransBinaryDataFile(binFile, cid, pState) ;
+        return transBinFile ;
+    }
+    
+    private void error(Logger log, String msg)
+    {
+        if ( log != null )
+            log.error(msg) ;
+        throw new TDBException(msg) ;
+    }
+}
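
One detail worth noting from the code above: choosePrimaryForIndex selects the triple
or quad primary purely by the length of the index name. An illustration, assuming the
usual default index names ("SPO" for triples, "GSPO" for quads):

    StoreParams params = StoreParams.getDftStoreParams() ;
    TDBBuilder.choosePrimaryForIndex(params, "POS") ;   // length 3 -> triple primary, e.g. "SPO"
    TDBBuilder.choosePrimaryForIndex(params, "GOSP") ;  // length 4 -> quad primary, e.g. "GSPO"
    // Any other length throws DBOpEnvException("Can't find primary for ...").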

http://git-wip-us.apache.org/repos/asf/jena/blob/3d456654/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/TDBDatasetDetails.java
----------------------------------------------------------------------
diff --git a/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/TDBDatasetDetails.java b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/TDBDatasetDetails.java
new file mode 100644
index 0000000..37b8998
--- /dev/null
+++ b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/setup/TDBDatasetDetails.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.tdb2.setup;
+
+import org.apache.jena.atlas.lib.ArrayUtils ;
+import org.apache.jena.atlas.logging.Log ;
+import org.apache.jena.tdb2.store.DatasetGraphTDB;
+import org.apache.jena.tdb2.store.nodetable.NodeTable;
+import org.apache.jena.tdb2.store.nodetable.NodeTableCache;
+import org.apache.jena.tdb2.store.nodetable.NodeTableInline;
+import org.apache.jena.tdb2.store.tupletable.TupleIndex;
+
+public class TDBDatasetDetails {
+    public final TupleIndex[] tripleIndexes ;
+    public final TupleIndex[] quadIndexes ;
+    public final NodeTable ntTop ;
+    public NodeTableInline ntInline ;
+    public NodeTableCache ntCache ;
+    public NodeTable ntBase ;
+    
+    
+    public TDBDatasetDetails(DatasetGraphTDB dsg) {
+        ntTop = dsg.getTripleTable().getNodeTupleTable().getNodeTable() ;
+        tripleIndexes = ArrayUtils.copy(dsg.getTripleTable().getNodeTupleTable().getTupleTable().getIndexes()) ;
+        quadIndexes = ArrayUtils.copy(dsg.getQuadTable().getNodeTupleTable().getTupleTable().getIndexes()) ;
+        
+        fillInNodeTableDetails() ;
+        fillInIndexDetails() ;
+    }
+
+    private void fillInNodeTableDetails() {
+        // Nodetable.
+        NodeTable ntx = ntTop ;
+        while(ntx.wrapped() != null ) {
+            if ( ntx instanceof NodeTableInline ) {
+                if ( ntInline != null )
+                    Log.warn(this, "Multiple NodeTableInline") ;
+                ntInline = (NodeTableInline)ntx ;
+            }
+            else if ( ntx instanceof NodeTableCache ) {
+                if ( ntCache != null )
+                    Log.warn(this, "Multiple NodeTableCache") ;
+                ntCache = (NodeTableCache)ntx ;
+            }
+            ntx = ntx.wrapped() ;
+        } 
+        
+        ntBase = ntx ;
+        
+        if ( ntInline == null )
+            Log.warn(this, "No NodeTableInline") ;
+        if ( ntCache == null )
+            Log.warn(this, "No NodeTableCache") ;
+        if ( ntBase == null )
+            Log.warn(this, "No base NodeTable") ;
+    }
+    
+    private void fillInIndexDetails() {
+    }
+}

http://git-wip-us.apache.org/repos/asf/jena/blob/3d456654/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/Abortable.java
----------------------------------------------------------------------
diff --git a/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/Abortable.java b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/Abortable.java
new file mode 100644
index 0000000..486336f
--- /dev/null
+++ b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/Abortable.java
@@ -0,0 +1,21 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.tdb2.solver;
+
+interface Abortable { public void abort() ; }

http://git-wip-us.apache.org/repos/asf/jena/blob/3d456654/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/BindingNodeId.java
----------------------------------------------------------------------
diff --git a/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/BindingNodeId.java b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/BindingNodeId.java
new file mode 100644
index 0000000..dc516c2
--- /dev/null
+++ b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/BindingNodeId.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.tdb2.solver;
+
+import java.util.HashMap;
+import java.util.Iterator ;
+import java.util.Map;
+
+import org.apache.jena.atlas.lib.Map2 ;
+
+import org.apache.jena.sparql.core.Var ;
+import org.apache.jena.sparql.engine.binding.Binding ;
+import org.apache.jena.tdb2.store.NodeId;
+
+/** Class for a Binding-like structure except it works on NodeIds, not on Nodes */  
+public class BindingNodeId extends Map2<Var, NodeId>
+{
+    // This is the parent binding - which may be several steps up the chain. 
+    // This is just carried around for later use when we convert a BindingNodeId back to a Binding.
+    private final Binding parentBinding ;
+
+    // Possible optimization: there are at most 3 possible values so HashMap is overkill.
+    // Use a chain of small objects.
+    
+    private BindingNodeId(Map<Var, NodeId> map1, Map2<Var, NodeId> map2, Binding parentBinding)
+    {
+        super(map1, map2) ;
+        this.parentBinding = parentBinding ;
+    }
+
+    // Make from an existing BindingNodeId 
+    public BindingNodeId(BindingNodeId other)
+    {
+        this(new HashMap<Var, NodeId>(), other, other.getParentBinding()) ;
+    }
+    
+    // Make from an existing Binding 
+    public BindingNodeId(Binding binding)
+    {
+        this(new HashMap<Var, NodeId>(), null, binding) ;
+    }
+
+    public BindingNodeId()
+    {
+        this(new HashMap<Var, NodeId>(), null, null) ;
+    }
+    
+    public Binding getParentBinding()    { return parentBinding ; } 
+    
+    //@Override public NodeId get(Var v)    { return super.get(v) ; } 
+    
+    @Override public void put(Var v, NodeId n)
+    {
+        if ( v == null || n == null )
+            throw new IllegalArgumentException("("+v+","+n+")") ;
+        super.put(v, n) ;
+    }
+    
+    public void putAll(BindingNodeId other)
+    {
+        Iterator<Var> vIter = other.iterator() ;
+        
+        for ( ; vIter.hasNext() ; )
+        {
+            Var v = vIter.next() ;
+            if ( v == null )
+                throw new IllegalArgumentException("Null key") ;
+            NodeId n = other.get(v) ;
+            if ( n == null )
+                throw new IllegalArgumentException("("+v+","+n+")") ;
+            super.put(v, n) ;
+        }
+    }
+    
+    @Override
+    public String toString()
+    {
+        StringBuilder sb = new StringBuilder() ;
+        
+        boolean first = true ;
+        for ( Var v : this )
+        {
+            if ( ! first )
+                sb.append(" ") ;
+            first = false ;
+            NodeId x = get(v) ;
+            sb.append(v) ;
+            sb.append(" = ") ;
+            sb.append(x) ;
+        }
+            
+        return sb.toString() ;
+        
+    }
+}
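
As the constructor chain above shows, a BindingNodeId layers a fresh HashMap over an
optional parent view (Map2 semantics). A hedged sketch of typical use, where someNodeId
and anotherNodeId stand in for NodeIds obtained from the node table:

    BindingNodeId parent = new BindingNodeId() ;
    parent.put(Var.alloc("s"), someNodeId) ;
    BindingNodeId child = new BindingNodeId(parent) ;   // new map layered over the parent
    child.put(Var.alloc("o"), anotherNodeId) ;
    // get(...) consults the local map first, then the parent chain.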

http://git-wip-us.apache.org/repos/asf/jena/blob/3d456654/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/BindingTDB.java
----------------------------------------------------------------------
diff --git a/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/BindingTDB.java b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/BindingTDB.java
new file mode 100644
index 0000000..25e0534
--- /dev/null
+++ b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/BindingTDB.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.tdb2.solver;
+
+import java.util.* ;
+
+import org.apache.jena.atlas.logging.Log ;
+import org.apache.jena.graph.Node ;
+import org.apache.jena.riot.out.NodeFmtLib ;
+import org.apache.jena.sparql.core.Var ;
+import org.apache.jena.sparql.engine.binding.Binding ;
+import org.apache.jena.sparql.engine.binding.BindingBase ;
+import org.apache.jena.tdb2.store.NodeId;
+import org.apache.jena.tdb2.store.nodetable.NodeTable;
+
+/** Binding that delays turning a NodeId into a Node until explicitly needed by get() */
+
+public class BindingTDB extends BindingBase
+{
+    private final NodeTable nodeTable ;
+    private final BindingNodeId idBinding ;
+    
+    private static final boolean caching = false ;
+    // Whether the cache is worthwhile is unclear - the NodeTable keeps a cache. 
+    private final Map<Var,Node> cache = ( caching ? new HashMap<>() : null ) ;
+
+    public BindingTDB(BindingNodeId idBinding, NodeTable nodeTable)
+    {
+        // BindingNodeId contains the bindings actually used, copied down when created.
+        super(idBinding.getParentBinding()) ;
+        this.idBinding = idBinding ;
+        this.nodeTable = nodeTable ;
+    }
+
+    @Override
+    protected int size1() { return idBinding.size(); }
+    
+    private List<Var> vars = null ;
+    
+    /** Iterate over all the names of variables. */
+    @Override
+    protected Iterator<Var> vars1() 
+    {
+        if ( vars == null )
+            vars = calcVars() ;
+        return vars.iterator() ;
+    }
+
+    private List<Var> calcVars()
+    {
+        List<Var> vars = new ArrayList<>(4) ;
+        // Only if not in parent.
+        // A (var/value) binding may have been copied down to record its NodeId.
+        
+        Binding b = idBinding.getParentBinding() ;
+        
+        Iterator<Var> iter = idBinding.iterator() ;
+        for ( Var v : idBinding )
+        {
+            if ( b == null || ! b.contains(v) )
+                vars.add(v) ;
+        }
+        return vars ;
+    }
+    
+    @Override
+    protected boolean isEmpty1()
+    {
+        return size1() == 0 ;
+    }
+
+    @Override
+    public boolean contains1(Var var)
+    {
+        return idBinding.containsKey(var) ;
+    }
+    
+    public BindingNodeId getBindingId() { return idBinding ; }
+    
+    public NodeId getNodeId(Var var)
+    {
+        NodeId id = idBinding.get(var) ;
+        if ( id != null )
+            return id ;
+        
+        if ( parent == null )
+            return null ; 
+
+        // Maybe in the parent.
+        if ( parent instanceof BindingTDB )
+            return ((BindingTDB)parent).getNodeId(var) ;
+        return null ;
+    }
+    
+    @Override
+    public Node get1(Var var)
+    {
+        try {
+            Node n = cacheGet(var) ;
+            if ( n != null )
+                return n ;
+            
+            NodeId id = idBinding.get(var) ;
+            if ( id == null )
+                return null ; 
+            n = nodeTable.getNodeForNodeId(id) ;
+            // Update cache.
+            cachePut(var, n) ;
+            return n ;
+        } catch (Exception ex)
+        {
+            Log.error(this, String.format("get1(%s)", var), ex) ;
+            return null ;
+        }
+    }
+
+    private void cachePut(Var var, Node n)
+    {
+        if ( cache != null ) cache.put(var, n) ; 
+    }
+
+    private Node cacheGet(Var var)
+    { 
+        if ( cache == null ) return null ;
+        return cache.get(var) ;
+    }
+    
+    @Override
+    protected void format(StringBuffer sbuff, Var var)
+    {
+        NodeId id = idBinding.get(var) ;
+        String extra = "" ;
+        if ( id != null )
+            extra = "/"+id ;
+        Node node = get(var) ;
+        String tmp = NodeFmtLib.displayStr(node) ;
+        sbuff.append("( ?"+var.getVarName()+extra+" = "+tmp+" )") ;
+    }
+}
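
BindingTDB defers the NodeId-to-Node conversion until a variable is actually fetched.
A hedged sketch, where bindingNodeId and nodeTable are obtained from the solver and the
store respectively:

    BindingTDB binding = new BindingTDB(bindingNodeId, nodeTable) ;
    Node n = binding.get(Var.alloc("s")) ;   // resolved lazily via nodeTable.getNodeForNodeId(...)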

http://git-wip-us.apache.org/repos/asf/jena/blob/3d456654/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/OpExecutorTDB1.java
----------------------------------------------------------------------
diff --git a/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/OpExecutorTDB1.java b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/OpExecutorTDB1.java
new file mode 100644
index 0000000..828bcab
--- /dev/null
+++ b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/OpExecutorTDB1.java
@@ -0,0 +1,428 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.tdb2.solver;
+
+import java.util.function.Predicate;
+
+import org.apache.jena.atlas.lib.tuple.Tuple ;
+import org.apache.jena.atlas.logging.Log ;
+import org.apache.jena.graph.Graph ;
+import org.apache.jena.graph.Node ;
+import org.apache.jena.sparql.ARQInternalErrorException ;
+import org.apache.jena.sparql.algebra.Op ;
+import org.apache.jena.sparql.algebra.op.* ;
+import org.apache.jena.sparql.algebra.optimize.TransformFilterPlacement ;
+import org.apache.jena.sparql.core.BasicPattern ;
+import org.apache.jena.sparql.core.Quad ;
+import org.apache.jena.sparql.core.Substitute ;
+import org.apache.jena.sparql.core.Var ;
+import org.apache.jena.sparql.engine.ExecutionContext ;
+import org.apache.jena.sparql.engine.QueryIterator ;
+import org.apache.jena.sparql.engine.iterator.QueryIterPeek ;
+import org.apache.jena.sparql.engine.main.OpExecutor ;
+import org.apache.jena.sparql.engine.main.OpExecutorFactory ;
+import org.apache.jena.sparql.engine.main.QC ;
+import org.apache.jena.sparql.engine.main.iterator.QueryIterGraph ;
+import org.apache.jena.sparql.engine.optimizer.reorder.ReorderProc ;
+import org.apache.jena.sparql.engine.optimizer.reorder.ReorderTransformation ;
+import org.apache.jena.sparql.expr.ExprList ;
+import org.apache.jena.sparql.mgt.Explain ;
+import org.apache.jena.tdb2.store.DatasetGraphTDB;
+import org.apache.jena.tdb2.store.GraphTDB;
+import org.apache.jena.tdb2.store.NodeId;
+import org.slf4j.Logger ;
+import org.slf4j.LoggerFactory ;
+
+/** TDB executor for algebra expressions.  It is the standard ARQ executor
+ *  except for basic graph patterns and filtered basic graph patterns (currently).  
+ * 
+ * See also: StageGeneratorDirectTDB, a non-reordering StageGenerator.
+ */
+public class OpExecutorTDB1 extends OpExecutor
+{
+    private static final Logger log = LoggerFactory.getLogger(OpExecutorTDB1.class) ;
+    
+    public final static OpExecutorFactory OpExecFactoryTDB = new OpExecutorFactory()
+    {
+        @Override
+        public OpExecutor create(ExecutionContext execCxt)
+        { return new OpExecutorTDB1(execCxt) ; }
+    } ;
+    
+    private final boolean isForTDB ;
+    
+    // A new compile object is created for each op compilation.
+    // So the execCxt is changing as we go through the query-compile-execute process  
+    public OpExecutorTDB1(ExecutionContext execCxt)
+    {
+        super(execCxt) ;
+        // NB. The dataset may be a TDB one, or a general one.
+        // Any merged union graph magic (for a TDB dataset) was handled
+        // in QueryEngineTDB.
+        
+        isForTDB = (execCxt.getActiveGraph() instanceof GraphTDB) ;
+    }
+
+    @Override
+    protected QueryIterator exec(Op op, QueryIterator input) {
+        if ( level < 0 )
+            // Print only at top level (and we're called before level++) 
+            Explain.explain("TDB", op, super.execCxt.getContext()) ;
+        return super.exec(op, input) ;
+    } 
+    
+    // Retrieving nodes isn't so bad because they will be needed anyway.
+    // And if there are duplicates, they are likely to be cached.
+    // Need to work with SolverLib, which wraps the NodeId bindings with a converter.
+    
+    @Override
+    protected QueryIterator execute(OpDistinct opDistinct, QueryIterator input)
+    {
+        return super.execute(opDistinct, input) ;
+    }
+    
+    @Override
+    protected QueryIterator execute(OpReduced opReduced, QueryIterator input)
+    {
+        return super.execute(opReduced, input) ;
+    }
+    
+    @Override
+    protected QueryIterator execute(OpFilter opFilter, QueryIterator input)
+    {
+        if ( ! isForTDB )
+            return super.execute(opFilter, input) ;
+        
+        // If the filter does not apply to the input??
+        // Where does ARQ catch this?
+        
+        // (filter (bgp ...))
+        if ( OpBGP.isBGP(opFilter.getSubOp()) )
+        {
+            // Still may be a TDB graph in a non-TDB dataset (e.g. a named model)
+            GraphTDB graph = (GraphTDB)execCxt.getActiveGraph() ;
+            OpBGP opBGP = (OpBGP)opFilter.getSubOp() ;
+            return executeBGP(graph, opBGP, input, opFilter.getExprs(), execCxt) ;
+        }
+        
+        // (filter (quadpattern ...))
+        if ( opFilter.getSubOp() instanceof OpQuadPattern )
+        {
+            OpQuadPattern quadPattern = (OpQuadPattern)opFilter.getSubOp() ;
+            DatasetGraphTDB ds = (DatasetGraphTDB)execCxt.getDataset() ;
+            return optimizeExecuteQuads(ds, input,
+                                        quadPattern.getGraphNode(), quadPattern.getBasicPattern(),
+                                        opFilter.getExprs(), execCxt) ;
+        }
+    
+        // (filter (anything else))
+        return super.execute(opFilter, input) ;
+    }
+
+    // ---- Triple patterns
+    
+    @Override
+    protected QueryIterator execute(OpBGP opBGP, QueryIterator input)
+    {
+        if ( ! isForTDB )
+            return super.execute(opBGP, input) ;
+        
+        GraphTDB graph = (GraphTDB)execCxt.getActiveGraph() ;
+        return executeBGP(graph, opBGP, input, null, execCxt) ;
+       
+    }
+
+    @Override
+    protected QueryIterator execute(OpQuadPattern quadPattern, QueryIterator input)
+    {
+        if ( ! isForTDB )
+            return super.execute(quadPattern, input) ;
+            
+    //        DatasetGraph dg = execCxt.getDataset() ;
+    //        if ( ! ( dg instanceof DatasetGraphTDB ) )
+    //            throw new InternalErrorException("Not a TDB backed dataset in quad pattern execution") ;
+        
+        DatasetGraphTDB ds = (DatasetGraphTDB)execCxt.getDataset() ;
+        BasicPattern bgp = quadPattern.getBasicPattern() ;
+        Node gn = quadPattern.getGraphNode() ;
+        return optimizeExecuteQuads(ds, input, gn, bgp, null, execCxt) ;
+    }
+
+    @Override
+    protected QueryIterator execute(OpGraph opGraph, QueryIterator input)
+    {
+        // Path evaluation or datasets which do not go straight to the DatasetGraphTDB
+        return new QueryIterGraph(input, opGraph, execCxt) ;
+    }
+
+    /** Execute a BGP (and filters) on a TDB graph, which may be in default storage or it may be a named graph */ 
+    private static QueryIterator executeBGP(GraphTDB graph, OpBGP opBGP, QueryIterator input, ExprList exprs, 
+                                            ExecutionContext execCxt)
+    {
+        // Is it the real default graph (normal route or explicitly named)?
+        if ( ! isDefaultGraphStorage(graph.getGraphName()))
+        {
+            // Not default storage - it's a named graph in storage. 
+            DatasetGraphTDB ds = graph.getDSG() ;
+            return optimizeExecuteQuads(ds, input, graph.getGraphName(), opBGP.getPattern(), exprs, execCxt) ;
+        }
+        
+        // Execute a BGP on the real default graph
+        return optimizeExecuteTriples(graph, input, opBGP.getPattern(), exprs, execCxt) ;
+    }
+
+    /** Execute, with optimization, a basic graph pattern on the default graph storage */
+    private static QueryIterator optimizeExecuteTriples(GraphTDB graph, QueryIterator input,
+                                                        BasicPattern pattern, ExprList exprs,
+                                                        ExecutionContext execCxt)
+    {
+        if ( ! input.hasNext() )
+            return input ;
+    
+        // -- Input
+        // Must pass this iterator into the next stage.
+        if ( pattern.size() >= 2 )
+        {
+            // Must be 2 or more triples to reorder.
+            ReorderTransformation transform = graph.getDSG().getReorderTransform() ;
+            if ( transform != null )
+            {
+                QueryIterPeek peek = QueryIterPeek.create(input, execCxt) ;
+                input = peek ; // Must pass on
+                pattern = reorder(pattern, peek, transform) ;
+            }
+        }
+        // -- Filter placement
+            
+        Op op = null ;
+        if ( exprs != null )
+            op = TransformFilterPlacement.transform(exprs, pattern) ;
+        else
+            op = new OpBGP(pattern) ;
+        
+        return plainExecute(op, input, execCxt) ;
+    }
+
+    /** Execute, with optimization, a quad pattern */
+    private static QueryIterator optimizeExecuteQuads(DatasetGraphTDB ds, 
+                                                      QueryIterator input, 
+                                                      Node gn, BasicPattern bgp,
+                                                      ExprList exprs, ExecutionContext execCxt)
+    {
+        if ( ! input.hasNext() )
+            return input ;
+
+        // ---- Graph names with special meaning. 
+
+        gn = decideGraphNode(gn, execCxt) ;
+        if ( gn == null )
+            return optimizeExecuteTriples(ds.getDefaultGraphTDB(), input, bgp, exprs, execCxt) ;
+        
+        // ---- Execute quads+filters
+        if ( bgp.size() >= 2 )
+        {
+            ReorderTransformation transform = ds.getReorderTransform() ;
+    
+            if ( transform != null )
+            {
+                QueryIterPeek peek = QueryIterPeek.create(input, execCxt) ;
+                input = peek ; // Original input now invalid.
+                bgp = reorder(bgp, peek, transform) ;
+            }
+        }
+        // -- Filter placement
+        Op op = null ;
+        if ( exprs != null )
+            op = TransformFilterPlacement.transform(exprs, gn, bgp) ;
+        else
+            op = new OpQuadPattern(gn, bgp) ;
+
+        return plainExecute(op, input, execCxt) ;
+    }
+
+    /** Execute without modification of the op - does <b>not</b> apply special graph name translations */ 
+    private static QueryIterator plainExecute(Op op, QueryIterator input, ExecutionContext execCxt)
+    {
+        // -- Execute
+        // Switch to a non-reordering executor
+        // The Op may be a sequence due to TransformFilterPlacement
+        // so we need to do a full execution step, not go straight to the SolverLib.
+        
+        ExecutionContext ec2 = new ExecutionContext(execCxt) ;
+        ec2.setExecutor(plainFactory) ;
+
+        // Solve without going through this executor again.
+        // There would be issues of nested patterns but this is only a
+        // (filter (bgp...)) or (filter (quadpattern ...)) or sequences of these.
+        // so there are no nested patterns to reorder.
+        return QC.execute(op, input, ec2) ;
+    }
+
+    private static BasicPattern reorder(BasicPattern pattern, QueryIterPeek peek, ReorderTransformation transform)
+    {
+        if ( transform != null )
+        {
+            // This works by getting one result from the peek iterator,
+            // and creating the more grounded BGP. The transform is used to
+            // determine the best order and the transformation is returned. This
+            // transform is applied to the unsubstituted pattern (which will be
+            // substituted as part of evaluation).
+            
+            if ( ! peek.hasNext() )
+                throw new ARQInternalErrorException("Peek iterator is already empty") ;
+ 
+            BasicPattern pattern2 = Substitute.substitute(pattern, peek.peek() ) ;
+            // Calculate the reordering based on the substituted pattern.
+            ReorderProc proc = transform.reorderIndexes(pattern2) ;
+            // Then reorder the original pattern
+            pattern = proc.reorder(pattern) ;
+        }
+        return pattern ;
+    }
+    
+    /** Handle special graph node names.  
+     * Returns null for default graph in storage (use the triple table).
+     * Returns Node.ANY for the union graph
+     */
+    public static Node decideGraphNode(Node gn, ExecutionContext execCxt)
+    {
+     // ---- Graph names with special meaning. 
+    
+        // Graph names with special meaning:
+        //   Quad.defaultGraphIRI -- the IRI used in GRAPH <> to mean the default graph.
+        //   Quad.defaultGraphNodeGenerated -- the internal marker node used for the quad form of queries.
+        //   Quad.unionGraph -- the IRI used in GRAPH <> to mean the union of named graphs
+    
+        if ( isDefaultGraphStorage(gn) ) 
+        {
+            // Storage concrete, default graph. 
+            // Either outside GRAPH (no implicit union)
+            // or using the "name" of the default graph
+            return null ;
+        }
+
+        // Not default storage graph.
+        // ---- Union (RDF Merge) of named graphs
+
+        if ( Quad.isUnionGraph(gn) ) 
+            return Node.ANY ;
+        
+        return gn ;
+    }
+
+    // Is this a query against the real default graph in the storage (in a 3-tuple table). 
+    private static boolean isDefaultGraphStorage(Node gn)
+    {
+        if ( gn == null )
+            return true ;
+        
+        // Is it the implicit name for default graph.
+        if ( Quad.isDefaultGraph(gn) )
+            // Not accessing the union of named graphs as the default graph
+            // and pattern is directed to the default graph.
+            return true ;
+    
+        return false ;
+    }
+    
+    @Override
+    protected QueryIterator execute(OpDatasetNames dsNames, QueryIterator input)
+    { 
+        DatasetGraphTDB ds = (DatasetGraphTDB)execCxt.getDataset() ;
+        Predicate<Tuple<NodeId>> filter = QC2.getFilter(execCxt.getContext()) ;
+        Node gn = dsNames.getGraphNode() ;
+        if ( Var.isVar(gn) )
+            return SolverLib.graphNames(ds, dsNames.getGraphNode(), input, filter, execCxt) ;
+        else
+            return SolverLib.testForGraphName(ds, dsNames.getGraphNode(), input, filter, execCxt) ;
+    }
+
+    // ---- OpExecute factories and plain executor.
+    
+    private static OpExecutorFactory plainFactory = new OpExecutorPlainFactoryTDB() ;
+    private static class OpExecutorPlainFactoryTDB implements OpExecutorFactory
+    {
+        @Override
+        public OpExecutor create(ExecutionContext execCxt)
+        {
+            return new OpExecutorPlainTDB(execCxt) ;
+        }
+    }
+
+    /** An op executor that simply executes a BGP or QuadPattern without any reordering */ 
+    private static class OpExecutorPlainTDB extends OpExecutor
+    {
+        Predicate<Tuple<NodeId>> filter = null ;
+        
+        public OpExecutorPlainTDB(ExecutionContext execCxt)
+        {
+            super(execCxt) ;
+            filter = QC2.getFilter(execCxt.getContext()) ;
+        }
+        
+        @Override
+        public QueryIterator execute(OpBGP opBGP, QueryIterator input)
+        {
+            Graph g = execCxt.getActiveGraph() ;
+            
+            if ( g instanceof GraphTDB )
+            {
+                BasicPattern bgp = opBGP.getPattern() ;
+                Explain.explain("Execute", bgp, execCxt.getContext()) ;
+                // Triple-backed (but may be named as explicit default graph).
+                //return SolverLib.execute((GraphTDB)g, bgp, input, filter, execCxt) ;
+                GraphTDB gtdb = (GraphTDB)g ;
+                Node gn = decideGraphNode(gtdb.getGraphName(), execCxt) ;
+                return SolverLib.execute(gtdb.getDSG(), gn, bgp, input, filter, execCxt) ;
+            }
+            Log.warn(this, "Non-GraphTDB passed to OpExecutorPlainTDB") ;
+            return super.execute(opBGP, input) ;
+        }
+        
+        @Override
+        public QueryIterator execute(OpQuadPattern opQuadPattern, QueryIterator input)
+        {
+            Node gn = opQuadPattern.getGraphNode() ;
+            gn = decideGraphNode(gn, execCxt) ;
+            
+            if ( execCxt.getDataset() instanceof DatasetGraphTDB )
+            {
+                DatasetGraphTDB ds = (DatasetGraphTDB)execCxt.getDataset() ;
+                Explain.explain("Execute", opQuadPattern.getPattern(), execCxt.getContext()) ;
+                BasicPattern bgp = opQuadPattern.getBasicPattern() ;
+                return SolverLib.execute(ds, gn, bgp, input, filter, execCxt) ;
+            }
+            // Maybe a TDB named graph inside a non-TDB dataset.
+            Graph g = execCxt.getActiveGraph() ;
+            if ( g instanceof GraphTDB )
+            {
+                // Triples graph from TDB (which is the default graph of the dataset),
+                // used as a named graph in a composite dataset.
+                BasicPattern bgp = opQuadPattern.getBasicPattern() ;
+                Explain.explain("Execute", bgp, execCxt.getContext()) ;
+                // Don't pass in G -- gn may be different.
+                return SolverLib.execute(((GraphTDB)g).getDSG(), gn, bgp, input, filter, execCxt) ;
+            }
+            Log.warn(this, "Non-DatasetGraphTDB passed to OpExecutorPlainTDB") ;
+            return super.execute(opQuadPattern, input) ;
+        }
+
+    }
+}
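
For orientation, a minimal sketch (illustrative only, not part of this patch) of how a caller interprets the value returned by decideGraphNode above:

    import org.apache.jena.graph.Node;

    public class GraphNodeContract {
        // Illustrative only: the three outcomes documented on decideGraphNode.
        public static String describe(Node gn) {
            if ( gn == null )
                return "real default graph: solve against the triple table";
            if ( Node.ANY.equals(gn) )
                return "union of named graphs: solve against the quad table over all graphs";
            return "concrete named graph: solve against the quad table with the graph slot fixed";
        }
    }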

http://git-wip-us.apache.org/repos/asf/jena/blob/3d456654/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/QC2.java
----------------------------------------------------------------------
diff --git a/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/QC2.java b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/QC2.java
new file mode 100644
index 0000000..181acbf
--- /dev/null
+++ b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/QC2.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.tdb2.solver;
+
+import java.util.function.Predicate;
+
+import org.apache.jena.atlas.lib.tuple.Tuple ;
+import org.apache.jena.sparql.util.Context ;
+import org.apache.jena.tdb2.TDBException;
+import org.apache.jena.tdb2.store.NodeId;
+import org.apache.jena.tdb2.sys.SystemTDB;
+
+public class QC2
+{
+    public static Predicate<Tuple<NodeId>> getFilter(Context context)
+    {
+        Object x = context.get(SystemTDB.symTupleFilter) ;
+
+        try {
+            @SuppressWarnings("unchecked")
+            Predicate<Tuple<NodeId>> f = (Predicate<Tuple<NodeId>>)x ;
+            return f ;
+        } catch (ClassCastException ex)
+        {
+            throw new TDBException("Not a Filter<Tuple<NodeId>>:"+x, ex) ;
+        }
+    }
+
+    public static void setFilter(Context context, Predicate<Tuple<NodeId>> filter)
+    {
+        context.set(SystemTDB.symTupleFilter, filter) ;
+    }
+}
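
A minimal usage sketch for the filter hook above (illustrative only; the always-true predicate is a placeholder, a real filter would inspect the NodeIds, e.g. for access control):

    import java.util.function.Predicate;

    import org.apache.jena.atlas.lib.tuple.Tuple;
    import org.apache.jena.sparql.util.Context;
    import org.apache.jena.tdb2.solver.QC2;
    import org.apache.jena.tdb2.store.NodeId;

    public class TupleFilterExample {
        // Install a tuple filter in the execution context; the solver retrieves it
        // via QC2.getFilter(context) and applies it to candidate tuples.
        public static void install(Context context) {
            Predicate<Tuple<NodeId>> filter = tuple -> true;   // placeholder predicate
            QC2.setFilter(context, filter);
        }
    }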

http://git-wip-us.apache.org/repos/asf/jena/blob/3d456654/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/QueryEngineTDB.java
----------------------------------------------------------------------
diff --git a/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/QueryEngineTDB.java b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/QueryEngineTDB.java
new file mode 100644
index 0000000..f57f76f
--- /dev/null
+++ b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/QueryEngineTDB.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.tdb2.solver;
+
+import org.apache.jena.atlas.lib.Lib ;
+import org.apache.jena.query.Query ;
+import org.apache.jena.sparql.algebra.Algebra ;
+import org.apache.jena.sparql.algebra.Op ;
+import org.apache.jena.sparql.core.DatasetDescription ;
+import org.apache.jena.sparql.core.DatasetGraph ;
+import org.apache.jena.sparql.core.DynamicDatasets ;
+import org.apache.jena.sparql.core.Substitute ;
+import org.apache.jena.sparql.engine.Plan ;
+import org.apache.jena.sparql.engine.QueryEngineFactory ;
+import org.apache.jena.sparql.engine.QueryEngineRegistry ;
+import org.apache.jena.sparql.engine.QueryIterator ;
+import org.apache.jena.sparql.engine.binding.Binding ;
+import org.apache.jena.sparql.engine.binding.BindingFactory ;
+import org.apache.jena.sparql.engine.iterator.QueryIteratorWrapper ;
+import org.apache.jena.sparql.engine.main.QueryEngineMain ;
+import org.apache.jena.sparql.mgt.Explain ;
+import org.apache.jena.sparql.util.Context ;
+import org.apache.jena.tdb2.TDB2;
+import org.apache.jena.tdb2.TDBException;
+import org.apache.jena.tdb2.migrate.A2;
+import org.apache.jena.tdb2.store.DatasetGraphTDB;
+import org.apache.jena.tdb2.sys.TDBInternal;
+
+// This exists to intercept the query execution setup,
+// e.g. to choose the transformation optimizations,
+// and then to produce the quad form.
+// TDB also uses a custom OpExecutor to intercept certain parts
+// of the Op evaluation.
+
+public class QueryEngineTDB extends QueryEngineMain
+{
+    // ---- Wiring
+    static public QueryEngineFactory getFactory() { return factory ; } 
+    static public void register()       { QueryEngineRegistry.addFactory(factory) ; }
+    static public void unregister()     { QueryEngineRegistry.removeFactory(factory) ; }
+    
+    private Binding initialInput ;
+
+    // ---- Object
+    protected QueryEngineTDB(Op op, DatasetGraphTDB dataset, Binding input, Context context)
+    {
+        super(op, dataset, input, context) ;
+        this.initialInput = input ;
+    }
+    
+    private boolean doingDynamicDatasetBySpecialDataset = false ;
+    
+    protected QueryEngineTDB(Query query, DatasetGraphTDB dataset, Binding input, Context cxt)
+    { 
+        super(query, dataset, input, cxt) ; 
+        DatasetDescription dsDesc = DatasetDescription.create(query, context) ;
+        
+        if ( dsDesc != null )
+        {
+            doingDynamicDatasetBySpecialDataset = true ;
+            super.dataset = DynamicDatasets.dynamicDataset(dsDesc, dataset, cxt.isTrue(TDB2.symUnionDefaultGraph) ) ;
+        }
+        this.initialInput = input ; 
+    }
+    
+    // Choose the algebra-level optimizations to invoke. 
+    @Override
+    protected Op modifyOp(Op op)
+    {
+        op = Substitute.substitute(op, initialInput) ;
+        // Optimize (high-level)
+        op = super.modifyOp(op) ;
+
+        // Quadification
+        // Only apply if not a rewritten DynamicDataset
+        if ( ! doingDynamicDatasetBySpecialDataset )
+            op = Algebra.toQuadForm(op) ;
+        
+        // Record it.
+        setOp(op) ;
+        return op ;
+    }
+
+    @Override
+    public QueryIterator eval(Op op, DatasetGraph dsg, Binding input, Context context)
+    {
+        // Top of execution of a query.
+        // The op has been transformed to quad form by now, but there may still be some (graph ...) forms, e.g. paths.
+        
+        // Fix DatasetGraph for global union.
+        if ( context.isTrue(TDB2.symUnionDefaultGraph) && ! doingDynamicDatasetBySpecialDataset ) 
+        {
+            op = A2.unionDefaultGraphQuads(op) ;
+            Explain.explain("REWRITE(Union default graph)", op, context) ;
+        }
+        QueryIterator results = super.eval(op, dsg, input, context) ;
+        results = new QueryIteratorMaterializeBinding(results) ;
+        return results ; 
+    }
+    
+    /** Copy from any TDB internal BindingTDB to a Binding that
+     *  does not have any connection to the database.   
+     */
+    static class QueryIteratorMaterializeBinding extends QueryIteratorWrapper
+    {
+        public QueryIteratorMaterializeBinding(QueryIterator qIter)
+        {
+            super(qIter) ;
+        }
+        
+        @Override
+        protected Binding moveToNextBinding()
+        { 
+            Binding b = super.moveToNextBinding() ;
+            b = BindingFactory.materialize(b) ;
+            return b ;
+        }
+    }
+    
+    // Execution time (needs wiring to ARQ).
+    public long getMillis() { return -1 ; }
+    
+    // ---- Factory
+    protected static QueryEngineFactory factory = new QueryEngineFactoryTDB() ;
+        
+    protected static class QueryEngineFactoryTDB implements QueryEngineFactory
+    {
+        private static boolean isHandledByTDB(DatasetGraph dataset) {
+            return TDBInternal.isBackedByTDB(dataset);
+        }
+        
+        protected DatasetGraphTDB dsgToQuery(DatasetGraph dataset) {
+            try { 
+                return TDBInternal.requireStorage(dataset);
+            } catch (TDBException ex) {
+                // Rethrow with a more specific message.
+                throw new TDBException("Internal inconsistency: trying to execute query on unrecognized kind of DatasetGraph: "+Lib.className(dataset)) ;
+            }
+        }
+        
+        @Override
+        public boolean accept(Query query, DatasetGraph dataset, Context context) 
+        { return isHandledByTDB(dataset) ; }
+
+        @Override
+        public Plan create(Query query, DatasetGraph dataset, Binding input, Context context)
+        {
+            QueryEngineTDB engine = new QueryEngineTDB(query, dsgToQuery(dataset), input, context) ;
+            return engine.getPlan() ;
+        }
+        
+        @Override
+        public boolean accept(Op op, DatasetGraph dataset, Context context) 
+        { return isHandledByTDB(dataset) ; }
+
+        @Override
+        public Plan create(Op op, DatasetGraph dataset, Binding binding, Context context)
+        {
+            QueryEngineTDB engine = new QueryEngineTDB(op, dsgToQuery(dataset), binding, context) ;
+            return engine.getPlan() ;
+        }
+    }
+    
+//    // By rewrite, not using a general purpose dataset with the right graphs in.
+//    private static Op dynamicDatasetOp(Op op,  Context context)
+//    {
+//        Transform transform = null ;
+//    
+//        try {
+//            @SuppressWarnings("unchecked")
+//            Set<Node> defaultGraphs = (Set<Node>)(context.get(SystemTDB.symDatasetDefaultGraphs)) ;
+//            @SuppressWarnings("unchecked")
+//            Set<Node> namedGraphs = (Set<Node>)(context.get(SystemTDB.symDatasetNamedGraphs)) ;
+//            if ( defaultGraphs != null || namedGraphs != null )
+//                transform = new TransformDynamicDataset(defaultGraphs, 
+//                                                        namedGraphs, 
+//                                                        context.isTrue(TDB.symUnionDefaultGraph)) ;
+//        } catch (ClassCastException ex)
+//        {
+//            Log.warn(QueryEngineTDB.class, "Bad dynamic dataset description (ClassCastException)", ex) ;
+//            transform = null ;
+//            return op ;
+//        }
+//
+//        // Apply dynamic dataset modifications.
+//        if ( transform != null )
+//            op = Transformer.transform(transform, op) ;
+//        return op ;
+//    }        
+//    
+}
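
A minimal sketch of the wiring at the top of this class (illustrative only; TDB2 initialization normally calls register() itself):

    import org.apache.jena.tdb2.solver.QueryEngineTDB;

    public class EngineWiringExample {
        public static void main(String[] args) {
            QueryEngineTDB.register();     // factory now answers accept() for TDB-backed datasets
            // ... execute queries against a TDB2-backed DatasetGraph ...
            QueryEngineTDB.unregister();   // remove the factory again
        }
    }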

http://git-wip-us.apache.org/repos/asf/jena/blob/3d456654/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/QueryIterTDB.java
----------------------------------------------------------------------
diff --git a/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/QueryIterTDB.java b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/QueryIterTDB.java
new file mode 100644
index 0000000..b4ebc3e
--- /dev/null
+++ b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/QueryIterTDB.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.tdb2.solver;
+
+import java.util.Iterator ;
+import java.util.List ;
+
+import org.apache.jena.sparql.engine.ExecutionContext ;
+import org.apache.jena.sparql.engine.QueryIterator ;
+import org.apache.jena.sparql.engine.binding.Binding ;
+import org.apache.jena.sparql.engine.iterator.QueryIterPlainWrapper ;
+
+public class QueryIterTDB extends QueryIterPlainWrapper
+{
+    final private QueryIterator originalInput ;
+    private List<Abortable> killList ;
+    
+    // The original input needs closing as well.
+    public QueryIterTDB(Iterator<Binding> iterBinding, List<Abortable> killList , QueryIterator originalInput, ExecutionContext execCxt)
+    {
+        super(iterBinding, execCxt) ;
+        this.originalInput = originalInput ;
+        this.killList = killList ;
+    }
+    
+    @Override
+    protected void closeIterator()
+    { 
+        if ( originalInput != null )
+            originalInput.close();
+        super.closeIterator() ;
+    }
+
+    @Override
+    protected void requestCancel()
+    { 
+        if ( killList != null )
+            for ( Abortable it : killList )
+                it.abort() ;
+        if ( originalInput != null )
+            originalInput.cancel(); 
+    }
+}
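
A minimal sketch of the cancellation path (illustrative only): cancelling the iterator returned by the solver triggers requestCancel() above, which aborts every iterator in the kill list and cancels the original input.

    import org.apache.jena.sparql.engine.QueryIterator;

    public class CancelExample {
        // After this call, the abortable iterators in the kill list throw
        // QueryCancelledException the next time hasNext()/next() is called.
        public static void cancel(QueryIterator resultsFromSolver) {
            resultsFromSolver.cancel();
        }
    }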

http://git-wip-us.apache.org/repos/asf/jena/blob/3d456654/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/SolverLib.java
----------------------------------------------------------------------
diff --git a/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/SolverLib.java b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/SolverLib.java
new file mode 100644
index 0000000..3f0cc3c
--- /dev/null
+++ b/jena-db/jena-tdb2/src/main/java/org/apache/jena/tdb2/solver/SolverLib.java
@@ -0,0 +1,341 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.tdb2.solver;
+
+import java.util.* ;
+import java.util.function.Function ;
+import java.util.function.Predicate ;
+
+import org.apache.jena.atlas.iterator.Iter ;
+import org.apache.jena.atlas.iterator.IteratorWrapper ;
+import org.apache.jena.atlas.lib.tuple.Tuple ;
+import org.apache.jena.atlas.lib.tuple.TupleFactory ;
+import org.apache.jena.graph.Node ;
+import org.apache.jena.graph.Triple ;
+import org.apache.jena.query.QueryCancelledException ;
+import org.apache.jena.sparql.core.BasicPattern ;
+import org.apache.jena.sparql.core.Quad ;
+import org.apache.jena.sparql.core.Var ;
+import org.apache.jena.sparql.engine.ExecutionContext ;
+import org.apache.jena.sparql.engine.QueryIterator ;
+import org.apache.jena.sparql.engine.binding.Binding ;
+import org.apache.jena.sparql.engine.binding.BindingFactory ;
+import org.apache.jena.sparql.engine.binding.BindingMap ;
+import org.apache.jena.sparql.engine.iterator.QueryIterNullIterator ;
+import org.apache.jena.tdb2.TDBException;
+import org.apache.jena.tdb2.lib.NodeLib;
+import org.apache.jena.tdb2.store.DatasetGraphTDB;
+import org.apache.jena.tdb2.store.GraphTDB;
+import org.apache.jena.tdb2.store.NodeId;
+import org.apache.jena.tdb2.store.nodetable.NodeTable;
+import org.apache.jena.tdb2.store.nodetupletable.NodeTupleTable;
+import org.apache.jena.tdb2.sys.TDBInternal;
+import org.slf4j.Logger ;
+import org.slf4j.LoggerFactory ;
+
+/** Utilities used within the TDB BGP solver : local TDB store */
+public class SolverLib
+{
+    private static Logger log = LoggerFactory.getLogger(SolverLib.class) ; 
+    
+    /** Non-reordering execution of a basic graph pattern, given an iterator of bindings as input. */
+    public static QueryIterator execute(GraphTDB graph, BasicPattern pattern, 
+                                        QueryIterator input, Predicate<Tuple<NodeId>> filter,
+                                        ExecutionContext execCxt)
+    {
+        // Maybe default graph or named graph.
+        NodeTupleTable ntt = graph.getNodeTupleTable() ;
+        return execute(ntt, graph.getGraphName(), pattern, input, filter, execCxt) ;
+    }
+    
+    /** Non-reordering execution of a quad pattern, given an iterator of bindings as input.
+     *  GraphNode is Node.ANY for execution over the union of named graphs.
+     *  GraphNode is null for execution over the real default graph.
+     */ 
+    public static QueryIterator execute(DatasetGraphTDB ds, Node graphNode, BasicPattern pattern,
+                                        QueryIterator input, Predicate<Tuple<NodeId>> filter,
+                                        ExecutionContext execCxt)
+    {
+        NodeTupleTable ntt = ds.chooseNodeTupleTable(graphNode) ;
+        return execute(ntt, graphNode, pattern, input, filter, execCxt) ;
+    }
+    
+    public static Iterator<BindingNodeId> convertToIds(Iterator<Binding> iterBindings, NodeTable nodeTable)
+    { return Iter.map(iterBindings, convFromBinding(nodeTable)) ; }
+    
+    /** Convert from Iterator<BindingNodeId> to Iterator<Binding>, converting "on demand"
+     * (via convToBinding(BindingNodeId, NodeTable)).
+     */
+    public static Iterator<Binding> convertToNodes(Iterator<BindingNodeId> iterBindingIds, NodeTable nodeTable)
+    { return Iter.map(iterBindingIds, bindingNodeIds -> convToBinding(bindingNodeIds, nodeTable)) ; }
+    
+    // The worker.  Callers choose the NodeTupleTable.  
+    //     graphNode may be Node.ANY, meaning we should make triples unique.
+    //     graphNode may be null, meaning default graph
+
+    private static QueryIterator execute(NodeTupleTable nodeTupleTable, Node graphNode, BasicPattern pattern, 
+                                         QueryIterator input, Predicate<Tuple<NodeId>> filter,
+                                         ExecutionContext execCxt)
+    {
+        if ( Quad.isUnionGraph(graphNode) )
+            graphNode = Node.ANY ;
+        if ( Quad.isDefaultGraph(graphNode) )
+            graphNode = null ;
+        
+        List<Triple> triples = pattern.getList() ;
+        boolean anyGraph = (graphNode==null ? false : (Node.ANY.equals(graphNode))) ;
+        
+        int tupleLen = nodeTupleTable.getTupleTable().getTupleLen() ;
+        if ( graphNode == null ) {
+            if ( 3 != tupleLen )
+                throw new TDBException("SolverLib: Null graph node but tuples are of length "+tupleLen) ;
+        } else {
+            if ( 4 != tupleLen )
+                throw new TDBException("SolverLib: Graph node specified but tuples are of length "+tupleLen) ;
+        }
+        
+        // Convert from a QueryIterator (Bindings of Var/Node) to BindingNodeId
+        NodeTable nodeTable = nodeTupleTable.getNodeTable() ;
+        
+        Iterator<BindingNodeId> chain = Iter.map(input, SolverLib.convFromBinding(nodeTable)) ;
+        List<Abortable> killList = new ArrayList<>() ;
+        
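+        // Each triple in the pattern extends the pipeline: solve() wraps the
+        // previous stage in a StageMatchTuple, and makeAbortable records the
+        // new stage so it can be cancelled from another thread.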
+        for ( Triple triple : triples )
+        {
+            Tuple<Node> tuple = null ;
+            if ( graphNode == null )
+                // 3-tuples
+                tuple = TupleFactory.tuple(triple.getSubject(), triple.getPredicate(), triple.getObject()) ;
+            else
+                // 4-tuples.
+                tuple = TupleFactory.tuple(graphNode, triple.getSubject(), triple.getPredicate(), triple.getObject()) ;
+            chain = solve(nodeTupleTable, tuple, anyGraph, chain, filter, execCxt) ;
+            chain = makeAbortable(chain, killList) ; 
+        }
+        
+        // DEBUG POINT
+        if ( false )
+        {
+            if ( chain.hasNext())
+                chain = Iter.debug(chain) ;
+            else
+                System.out.println("No results") ;
+        }
+        
+        // Timeout wrapper ****
+        // QueryIterTDB gets called async.
+        // Iter.abortable?
+        // Or each iterator has a place to test.
+        // or pass in a thing to test?
+        
+        
+        // Need to make sure the bindings here point to parent.
+        Iterator<Binding> iterBinding = convertToNodes(chain, nodeTable) ;
+        
+        // "input" will be closed by QueryIterTDB but is otherwise unused.
+        // "killList" will be aborted on timeout.
+        return new QueryIterTDB(iterBinding, killList, input, execCxt) ;
+    }
+    
+    /** Create an abortable iterator, storing it in the killList.
+     *  Just return the input iterator if killList is null.
+     */
+    static <T> Iterator<T> makeAbortable(Iterator<T> iter, List<Abortable> killList)
+    {
+        if ( killList == null )
+            return iter ;
+        IterAbortable<T> k = new IterAbortable<>(iter) ;
+        killList.add(k) ;
+        return k ;
+    }
+    
+    /** Iterator that adds an abort operation which can be called
+     *  at any time, including from another thread, and causes the
+     *  iterator to throw an exception when next touched (hasNext, next).  
+     */
+    static class IterAbortable<T> extends IteratorWrapper<T> implements Abortable
+    {
+        volatile boolean abortFlag = false ;
+        
+        public IterAbortable(Iterator<T> iterator)
+        {
+            super(iterator) ;
+        }
+        
+        /** Can be called asynchronously at any time. */
+        @Override
+        public void abort() { 
+            abortFlag = true ;
+        }
+        
+        @Override
+        public boolean hasNext()
+        {
+            if ( abortFlag )
+                throw new QueryCancelledException() ;
+            return iterator.hasNext() ; 
+        }
+        
+        @Override
+        public T next()
+        {
+            if ( abortFlag )
+                throw new QueryCancelledException() ;
+            return iterator.next() ; 
+        }
+    }
+    
+    public static Iterator<BindingNodeId> solve(NodeTupleTable nodeTupleTable, 
+                                                Tuple<Node> tuple,
+                                                boolean anyGraph,
+                                                Iterator<BindingNodeId> chain, Predicate<Tuple<NodeId>> filter,
+                                                ExecutionContext execCxt)
+    {
+        return new StageMatchTuple(nodeTupleTable, chain, tuple, anyGraph, filter, execCxt) ;
+    }
+
+    public static Binding convToBinding(BindingNodeId bindingNodeIds, NodeTable nodeTable) {
+        if ( true )
+            return new BindingTDB(bindingNodeIds, nodeTable) ;
+        else {
+            // Makes nodes immediately, causing unnecessary NodeTable accesses
+            // (e.g. project)
+            BindingMap b = BindingFactory.create() ;
+            for (Var v : bindingNodeIds) {
+                NodeId id = bindingNodeIds.get(v) ;
+                Node n = nodeTable.getNodeForNodeId(id) ;
+                b.add(v, n) ;
+            }
+            return b ;
+        }
+    }
+    
+    // Transform : Binding ==> BindingNodeId
+    public static Function<Binding, BindingNodeId> convFromBinding(final NodeTable nodeTable)
+    {
+        return binding -> SolverLib.convert(binding, nodeTable);
+    }
+    
+    /** Binding ==> BindingNodeId, given a NodeTable */
+    public static BindingNodeId convert(Binding binding, NodeTable nodeTable) 
+    {
+        if ( binding instanceof BindingTDB )
+            return ((BindingTDB)binding).getBindingId() ;
+
+        BindingNodeId b = new BindingNodeId(binding) ;
+        // and copy over, getting NodeIds.
+        Iterator<Var> vars = binding.vars() ;
+
+        for ( ; vars.hasNext() ; )
+        {
+            Var v = vars.next() ;
+            Node n = binding.get(v) ;  
+            if ( n == null )
+                // Variable mentioned in the binding but not actually defined.
+                // Can occur with BindingProject
+                continue ;
+
+            // Rely on the node table cache for efficiency - we will likely be
+            // repeatedly looking up the same node in different bindings.
+            NodeId id = nodeTable.getNodeIdForNode(n) ;
+            // Even put in "does not exist" for a node now known not to be in the DB.
+            b.put(v, id) ;
+        }
+        return b ;
+    }
+    
+    /** Find whether a specific graph name is in the quads table. */
+    public static QueryIterator testForGraphName(DatasetGraphTDB ds, Node graphNode, QueryIterator input,
+                                                 Predicate<Tuple<NodeId>> filter, ExecutionContext execCxt) {
+        NodeId nid = TDBInternal.getNodeId(ds, graphNode) ;
+        boolean exists = !NodeId.isDoesNotExist(nid) ;
+        if ( exists ) {
+            // Node exists but is it used in the quad position?
+            NodeTupleTable ntt = ds.getQuadTable().getNodeTupleTable() ;
+            // Don't worry about abortable - this iterator should be fast
+            // (with normal indexing - at least one G???).
+            // Either it finds a starting point, or it doesn't.  We are only 
+            // interested in the first .hasNext.
+            Iterator<Tuple<NodeId>> iter1 = ntt.find(nid, NodeId.NodeIdAny, NodeId.NodeIdAny, NodeId.NodeIdAny) ;
+            if ( filter != null )
+                iter1 = Iter.filter(iter1, filter) ;
+            exists = iter1.hasNext() ;
+        }
+
+        if ( exists )
+            return input ;
+        else {
+            input.close() ;
+            return QueryIterNullIterator.create(execCxt) ;
+        }
+    }
+
+    /** Find all the graph names in the quads table. */
+    public static QueryIterator graphNames(DatasetGraphTDB ds, Node graphNode, QueryIterator input,
+                                           Predicate<Tuple<NodeId>> filter, ExecutionContext execCxt) {
+        List<Abortable> killList = new ArrayList<>() ;
+        Iterator<Tuple<NodeId>> iter1 = ds.getQuadTable().getNodeTupleTable().find(NodeId.NodeIdAny, NodeId.NodeIdAny,
+                                                                                   NodeId.NodeIdAny, NodeId.NodeIdAny) ;
+        if ( filter != null )
+            iter1 = Iter.filter(iter1, filter) ;
+
+        Iterator<NodeId> iter2 = Iter.map(iter1, t -> t.get(0)) ;
+        // Project is cheap - don't bother wrapping iter1
+        iter2 = makeAbortable(iter2, killList) ;
+
+        Iterator<NodeId> iter3 = Iter.distinct(iter2) ;
+        iter3 = makeAbortable(iter3, killList) ;
+
+        Iterator<Node> iter4 = NodeLib.nodes(ds.getQuadTable().getNodeTupleTable().getNodeTable(), iter3) ;
+
+        final Var var = Var.alloc(graphNode) ;
+        Iterator<Binding> iterBinding = Iter.map(iter4, node -> BindingFactory.binding(var, node)) ;
+        // Not abortable.
+        return new QueryIterTDB(iterBinding, killList, input, execCxt) ;
+    }
+
+    public static Set<NodeId> convertToNodeIds(Collection<Node> nodes, DatasetGraphTDB dataset)
+    {
+        Set<NodeId> graphIds = new HashSet<>() ;
+        NodeTable nt = dataset.getQuadTable().getNodeTupleTable().getNodeTable() ;
+        for ( Node n : nodes )
+            graphIds.add(nt.getNodeIdForNode(n)) ;
+        return graphIds ;
+    }
+
+    public static Iterator<Tuple<NodeId>> unionGraph(NodeTupleTable ntt)
+    {
+        Iterator<Tuple<NodeId>> iter = ntt.find((NodeId)null, null, null, null) ;
+        iter = Iter.map(iter, quadsToAnyTriples) ;
+        //iterMatches = Iter.distinct(iterMatches) ;
+        
+        // This depends on the way indexes are chosen and
+        // the indexing pattern. It assumes that the index
+        // chosen ends in G, so the same triples are adjacent
+        // in a union query.
+        // See TupleTable.scanAllIndex, which ensures this.
+        iter = Iter.distinctAdjacent(iter) ;
+        return iter ;
+    }
+    
+    private static Function<Tuple<NodeId>, Tuple<NodeId>> quadsToAnyTriples = item -> {
+        return TupleFactory.create4(NodeId.NodeIdAny, item.get(1), item.get(2), item.get(3) ) ;
+    } ;
+
+}
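
A minimal sketch of the Binding/BindingNodeId conversion pair above (illustrative only; the method name roundTrip is not from this patch):

    import java.util.Iterator;

    import org.apache.jena.sparql.engine.binding.Binding;
    import org.apache.jena.tdb2.solver.BindingNodeId;
    import org.apache.jena.tdb2.solver.SolverLib;
    import org.apache.jena.tdb2.store.nodetable.NodeTable;

    public class ConversionExample {
        // Map a stream of Bindings into NodeId space and back; the NodeTable
        // resolves Node <-> NodeId in both directions.
        public static Iterator<Binding> roundTrip(Iterator<Binding> input, NodeTable nodeTable) {
            Iterator<BindingNodeId> ids = SolverLib.convertToIds(input, nodeTable);
            return SolverLib.convertToNodes(ids, nodeTable);
        }
    }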