You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@jena.apache.org by an...@apache.org on 2022/08/08 15:55:11 UTC

[jena] branch main updated: gh-1314: service enhancer plugin for lateral joins, bulk retrieval and caching

This is an automated email from the ASF dual-hosted git repository.

andy pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/jena.git


The following commit(s) were added to refs/heads/main by this push:
     new 7cec897c56 gh-1314: service enhancer plugin for lateral joins, bulk retrieval and caching
     new 98e55dc42e Merge pull request #1315 from Aklakan/gh-1314
7cec897c56 is described below

commit 7cec897c5685aa800f0eff2a163403844a028be9
Author: Claus Stadler <Ra...@googlemail.com>
AuthorDate: Mon May 16 08:36:10 2022 +0200

    gh-1314: service enhancer plugin for lateral joins, bulk retrieval and caching
---
 jena-extras/jena-serviceenhancer/README.md         |  10 +
 jena-extras/jena-serviceenhancer/pom.xml           | 149 ++++
 .../algebra/TransformSE_EffectiveOptions.java      |  53 ++
 .../enhancer/algebra/TransformSE_JoinStrategy.java | 196 ++++++
 .../algebra/TransformSE_OptimizeSelfJoin.java      |  69 ++
 .../assembler/DatasetAssemblerServiceEnhancer.java | 106 +++
 .../enhancer/assembler/ServiceEnhancerVocab.java   |  80 +++
 .../enhancer/claimingcache/AsyncClaimingCache.java |  59 ++
 .../claimingcache/AsyncClaimingCacheImplGuava.java | 450 ++++++++++++
 .../sparql/service/enhancer/claimingcache/Ref.java | 115 ++++
 .../enhancer/claimingcache/RefDelegate.java        |  76 +++
 .../enhancer/claimingcache/RefDelegateBase.java    |  35 +
 .../service/enhancer/claimingcache/RefFuture.java  |  59 ++
 .../enhancer/claimingcache/RefFutureImpl.java      | 101 +++
 .../service/enhancer/claimingcache/RefImpl.java    | 287 ++++++++
 .../enhancer/example/ServiceCachingExamples.java   | 264 +++++++
 .../enhancer/example/ServicePluginExamples.java    |  74 ++
 .../sparql/service/enhancer/function/cacheRm.java  | 101 +++
 .../jena/sparql/service/enhancer/impl/Batch.java   |  34 +
 .../sparql/service/enhancer/impl/BatchImpl.java    |  98 +++
 .../enhancer/impl/BatchQueryRewriteResult.java     |  54 ++
 .../service/enhancer/impl/BatchQueryRewriter.java  | 236 +++++++
 .../enhancer/impl/BatchQueryRewriterBuilder.java   |  70 ++
 .../sparql/service/enhancer/impl/CacheMode.java    |  32 +
 .../enhancer/impl/CacheProviderRegistry.java       |  28 +
 .../impl/ChainingServiceExecutorBulkCache.java     |  77 +++
 ...ChainingServiceExecutorBulkServiceEnhancer.java | 127 ++++
 .../sparql/service/enhancer/impl/Estimate.java     |  68 ++
 .../sparql/service/enhancer/impl/GroupedBatch.java |  25 +
 .../service/enhancer/impl/GroupedBatchImpl.java    |  50 ++
 .../enhancer/impl/IteratorFactoryWithBuffer.java   | 293 ++++++++
 .../service/enhancer/impl/OpServiceExecutor.java   |  28 +
 .../enhancer/impl/OpServiceExecutorImpl.java       |  82 +++
 .../service/enhancer/impl/OpServiceInfo.java       | 164 +++++
 .../service/enhancer/impl/PartitionRequest.java    |  64 ++
 .../enhancer/impl/QueryIterServiceBulk.java        | 757 +++++++++++++++++++++
 .../enhancer/impl/QueryIterWrapperCache.java       | 307 +++++++++
 .../service/enhancer/impl/RequestExecutor.java     | 257 +++++++
 .../service/enhancer/impl/RequestScheduler.java    | 212 ++++++
 .../service/enhancer/impl/ServiceCacheKey.java     |  84 +++
 .../enhancer/impl/ServiceCacheKeyFactory.java      | 105 +++
 .../service/enhancer/impl/ServiceCacheValue.java   |  44 ++
 .../sparql/service/enhancer/impl/ServiceOpts.java  | 260 +++++++
 .../enhancer/impl/ServiceResponseCache.java        | 110 +++
 .../enhancer/impl/ServiceResultSizeCache.java      |  70 ++
 .../sparql/service/enhancer/impl/SliceKey.java     |  76 +++
 .../enhancer/impl/util/AutoCloseableBase.java      |  55 ++
 .../util/AutoCloseableWithLeakDetectionBase.java   |  59 ++
 .../service/enhancer/impl/util/BindingUtils.java   | 111 +++
 .../enhancer/impl/util/CollectionUtils.java        |  33 +
 .../service/enhancer/impl/util/FinallyRunAll.java  | 101 +++
 .../enhancer/impl/util/GraphUtilsExtra.java        |  47 ++
 .../service/enhancer/impl/util/IteratorUtils.java  | 195 ++++++
 .../service/enhancer/impl/util/LockUtils.java      |  90 +++
 .../service/enhancer/impl/util/NodeUtilsExtra.java |  45 ++
 .../service/enhancer/impl/util/PageUtils.java      |  71 ++
 .../enhancer/impl/util/PeekIteratorLazy.java       |  53 ++
 .../enhancer/impl/util/PropFuncArgUtils.java       |  46 ++
 .../service/enhancer/impl/util/QueryIterDefer.java |  75 ++
 .../enhancer/impl/util/QueryIterSlottedBase.java   |  86 +++
 .../service/enhancer/impl/util/RangeUtils.java     |  80 +++
 .../enhancer/impl/util/SinglePrefetchIterator.java | 148 ++++
 .../enhancer/impl/util/StackTraceUtils.java        |  58 ++
 .../enhancer/impl/util/ThrowingRunnable.java       |  23 +
 .../service/enhancer/impl/util/VarScopeUtils.java  | 161 +++++
 .../service/enhancer/impl/util/VarUtilsExtra.java  |  44 ++
 .../enhancer/init/ServiceEnhancerConstants.java    |  62 ++
 .../service/enhancer/init/ServiceEnhancerInit.java | 211 ++++++
 .../sparql/service/enhancer/pfunction/cacheLs.java | 202 ++++++
 .../service/enhancer/slice/api/ArrayOps.java       |  77 +++
 .../service/enhancer/slice/api/ChannelBase.java    |  33 +
 .../service/enhancer/slice/api/Disposable.java     |  27 +
 .../service/enhancer/slice/api/HasArrayOps.java    |  24 +
 .../slice/api/IteratorOverReadableChannel.java     | 100 +++
 .../service/enhancer/slice/api/PageHelper.java     |  58 ++
 .../enhancer/slice/api/ReadableChannel.java        |  55 ++
 .../enhancer/slice/api/ReadableChannelBase.java    |  26 +
 .../api/ReadableChannelOverSliceAccessor.java      |  54 ++
 .../slice/api/ReadableChannelWithLimit.java        |  74 ++
 .../sparql/service/enhancer/slice/api/Slice.java   | 119 ++++
 .../service/enhancer/slice/api/SliceAccessor.java  | 111 +++
 .../enhancer/slice/api/SliceMetaDataBasic.java     |  95 +++
 .../service/enhancer/slice/api/SliceWithPages.java |  37 +
 .../enhancer/slice/impl/ArrayOpsObject.java        |  88 +++
 .../service/enhancer/slice/impl/ArrayReadable.java |  43 ++
 .../service/enhancer/slice/impl/ArrayWritable.java |  48 ++
 .../sparql/service/enhancer/slice/impl/Buffer.java |  26 +
 .../service/enhancer/slice/impl/BufferLike.java    |  35 +
 .../enhancer/slice/impl/BufferOverArray.java       |  86 +++
 .../service/enhancer/slice/impl/BufferView.java    |  32 +
 .../service/enhancer/slice/impl/RangeBuffer.java   |  78 +++
 .../enhancer/slice/impl/RangeBufferImpl.java       | 162 +++++
 .../enhancer/slice/impl/ReadOverGapException.java  |  51 ++
 .../enhancer/slice/impl/SliceAccessorImpl.java     | 465 +++++++++++++
 .../service/enhancer/slice/impl/SliceBase.java     |  96 +++
 .../enhancer/slice/impl/SliceInMemoryCache.java    | 165 +++++
 .../enhancer/slice/impl/SliceMetaDataImpl.java     | 138 ++++
 .../slice/impl/SliceMetaDataWithPages.java         |  27 +
 .../slice/impl/SliceMetaDataWithPagesImpl.java     |  81 +++
 .../org.apache.jena.sys.JenaSubsystemLifecycle     |   1 +
 .../TestDatasetAssemblerServiceEnhancer.java       | 110 +++
 ...AbstractTestServiceEnhancerResultSetLimits.java | 229 +++++++
 .../impl/TestServiceEnhancerCachedVsUncached.java  | 175 +++++
 .../enhancer/impl/TestServiceEnhancerMisc.java     | 465 +++++++++++++
 ...estServiceEnhancerResultSetLimitsWithCache.java |  27 +
 ...ServiceEnhancerResultSetLimitsWithoutCache.java |  27 +
 .../src/test/resources/log4j2.properties           |  27 +
 .../src/test/resources/semweb.wikidata.sample.ttl  |  17 +
 jena-extras/pom.xml                                |   1 +
 109 files changed, 11742 insertions(+)

diff --git a/jena-extras/jena-serviceenhancer/README.md b/jena-extras/jena-serviceenhancer/README.md
new file mode 100644
index 0000000000..a48b1d2ef4
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/README.md
@@ -0,0 +1,10 @@
+# Service Enhancer Plugin
+
+This plugin extends the functionality of the SERVICE clause with:
+
+- Bulk requests
+- Correlated joins also known as lateral joins
+- Caching
+
+For details see the documentation at: https://github.com/apache/jena-site/blob/main/source/documentation/query/service_enhancer.md
+
diff --git a/jena-extras/jena-serviceenhancer/pom.xml b/jena-extras/jena-serviceenhancer/pom.xml
new file mode 100644
index 0000000000..e6d012956b
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/pom.xml
@@ -0,0 +1,149 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+
+  <artifactId>jena-serviceenhancer</artifactId>
+  <name>Apache Jena - Extras - Service Enhancer</name>
+  <description>A plugin that extends the SPARQL SERVICE clause with bulk requests, lateral joins and advanced result set caching.</description>
+
+  <parent>
+    <groupId>org.apache.jena</groupId>
+    <artifactId>jena-extras</artifactId>
+    <relativePath>..</relativePath>
+    <version>4.6.0-SNAPSHOT</version>
+  </parent>
+
+  <properties>
+    <automatic.module.name>org.apache.jena.serviceplugins</automatic.module.name>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.jena</groupId>
+      <artifactId>jena-arq</artifactId>
+      <version>4.6.0-SNAPSHOT</version>
+    </dependency>
+
+    <dependency>
+      <groupId>org.xenei</groupId>
+      <artifactId>junit-contracts</artifactId> 
+      <scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <artifactId>commons-cli</artifactId>
+          <groupId>commons-cli</groupId>
+        </exclusion>
+        <exclusion>
+          <artifactId>commons-logging</artifactId>
+          <groupId>commons-logging</groupId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-slf4j-impl</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <configuration>
+          <archive>
+            <manifestEntries>
+              <Automatic-Module-Name>${automatic.module.name}</Automatic-Module-Name>
+            </manifestEntries>
+          </archive>
+        </configuration>
+      </plugin>
+
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-source-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>attach-sources</id>
+            <!-- <phase>package</phase> package is the default -->
+            <goals>
+              <goal>jar-no-fork</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+  <!-- Profile to build a "plugin jar bundle" that can be loaded with fuseki or any other jena-based app -->
+  <profiles>
+    <profile>
+      <id>bundle</id>
+      <dependencies>
+        <dependency>
+          <groupId>org.apache.jena</groupId>
+          <artifactId>jena-arq</artifactId>
+          <version>4.6.0-SNAPSHOT</version>
+          <scope>provided</scope>
+        </dependency>
+      </dependencies>
+      <build>  
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-shade-plugin</artifactId>
+            <executions>
+              <execution>
+                <phase>package</phase>
+                <goals>
+                  <goal>shade</goal>
+                </goals>
+                <configuration>
+                  <filters>
+                    <filter>
+                      <artifact>*:*</artifact>
+                      <excludes>
+                        <exclude>META-INF/*.SF</exclude>
+                        <exclude>META-INF/*.DSA</exclude>
+                        <exclude>META-INF/*.RSA</exclude>
+                      </excludes>
+                    </filter>
+                  </filters>
+                  <transformers>
+                    <transformer
+                      implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer" />
+                  </transformers>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+  </profiles>
+
+</project>
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/algebra/TransformSE_EffectiveOptions.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/algebra/TransformSE_EffectiveOptions.java
new file mode 100644
index 0000000000..8f5456db49
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/algebra/TransformSE_EffectiveOptions.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.algebra;
+
+import org.apache.jena.sparql.algebra.Op;
+import org.apache.jena.sparql.algebra.TransformCopy;
+import org.apache.jena.sparql.algebra.op.OpService;
+import org.apache.jena.sparql.service.enhancer.impl.ServiceOpts;
+
+/**
+ * Detects options on SERVICE and materializes them.
+ * In the case of self-join-checks an optimizer will be run preemptively unless
+ * the option 'optimizer:off' is present.
+ *
+ * <pre>
+ * SERVICE &lt;loop:&gt; {
+ *   SERVICE &lt;bulk:&gt; {
+ *      SERVICE &lt;https://dbpedia.org/sparql&gt; { }
+ *   }
+ * }
+ * </pre>
+ * becomes
+ * <pre>
+ * SERVICE &lt;loop:bulk:https://dbpedia.org/sparql&gt; { }
+ * </pre>
+ */
+public class TransformSE_EffectiveOptions
+    extends TransformCopy
+{
+    @Override
+    public Op transform(OpService opService, Op subOp) {
+        // Rebuild the OpService over the (already transformed) sub-op, then let
+        // ServiceOpts collapse any nested option-services into one effective SERVICE.
+        OpService tmp = new OpService(opService.getService(), subOp, opService.getSilent());
+        ServiceOpts so = ServiceOpts.getEffectiveService(tmp);
+        OpService result = so.toService();
+        return result;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/algebra/TransformSE_JoinStrategy.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/algebra/TransformSE_JoinStrategy.java
new file mode 100644
index 0000000000..af8d877e95
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/algebra/TransformSE_JoinStrategy.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.algebra;
+
+import java.util.HashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.jena.graph.Node;
+import org.apache.jena.sparql.algebra.Op;
+import org.apache.jena.sparql.algebra.OpVars;
+import org.apache.jena.sparql.algebra.TransformCopy;
+import org.apache.jena.sparql.algebra.op.OpConditional;
+import org.apache.jena.sparql.algebra.op.OpDisjunction;
+import org.apache.jena.sparql.algebra.op.OpFilter;
+import org.apache.jena.sparql.algebra.op.OpJoin;
+import org.apache.jena.sparql.algebra.op.OpLeftJoin;
+import org.apache.jena.sparql.algebra.op.OpSequence;
+import org.apache.jena.sparql.algebra.op.OpService;
+import org.apache.jena.sparql.core.Var;
+import org.apache.jena.sparql.engine.Rename;
+import org.apache.jena.sparql.expr.ExprList;
+import org.apache.jena.sparql.graph.NodeTransform;
+import org.apache.jena.sparql.graph.NodeTransformLib;
+import org.apache.jena.sparql.service.enhancer.impl.ServiceOpts;
+
+/**
+ * Checks for the presence of <code>SERVICE &lt;loop:&gt; { }</code>,
+ * transforms those into linear joins using {@link OpSequence} / {@link OpDisjunction}
+ * and adjusts variable scopes.
+ *
+ * All variables mentioned on the rhs which have the same reverse-renaming as variables
+ * visible on the lhs will be substituted with the lhs variant.
+ */
+public class TransformSE_JoinStrategy extends TransformCopy
+{
+    public TransformSE_JoinStrategy()
+    {}
+
+    @Override
+    public Op transform(OpJoin opJoin, Op left, Op right)
+    {
+        boolean canDoLinear = false;
+        Op effectiveRight = right;
+        // Linearization only applies when the rhs is a SERVICE tagged with the 'loop:' option
+        if (right instanceof OpService) {
+            OpService op = (OpService)right;
+            ServiceOpts opts = ServiceOpts.getEffectiveService(op);
+            canDoLinear = opts.containsKey(ServiceOpts.SO_LOOP);
+            if (canDoLinear) {
+                // Substitute rhs variables that implicitly join with variables visible on the lhs
+                NodeTransform joinVarRename = renameForImplicitJoinVars(left);
+                effectiveRight = NodeTransformLib.transform(joinVarRename, right);
+            }
+        }
+
+        // Loop services become a linear (sequential) join; otherwise fall back to the default copy
+        Op result = canDoLinear
+            ? OpSequence.create(left, effectiveRight)
+            : super.transform(opJoin, left, effectiveRight)
+            ;
+
+        return result;
+    }
+
+    @Override
+    public Op transform(OpSequence opSequence, List<Op> elts) {
+        // Accumulated visible vars
+        Set<Var> visibleVarsLeft = new LinkedHashSet<>();
+
+        OpSequence result = OpSequence.create();
+        for (Op right : elts) {
+            Op newOp = right;
+            if (right instanceof OpService) {
+                OpService op = (OpService)right;
+                ServiceOpts opts = ServiceOpts.getEffectiveService(op);
+                boolean isLoop = opts.containsKey(ServiceOpts.SO_LOOP);
+                if (isLoop) {
+                    // Rename against everything visible so far in the sequence
+                    NodeTransform joinVarRename = renameForImplicitJoinVars(visibleVarsLeft);
+                    newOp = NodeTransformLib.transform(joinVarRename, right);
+                }
+            }
+
+            // Add the now visible vars as new ones
+            Set<Var> visibleVarsRight = OpVars.visibleVars(newOp);
+            visibleVarsLeft.addAll(visibleVarsRight);
+
+            result.add(newOp);
+        }
+
+        return result;
+    }
+
+    // NOTE(review): the parameter is named 'opSequence' although its type is OpDisjunction;
+    // this looks like a copy/paste from the OpSequence overload — consider renaming it.
+    @Override
+    public Op transform(OpDisjunction opSequence, List<Op> elts) {
+        // Accumulated visible vars
+        Set<Var> visibleVarsLeft = new LinkedHashSet<>();
+
+        OpDisjunction result = OpDisjunction.create();
+        for (Op right : elts) {
+            Op newOp = right;
+            if (right instanceof OpService) {
+                OpService op = (OpService)right;
+                ServiceOpts opts = ServiceOpts.getEffectiveService(op);
+                boolean isLoop = opts.containsKey(ServiceOpts.SO_LOOP);
+                if (isLoop) {
+                    NodeTransform joinVarRename = renameForImplicitJoinVars(visibleVarsLeft);
+                    newOp = NodeTransformLib.transform(joinVarRename, right);
+                }
+            }
+
+            // Add the now visible vars as new ones
+            Set<Var> visibleVarsRight = OpVars.visibleVars(newOp);
+            visibleVarsLeft.addAll(visibleVarsRight);
+
+            result.add(newOp);
+        }
+
+        return result;
+    }
+
+    @Override
+    public Op transform(OpLeftJoin opLeftJoin, Op left, Op right)
+    {
+        boolean canDoLinear = false;
+        Op effectiveRight = right;
+        if (right instanceof OpService) {
+            OpService op = (OpService)right;
+            ServiceOpts opts = ServiceOpts.getEffectiveService(op);
+            canDoLinear = opts.containsKey(ServiceOpts.SO_LOOP);
+            if (canDoLinear) {
+                NodeTransform joinVarRename = renameForImplicitJoinVars(left);
+                effectiveRight = NodeTransformLib.transform(joinVarRename, right);
+
+                // A left join's filter expressions must be renamed consistently and
+                // re-attached as a filter over the rhs
+                ExprList joinExprs = opLeftJoin.getExprs();
+                if (joinExprs != null) {
+                    ExprList effectiveExprs = NodeTransformLib.transform(joinVarRename, joinExprs);
+                    effectiveRight = OpFilter.filterBy(effectiveExprs, effectiveRight);
+                }
+            }
+        }
+
+        // Loop services turn the left join into a conditional (linear) form
+        Op result = canDoLinear
+                ? new OpConditional(left, effectiveRight)
+                : super.transform(opLeftJoin, left, effectiveRight)
+                ;
+
+        return result;
+    }
+
+    /**
+     * Remove scoping of all mentioned rhs variables which implicitly join with those visible on the lhs:
+     *
+     * Join on all variables v that are visible in lhs where
+     * there exists a mentioned variable v' in rhs where reverseRename(v) == reverseRename(v')
+     */
+    public static NodeTransform renameForImplicitJoinVars(Op left) {
+        Set<Var> visibleInLhs = OpVars.visibleVars(left);
+        return renameForImplicitJoinVars(visibleInLhs);
+    }
+
+    public static NodeTransform renameForImplicitJoinVars(Set<Var> visibleInLhs) {
+        // Is it possible to have multiple _visible_ variables that map to same variable when reverse-renamed?!
+        // The code assumes no
+        Map<Var, Var> lhsPlainToScoped = visibleInLhs.stream()
+                .collect(Collectors.toMap(
+                        v -> (Var)Rename.reverseVarRename(v),
+                        v -> v));
+
+        // Cache reverse renamings; the transform may see the same node many times
+        Map<Node, Node> cache = new HashMap<>();
+        NodeTransform joinVarRename = n -> {
+            Node plain = cache.computeIfAbsent(n, Rename::reverseVarRename);
+            Var scopedLhs = lhsPlainToScoped.get(plain);
+            // Keep the node as-is unless it implicitly joins with an lhs variable
+            Node r = scopedLhs == null ? n : scopedLhs;
+            return r;
+        };
+
+        return joinVarRename;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/algebra/TransformSE_OptimizeSelfJoin.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/algebra/TransformSE_OptimizeSelfJoin.java
new file mode 100644
index 0000000000..e159c83cc4
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/algebra/TransformSE_OptimizeSelfJoin.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.algebra;
+
+import org.apache.jena.sparql.algebra.Op;
+import org.apache.jena.sparql.algebra.TransformCopy;
+import org.apache.jena.sparql.algebra.op.OpService;
+import org.apache.jena.sparql.algebra.optimize.Rewrite;
+import org.apache.jena.sparql.service.enhancer.impl.ServiceOpts;
+import org.apache.jena.sparql.service.enhancer.init.ServiceEnhancerConstants;
+
+/** It seems that preemptive optimization before execution does not work with property
+ *  functions. So this class is for now not used. */
+public class TransformSE_OptimizeSelfJoin
+    extends TransformCopy
+{
+    // Optimizer for rewriting self
+    protected Rewrite selfRewrite;
+
+    public TransformSE_OptimizeSelfJoin(Rewrite selfRewrite) {
+        super();
+        this.selfRewrite = selfRewrite;
+    }
+
+    @Override
+    public Op transform(OpService opService, Op subOp) {
+        Op result;
+        // Collapse nested option-services into a single effective SERVICE first
+        ServiceOpts so = ServiceOpts.getEffectiveService(
+                new OpService(opService.getService(), subOp, opService.getSilent()));
+
+        OpService targetService = so.getTargetService();
+        // Only rewrite requests against the dataset itself (urn:x-arq:self)
+        if (ServiceEnhancerConstants.SELF.equals(targetService.getService())) {
+            // Default is "on"; any value other than (case-insensitive) "off" enables the rewrite
+            String optimizerOpt = so.getFirstValue(ServiceOpts.SO_OPTIMIZE, "on", "on");
+
+            if (!optimizerOpt.equalsIgnoreCase("off")) {
+                Op newSub = selfRewrite.rewrite(targetService.getSubOp());
+
+                so.removeKey(ServiceOpts.SO_OPTIMIZE);
+                // so.add(ServiceOpts.SO_OPTIMIZE, "off");
+                // so.add(ServiceOpts.SO_OPTIMIZE, "on");
+                result = new ServiceOpts(
+                        new OpService(targetService.getService(), newSub, targetService.getSilent()),
+                        so.getOptions()).toService();
+            } else {
+                result = so.toService();
+            }
+        } else {
+            result = so.toService();
+        }
+
+        return result;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/assembler/DatasetAssemblerServiceEnhancer.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/assembler/DatasetAssemblerServiceEnhancer.java
new file mode 100644
index 0000000000..4e39f80ecf
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/assembler/DatasetAssemblerServiceEnhancer.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.assembler;
+
+import java.util.Objects;
+
+import org.apache.jena.assembler.Assembler;
+import org.apache.jena.assembler.exceptions.AssemblerException;
+import org.apache.jena.atlas.logging.Log;
+import org.apache.jena.ext.com.google.common.base.Preconditions;
+import org.apache.jena.graph.Node;
+import org.apache.jena.query.ARQ;
+import org.apache.jena.query.Dataset;
+import org.apache.jena.query.DatasetFactory;
+import org.apache.jena.rdf.model.RDFNode;
+import org.apache.jena.rdf.model.Resource;
+import org.apache.jena.sparql.core.DatasetGraph;
+import org.apache.jena.sparql.core.DatasetGraphWrapper;
+import org.apache.jena.sparql.core.assembler.DatasetAssembler;
+import org.apache.jena.sparql.service.enhancer.impl.ServiceResponseCache;
+import org.apache.jena.sparql.service.enhancer.impl.util.GraphUtilsExtra;
+import org.apache.jena.sparql.service.enhancer.init.ServiceEnhancerConstants;
+import org.apache.jena.sparql.service.enhancer.init.ServiceEnhancerInit;
+import org.apache.jena.sparql.util.Context;
+import org.apache.jena.sparql.util.graph.GraphUtils;
+
+/**
+ * Assembler that sets up a base dataset's context with the service enhancer machinery.
+ * As changes are only applied to the context the resulting dataset is the base dataset itself.
+ */
+public class DatasetAssemblerServiceEnhancer
+    extends DatasetAssembler
+{
+    @Override
+    public DatasetGraph createDataset(Assembler a, Resource root) {
+        // Assemble the referenced base dataset; it must yield a Dataset instance
+        Resource baseDatasetRes = GraphUtils.getResourceValue(root, ServiceEnhancerVocab.baseDataset);
+        Objects.requireNonNull(baseDatasetRes, "No ja:baseDataset specified on " + root);
+        Object obj = a.open(baseDatasetRes);
+
+        Dataset result;
+        if (obj instanceof Dataset) {
+            result = (Dataset)obj;
+            Context cxt = result.getContext();
+            ServiceEnhancerInit.wrapOptimizer(cxt, ARQ.getContext());
+
+            // The node to which the dataset's self id resolves; defaults to the base dataset resource
+            RDFNode selfIdRes = GraphUtils.getAsRDFNode(root, ServiceEnhancerVocab.datasetId);
+
+            Node selfId = selfIdRes == null
+                    ? baseDatasetRes.asNode()
+                    : selfIdRes.asNode();
+
+            // Management functions are opt-in; absent property means disabled
+            RDFNode enableMgmtRdfNode = GraphUtils.getAsRDFNode(root, ServiceEnhancerVocab.enableMgmt);
+            boolean enableMgmt = enableMgmtRdfNode == null ? false : enableMgmtRdfNode.asLiteral().getBoolean();
+
+            cxt.set(ServiceEnhancerConstants.datasetId, selfId);
+
+            // Setup a dataset local cache (replaces an existing one)
+            // if any of the appropriate properties are present
+            if (root.hasProperty(ServiceEnhancerVocab.cacheMaxEntryCount) ||
+                root.hasProperty(ServiceEnhancerVocab.cachePageSize) ||
+                root.hasProperty(ServiceEnhancerVocab.cacheMaxPageCount)) {
+                int maxEntryCount = GraphUtilsExtra.getAsInt(root, ServiceEnhancerVocab.cacheMaxEntryCount, ServiceResponseCache.DFT_MAX_ENTRY_COUNT);
+                int pageSize = GraphUtilsExtra.getAsInt(root, ServiceEnhancerVocab.cachePageSize, ServiceResponseCache.DFT_PAGE_SIZE);
+                int maxPageCount = GraphUtilsExtra.getAsInt(root, ServiceEnhancerVocab.cacheMaxPageCount, ServiceResponseCache.DFT_MAX_PAGE_COUNT);
+
+                // All cache sizing parameters must be strictly positive
+                Preconditions.checkArgument(maxEntryCount > 0, ServiceEnhancerVocab.cacheMaxEntryCount.getURI() + " requires a value greater than 0");
+                Preconditions.checkArgument(pageSize > 0, ServiceEnhancerVocab.cachePageSize.getURI() + " requires a value greater than 0");
+                Preconditions.checkArgument(maxPageCount > 0, ServiceEnhancerVocab.cacheMaxPageCount.getURI() + " requires a value greater than 0");
+
+                ServiceResponseCache cache = new ServiceResponseCache(maxEntryCount, pageSize, maxPageCount);
+                ServiceResponseCache.set(cxt, cache);
+            }
+
+            // If management is enabled then return a wrapped dataset with a copy of the context which has
+            // mgmt enabled
+            if (enableMgmt) {
+                cxt = new Context(cxt);
+                cxt.set(ServiceEnhancerConstants.enableMgmt, true);
+                result = DatasetFactory.wrap(new DatasetGraphWrapper(result.asDatasetGraph(), cxt));
+            }
+
+            Log.info(DatasetAssemblerServiceEnhancer.class, "Dataset self id set to " + selfId);
+        } else {
+            Class<?> cls = obj == null ? null : obj.getClass();
+            throw new AssemblerException(root, "Expected ja:baseDataset to be a Dataset but instead got " + Objects.toString(cls));
+        }
+
+        return result.asDatasetGraph();
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/assembler/ServiceEnhancerVocab.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/assembler/ServiceEnhancerVocab.java
new file mode 100644
index 0000000000..11444de2e6
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/assembler/ServiceEnhancerVocab.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.assembler;
+
+import org.apache.jena.assembler.JA;
+import org.apache.jena.rdf.model.Property;
+import org.apache.jena.rdf.model.Resource;
+import org.apache.jena.rdf.model.ResourceFactory;
+import org.apache.jena.riot.system.PrefixMap;
+import org.apache.jena.shared.PrefixMapping;
+
+/** Vocabulary for assembler-based configuration of the service enhancer plugin */
+public class ServiceEnhancerVocab {
+    public static final String NS = "http://jena.apache.org/service-enhancer#";
+
+    public static String getURI() { return NS; }
+
+    /** Assembler resource type for datasets wrapped with the service enhancer machinery */
+    public static final Resource DatasetServiceEnhancer = ResourceFactory.createResource(NS + "DatasetServiceEnhancer");
+
+    /** The id (a node) to which to resolve urn:x-arq:self */
+    public static final Property datasetId              = ResourceFactory.createProperty(NS + "datasetId");
+
+    /** Enable privileged management functions; creates a wrapped dataset with a copied context */
+    public static final Property enableMgmt             = ResourceFactory.createProperty(NS + "enableMgmt");
+
+    // The term "baseDataset" is not officially in ja but it seems reasonable to eventually add it there
+    // (so far ja only defines baseModel)
+    public static final Property baseDataset            = ResourceFactory.createProperty(JA.getURI() + "baseDataset");
+
+    /** Maximum number of entries the service cache can hold */
+    public static final Property cacheMaxEntryCount = ResourceFactory.createProperty(NS + "cacheMaxEntryCount") ;
+
+    /** Maximum number of pages for bindings an individual cache entry can hold */
+    public static final Property cacheMaxPageCount = ResourceFactory.createProperty(NS + "cacheMaxPageCount") ;
+
+    /** Number of bindings a page can hold */
+    public static final Property cachePageSize = ResourceFactory.createProperty(NS + "cachePageSize") ;
+
+    /** Adds the following prefix declarations to the given map, thereby overriding existing ones:
+     * <table style="border: 1px solid;">
+     *   <tr><th>Prefix</th><th>IRI</th></tr>
+     *   <tr><td>ja</td><td>{@value JA#uri}</td></tr>
+     *   <tr><td>se</td><td>{@value #NS}</td></tr>
+     * </table>
+     * @param pm the prefix map to modify; returned for chaining
+     */
+    public static PrefixMap addPrefixes(PrefixMap pm) {
+        pm.add("ja", JA.getURI());
+        pm.add("se", ServiceEnhancerVocab.getURI());
+        return pm;
+    }
+
+    /** Adds the following prefix declarations to the given mapping, thereby overriding existing ones:
+     * <table style="border: 1px solid;">
+     *   <tr><th>Prefix</th><th>IRI</th></tr>
+     *   <tr><td>ja</td><td>{@value JA#uri}</td></tr>
+     *   <tr><td>se</td><td>{@value #NS}</td></tr>
+     * </table>
+     * @param pm the prefix mapping to modify; returned for chaining
+     */
+    public static PrefixMapping addPrefixes(PrefixMapping pm) {
+        pm.setNsPrefix("ja", JA.getURI());
+        pm.setNsPrefix("se", ServiceEnhancerVocab.getURI());
+        return pm;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/AsyncClaimingCache.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/AsyncClaimingCache.java
new file mode 100644
index 0000000000..d67dd8085a
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/AsyncClaimingCache.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.claimingcache;
+
+import java.util.Collection;
+import java.util.function.Predicate;
+
+import org.apache.jena.sparql.service.enhancer.slice.api.Disposable;
+
+/**
+ * Interface for an async cache that allows "claiming" entries.
+ * Claiming means making explicit references to entries.
+ * As long as an entry is claimed it will not be evicted.
+ * Furthermore, eviction guards can be placed that prevent eviction even of
+ * non-claimed entries.
+ *
+ * @param <K> The key type
+ * @param <V> The value type
+ */
+public interface AsyncClaimingCache<K, V> {
+
+    /**
+     * Claim a reference to the key's entry. Loading of the entry is triggered if
+     * it is not yet present. The entry will not be evicted as long as at least one
+     * claimed reference to it remains open.
+     *
+     * @param key the key whose entry to claim
+     * @return a future-based reference to the entry; must be closed to release the claim
+     */
+    RefFuture<V> claim(K key);
+
+    /**
+     * Claim a key for which loading has already been triggered or which is already loaded.
+     * Calling this method should not trigger loading.
+     */
+    RefFuture<V> claimIfPresent(K key);
+
+    /**
+     * Protect eviction of certain keys as long as the guard is not disposed.
+     * Disposing may immediately evict all no-longer-guarded items. */
+    Disposable addEvictionGuard(Predicate<? super K> predicate);
+
+    /** Return a snapshot of all present keys */
+    Collection<K> getPresentKeys();
+
+    // Invalidate (remove) all entries, respectively only those for the given keys.
+    void invalidateAll();
+    void invalidateAll(Iterable<? extends K> keys);
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/AsyncClaimingCacheImplGuava.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/AsyncClaimingCacheImplGuava.java
new file mode 100644
index 0000000000..80e3c7c2b1
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/AsyncClaimingCacheImplGuava.java
@@ -0,0 +1,450 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.claimingcache;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.BiConsumer;
+import java.util.function.Function;
+import java.util.function.Predicate;
+
+import org.apache.jena.ext.com.google.common.cache.CacheBuilder;
+import org.apache.jena.ext.com.google.common.cache.CacheLoader;
+import org.apache.jena.ext.com.google.common.cache.LoadingCache;
+import org.apache.jena.ext.com.google.common.cache.RemovalCause;
+import org.apache.jena.ext.com.google.common.cache.RemovalListener;
+import org.apache.jena.ext.com.google.common.cache.RemovalNotification;
+import org.apache.jena.sparql.service.enhancer.impl.util.LockUtils;
+import org.apache.jena.sparql.service.enhancer.slice.api.Disposable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Implementation of async claiming cache.
+ * Claimed entries will never be evicted. Conversely, unclaimed items are added to a cache such that timely re-claiming
+ * will be fast.
+ *
+ * Use cases:
+ * <ul>
+ *   <li>Resource sharing: Ensure that the same resource is handed to all clients requesting one by key.</li>
+ *   <li>Resource pooling: Claimed resources will never be closed, but unclaimed resources (e.g. something backed by an input stream)
+ *   may remain on standby for a while.</li>
+ * </ul>
+ *
+ * Another way to view this class is as a mix of a map with weak values and a cache.
+ *
+ * @param <K> The key type
+ * @param <V> The value type
+ */
+public class AsyncClaimingCacheImplGuava<K, V>
+    implements AsyncClaimingCache<K, V>
+{
+    private static final Logger logger = LoggerFactory.getLogger(AsyncClaimingCacheImplGuava.class);
+
+    // level1: claimed items - those items will never be evicted as long as the references are not closed
+    protected Map<K, RefFuture<V>> level1;
+
+    // level2: the Guava cache - items in this cache are not claimed and are subject to eviction according to configuration
+    protected LoadingCache<K, CompletableFuture<V>> level2;
+
+    // level3: items evicted from level2 but caught by an eviction guard
+    protected Map<K, V> level3;
+
+    // Runs atomically in the claim action after the entry exists in level1
+    protected BiConsumer<K, RefFuture<V>> claimListener;
+
+    // Runs atomically in the unclaim action before the entry is removed from level1
+    protected BiConsumer<K, RefFuture<V>> unclaimListener;
+
+    // A lock that prevents invalidation while entries are being loaded
+    protected ReentrantReadWriteLock invalidationLock = new ReentrantReadWriteLock();
+
+    // A collection of deterministic predicates for 'catching' entries evicted by level2
+    // Caught entries are added to level3
+    protected final Collection<Predicate<? super K>> evictionGuards;
+
+    // Runs atomically when an item is evicted or invalidated and will thus no longer be present in any level
+    // See also https://github.com/ben-manes/caffeine/wiki/Removal
+    protected RemovalListener<K, V> atomicRemovalListener;
+
+    // Keys in this set have their level2 removal events ignored; used when transferring an entry
+    // from level2 to level1 so that the transfer is not reported as a removal
+    protected Set<K> suppressedRemovalEvents;
+
+    public AsyncClaimingCacheImplGuava(
+            Map<K, RefFuture<V>> level1,
+            LoadingCache<K, CompletableFuture<V>> level2,
+            Map<K, V> level3,
+            Collection<Predicate<? super K>> evictionGuards,
+            BiConsumer<K, RefFuture<V>> claimListener,
+            BiConsumer<K, RefFuture<V>> unclaimListener,
+            RemovalListener<K, V> atomicRemovalListener,
+            Set<K> suppressedRemovalEvents
+            ) {
+        super();
+        this.level1 = level1;
+        this.level2 = level2;
+        this.level3 = level3;
+        this.evictionGuards = evictionGuards;
+        this.claimListener = claimListener;
+        this.unclaimListener = unclaimListener;
+        this.atomicRemovalListener = atomicRemovalListener;
+        this.suppressedRemovalEvents = suppressedRemovalEvents;
+    }
+
+    // Per-key latch used to serialize claim/unclaim actions for the same key
+    protected Map<K, Latch> keyToSynchronizer = new ConcurrentHashMap<>();
+
+    /**
+     * Registers a predicate that 'catches' entries about to be evicted.
+     * When the registration is closed, keys that have not been moved back into the cache
+     * by being claimed will be immediately evicted.
+     */
+    @Override
+    public Disposable addEvictionGuard(Predicate<? super K> predicate) {
+        // Note: LinkedList.listIterator() becomes invalidated after any modification
+        // In principle a LinkedList would be the more appropriate data structure
+        synchronized (evictionGuards) {
+            evictionGuards.add(predicate);
+        }
+
+        return () -> {
+            synchronized (evictionGuards) {
+                evictionGuards.remove(predicate);
+                runLevel3Eviction();
+            }
+        };
+    }
+
+    /** Called while being synchronized on the evictionGuards; evicts all level3 entries no longer guarded */
+    protected void runLevel3Eviction() {
+        Iterator<Entry<K, V>> it = level3.entrySet().iterator();
+        while (it.hasNext()) {
+            Entry<K, V> e = it.next();
+            K k = e.getKey();
+            V v = e.getValue();
+
+            boolean isGuarded = evictionGuards.stream().anyMatch(p -> p.test(k));
+            if (!isGuarded) {
+                atomicRemovalListener.onRemoval(RemovalNotification.create(k, v, RemovalCause.COLLECTED));
+                it.remove();
+            }
+        }
+    }
+
+    @Override
+    public RefFuture<V> claim(K key) {
+        RefFuture<V> result;
+
+        // We rely on ConcurrentHashMap.compute operating atomically
+        Latch synchronizer = keyToSynchronizer.compute(key, (k, before) -> before == null ? new Latch() : before.inc());
+
+        // /guarded_entry/ marker; referenced in comment below
+
+        synchronized (synchronizer) {
+            keyToSynchronizer.compute(key, (k, before) -> before.dec());
+            boolean[] isFreshSecondaryRef = { false };
+
+            // Guard against concurrent invalidations
+            @SuppressWarnings("resource")
+            RefFuture<V> secondaryRef = LockUtils.runWithLock(invalidationLock.readLock(), () -> {
+                return level1.computeIfAbsent(key, k -> {
+                    // Wrap the loaded reference such that closing the fully loaded reference adds it to level 2
+
+                    logger.trace("Claiming item [{}] from level2", key);
+                    CompletableFuture<V> future;
+                    try {
+                        future = level2.get(key);
+                    } catch (ExecutionException e) {
+                        throw new RuntimeException("Should not happen", e);
+                    }
+
+                    // Remove the entry from level2 while suppressing the removal event - this is a
+                    // transfer to level1, not an actual removal
+                    suppressedRemovalEvents.add(key);
+                    level2.asMap().remove(key);
+                    suppressedRemovalEvents.remove(key);
+
+                    @SuppressWarnings("unchecked")
+                    RefFuture<V>[] holder = new RefFuture[] {null};
+
+                    Ref<CompletableFuture<V>> freshSecondaryRef =
+                        RefImpl.create(future, synchronizer, () -> {
+
+                            // This is the unclaim action
+
+                            RefFuture<V> v = holder[0];
+
+                            if (unclaimListener != null) {
+                                unclaimListener.accept(key, v);
+                            }
+
+                            RefFutureImpl.cancelFutureOrCloseValue(future, null);
+                            level1.remove(key);
+                            logger.trace("Item [{}] was unclaimed. Transferring to level2.", key);
+                            level2.put(key, future);
+
+                            // If there are no waiting threads we can remove the latch
+                            keyToSynchronizer.compute(key, (kk, before) -> before.get() == 0 ? null : before);
+                            // syncRef.close();
+                        });
+                    isFreshSecondaryRef[0] = true;
+
+                    RefFuture<V> r = RefFutureImpl.wrap(freshSecondaryRef);
+                    holder[0] = r;
+
+                    return r;
+                });
+            });
+
+            result = secondaryRef.acquire();
+
+            if (claimListener != null) {
+                claimListener.accept(key, result);
+            }
+
+            if (isFreshSecondaryRef[0]) {
+                secondaryRef.close();
+            }
+        }
+
+        return result;
+    }
+
+    /** Builder for {@link AsyncClaimingCacheImplGuava} instances */
+    public static class Builder<K, V>
+    {
+        protected CacheBuilder<Object, Object> cacheBuilder;
+        protected Function<K, V> cacheLoader;
+        protected BiConsumer<K, RefFuture<V>> claimListener;
+        protected BiConsumer<K, RefFuture<V>> unclaimListener;
+        protected RemovalListener<K, V> userAtomicRemovalListener;
+
+        Builder<K, V> setCacheBuilder(CacheBuilder<Object, Object> cacheBuilder) {
+            this.cacheBuilder = cacheBuilder;
+            return this;
+        }
+
+        public Builder<K, V> setClaimListener(BiConsumer<K, RefFuture<V>> claimListener) {
+            this.claimListener = claimListener;
+            return this;
+        }
+
+        public Builder<K, V> setUnclaimListener(BiConsumer<K, RefFuture<V>> unclaimListener) {
+            this.unclaimListener = unclaimListener;
+            return this;
+        }
+
+        public Builder<K, V> setCacheLoader(Function<K, V> cacheLoader) {
+            this.cacheLoader = cacheLoader;
+            return this;
+        }
+
+        public Builder<K, V> setAtomicRemovalListener(RemovalListener<K, V> userAtomicRemovalListener) {
+            this.userAtomicRemovalListener = userAtomicRemovalListener;
+            return this;
+        }
+
+        @SuppressWarnings("unchecked")
+        public AsyncClaimingCacheImplGuava<K, V> build() {
+
+            Map<K, RefFuture<V>> level1 = new ConcurrentHashMap<>();
+            Map<K, V> level3 = new ConcurrentHashMap<>();
+            Collection<Predicate<? super K>> evictionGuards = new ArrayList<>();
+
+            // Removal listener that diverts guarded entries to level3 instead of removing them outright
+            RemovalListener<K, V> level3AwareAtomicRemovalListener = n -> {
+                K k = n.getKey();
+                V v = n.getValue();
+                RemovalCause c = n.getCause();
+
+                // Check for actual removal - key no longer present in level1
+                if (!level1.containsKey(k)) {
+
+                    boolean isGuarded = false;
+                    synchronized (evictionGuards) {
+                        // Check for an eviction guard
+                        for (Predicate<? super K> evictionGuard : evictionGuards) {
+                            isGuarded = evictionGuard.test(k);
+                            if (isGuarded) {
+                                logger.debug("Protecting from eviction: {} - {} items protected", k, level3.size());
+                                level3.put(k, v);
+                                break;
+                            }
+                        }
+                    }
+
+                    if (!isGuarded) {
+                        if (userAtomicRemovalListener != null) {
+                            userAtomicRemovalListener.onRemoval(RemovalNotification.create(k, v, c));
+                        }
+                    }
+                }
+            };
+
+            Set<K> suppressedRemovalEvents = Collections.newSetFromMap(new ConcurrentHashMap<K, Boolean>());
+
+            cacheBuilder.removalListener(n -> {
+                K kk = (K)n.getKey();
+
+                if (!suppressedRemovalEvents.contains(kk)) {
+                    CompletableFuture<V> cfv = (CompletableFuture<V>)n.getValue();
+
+                    V vv = null;
+                    if (cfv.isDone()) {
+                        try {
+                            vv = cfv.get();
+                        } catch (InterruptedException | ExecutionException e) {
+                            throw new RuntimeException("Should not happen", e);
+                        }
+                    }
+
+                    RemovalCause c = n.getCause();
+
+                    level3AwareAtomicRemovalListener.onRemoval(RemovalNotification.create(kk, vv, c));
+                }
+            });
+
+
+            // Cache loader that checks for existing items in level3
+            Function<K, V> level3AwareCacheLoader = k -> {
+                Object[] tmp = new Object[] { null };
+                // Atomically get and remove an existing key from level3
+                level3.compute(k, (kk, v) -> {
+                    tmp[0] = v;
+                    return null;
+                });
+
+                V r = (V)tmp[0];
+                if (r == null) {
+                    r = cacheLoader.apply(k);
+                }
+                return r;
+            };
+
+            LoadingCache<K, CompletableFuture<V>> level2 = cacheBuilder.build(
+                    CacheLoader.from(k -> CompletableFuture.completedFuture(level3AwareCacheLoader.apply(k))));
+
+            AsyncClaimingCacheImplGuava<K, V> result = new AsyncClaimingCacheImplGuava<>(level1, level2, level3, evictionGuards, claimListener, unclaimListener, level3AwareAtomicRemovalListener, suppressedRemovalEvents);
+            return result;
+        }
+    }
+
+    public static <K, V> Builder<K, V> newBuilder(CacheBuilder<Object, Object> cacheBuilder) {
+        Builder<K, V> result = new Builder<>();
+        result.setCacheBuilder(cacheBuilder);
+        return result;
+    }
+
+    public static void main(String[] args) throws InterruptedException {
+        // TODO This should become a test case that tests the eviction guard feature
+
+        AsyncClaimingCacheImplGuava<String, String> cache = AsyncClaimingCacheImplGuava.<String, String>newBuilder(
+                CacheBuilder.newBuilder().maximumSize(10).expireAfterWrite(1, TimeUnit.SECONDS))
+            .setCacheLoader(key -> "Loaded " + key)
+            .setAtomicRemovalListener(n -> System.out.println("Evicted " + n.getKey()))
+            .setClaimListener((k, v) -> System.out.println("Claimed: " + k))
+            .setUnclaimListener((k, v) -> System.out.println("Unclaimed: " + k))
+            .build();
+
+        try (RefFuture<String> ref = cache.claim("test")) {
+            try (Disposable disposable = cache.addEvictionGuard(k -> k.contains("test"))) {
+                System.out.println(ref.await());
+                ref.close();
+                TimeUnit.SECONDS.sleep(5);
+
+                try (RefFuture<String> reclaim = cache.claim("test")) {
+                    disposable.close();
+                    // reclaim.close();
+                }
+            }
+        }
+
+        TimeUnit.SECONDS.sleep(5);
+        System.out.println("done");
+    }
+
+    /**
+     * Claim a key only if it is already present.
+     *
+     * This implementation is a best effort approach:
+     * There is a very slim chance that just between testing a key for presence and claiming its entry
+     * an eviction occurs - causing claiming of a non-present key and thus triggering a load action.
+     */
+    @Override
+    public RefFuture<V> claimIfPresent(K key) {
+        RefFuture<V> result = level1.containsKey(key) || level2.asMap().containsKey(key) ? claim(key) : null;
+        return result;
+    }
+
+    @Override
+    public void invalidateAll() {
+        List<K> keys = new ArrayList<>(level2.asMap().keySet());
+        invalidateAll(keys);
+    }
+
+    @Override
+    public void invalidateAll(Iterable<? extends K> keys) {
+        LockUtils.runWithLock(invalidationLock.writeLock(), () -> {
+            Map<K, CompletableFuture<V>> map = level2.asMap();
+            for (K key : keys) {
+                map.compute(key, (k, vFuture) -> {
+                    // vFuture is null when the key is absent from level2 (e.g. currently claimed
+                    // or never loaded); in that case there is nothing to invalidate here
+                    if (vFuture != null) {
+                        V v = null;
+                        if (vFuture.isDone()) {
+                            try {
+                                v = vFuture.get();
+                            } catch (Exception e) {
+                                logger.warn("Detected cache entry that failed to load during invalidation", e);
+                            }
+                        }
+
+                        atomicRemovalListener.onRemoval(RemovalNotification.create(k, v, RemovalCause.EXPLICIT));
+                    }
+                    return null;
+                });
+            }
+        });
+    }
+
+    @Override
+    public Collection<K> getPresentKeys() {
+        // NOTE(review): only level2 keys are reported; claimed (level1) keys are not included
+        // - confirm this is the intended notion of 'present'
+        return new LinkedHashSet<>(level2.asMap().keySet());
+    }
+
+    /** Essentially a 'NonAtomicInteger' */
+    private static class Latch {
+        // A flag to indicate that removal of the corresponding entry from keyToSynchronizer needs to be prevented
+        // because another thread already started reusing this latch
+        volatile int numWaitingThreads = 1;
+
+        Latch inc() { ++numWaitingThreads; return this; }
+        Latch dec() { --numWaitingThreads; return this; }
+        int get() { return numWaitingThreads; }
+
+        @Override
+        public String toString() {
+            return "Latch " + System.identityHashCode(this) + " has "+ numWaitingThreads + " threads waiting";
+        }
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/Ref.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/Ref.java
new file mode 100644
index 0000000000..6aaa31bcc5
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/Ref.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.claimingcache;
+
+import java.util.function.Function;
+
+/**
+ * Interface for nested references.
+ * References allow for sharing an entity across several clients and
+ * deferring the release of that entity's resources immediately to the point
+ * in time when the last reference is released. The main use case is for memory paging
+ * such that if several threads request the same page only one physical buffer is handed out
+ * from a cache - conversely, as long as a page is still in used by a client, cache eviction
+ * and synchronization may be suppressed.
+ *
+ * Terminology:
+ * <ul>
+ *   <li>A reference is <b>closed</b> when {@link #close()} was called; <b>open</b> otherwise.</li>
+ *   <li>A reference is <b>alive</b> when it is <b>open</b> and/or any of the child refs acquired from it are still <b>alive</b>.</li>
+ *   <li>A reference is <b>released</b> (dead) as soon it is no longer alive. This immediately triggers its release action.</li>
+ * <ul>
+ *
+ * Implementation note: At present the alive-check and release action are assumed to run synchronously. As such there
+ * is no transition phase ('dying' or 'releasing'). This could be added in the future.</li>
+ *
+ * @param <T> The value type stored in this reference.
+ */
+public interface Ref<T>
+    extends AutoCloseable
+{
+    /**
+     * Get the root reference.
+     */
+    Ref<T> getRootRef();
+
+    /**
+     * Get the referent only iff this ref instance has not yet been closed.
+     * This method fails for closed alive refs.
+     * A closed reference is alive if it has unclosed child references.
+     *
+     * For most use cases the referent should be accessed using this method.
+     *
+     * @return The referent
+     */
+    T get();
+
+    /**
+     * Return the object on which reference acquisition, release and the close action
+     * are synchronized on.
+     */
+    Object getSynchronizer();
+
+    /**
+     * Acquire a new reference with a given comment object.
+     * Acquisition fails if isAlive() returns false.
+     */
+    Ref<T> acquire(Object purpose);
+
+    // Acquire without a comment object.
+    default Ref<T> acquire() {
+        return acquire(null);
+    }
+
+    /**
+     * A reference may itself be closed, but references to it may keep it alive.
+     *
+     * @return true iff either this reference is not closed or there exists any acquired reference.
+     */
+    boolean isAlive();
+
+    /**
+     * Check whether this reference is closed
+     */
+    boolean isClosed();
+
+    // Overrides the throws declaration of AutoCloseable
+    @Override
+    void close();
+
+    /** Optional operation. References may expose where they were acquired. */
+    StackTraceElement[] getAcquisitionStackTrace();
+
+    /** Optional operation. References may expose where close was called on them. */
+    StackTraceElement[] getCloseStackTrace();
+
+    /** Optional operation. References may expose where their close was triggered upon release. */
+    StackTraceElement[] getCloseTriggerStackTrace();
+
+    /**
+     * Return a Ref with a new referent obtained by mapping this ref's value with mapper.
+     * Closing the returned Ref closes the original one. Synchronizes on the same object as this ref.
+     */
+    @SuppressWarnings("resource") // Result must be closed by caller
+    default <X> Ref<X> acquireMapped(Function<? super T, ? extends X> mapper) {
+        Ref<T> base = acquire();
+        X mapped = mapper.apply(base.get());
+        Ref<X> result = RefImpl.create(mapped, base.getSynchronizer(), base);
+        return result;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/RefDelegate.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/RefDelegate.java
new file mode 100644
index 0000000000..32a4b9fe4f
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/RefDelegate.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.claimingcache;
+
+/** Interface with default methods that delegate all {@link Ref} operations to a wrapped delegate ref. */
+public interface RefDelegate<T, R extends Ref<T>>
+    extends Ref<T>
+{
+    R getDelegate();
+
+    @Override
+    default Ref<T> getRootRef() {
+        return getDelegate().getRootRef();
+    }
+
+    @Override
+    default T get() {
+        return getDelegate().get();
+    }
+
+    @Override
+    default Ref<T> acquire(Object purpose) {
+        return getDelegate().acquire(purpose);
+    }
+
+    @Override
+    default boolean isAlive() {
+        return getDelegate().isAlive();
+    }
+
+    @Override
+    default boolean isClosed() {
+        return getDelegate().isClosed();
+    }
+
+    @Override
+    default void close() {
+        getDelegate().close();
+    }
+
+    @Override
+    default Object getSynchronizer() {
+        return getDelegate().getSynchronizer();
+    }
+
+    @Override
+    default StackTraceElement[] getAcquisitionStackTrace() {
+        return getDelegate().getAcquisitionStackTrace();
+    }
+
+    @Override
+    default StackTraceElement[] getCloseStackTrace() {
+        return getDelegate().getCloseStackTrace();
+    }
+
+    @Override
+    default StackTraceElement[] getCloseTriggerStackTrace() {
+        return getDelegate().getCloseTriggerStackTrace();
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/RefDelegateBase.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/RefDelegateBase.java
new file mode 100644
index 0000000000..c3588b3525
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/RefDelegateBase.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.claimingcache;
+/** Base implementation of {@link RefDelegate} that simply stores the delegate in a field. */
+public class RefDelegateBase<T, R extends Ref<T>>
+    implements RefDelegate<T, R>
+{
+    protected R delegate;
+
+    public RefDelegateBase(R delegate) {
+        super();
+        this.delegate = delegate;
+    }
+
+    @Override
+    public R getDelegate() {
+        return delegate;
+    }
+}
\ No newline at end of file
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/RefFuture.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/RefFuture.java
new file mode 100644
index 0000000000..fab788b5e9
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/RefFuture.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.claimingcache;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.function.Function;
+
+/** Essentially a typedef for {@code Ref<CompletableFuture<T>>} */
+public interface RefFuture<T>
+    extends RefDelegate<CompletableFuture<T>, Ref<CompletableFuture<T>>>
+{
+    default T await() {
+        CompletableFuture<T> cf = get();
+        T result;
+        try {
+            result = cf.get();
+        } catch (InterruptedException | ExecutionException e) {
+            throw new RuntimeException(e);
+        }
+        return result;
+    }
+
+    /** Covariant override: acquiring from a RefFuture again yields a RefFuture. */
+    @Override
+    RefFuture<T> acquire();
+
+    /** Create a sub-reference to a transformed value of the CompletableFuture */
+    @SuppressWarnings("resource") // Result must be closed by caller
+    default <U> RefFuture<U> acquireTransformed(Function<? super T, ? extends U> transform) {
+        RefFuture<T> acquired = this.acquire();
+        Object synchronizer = acquired.getSynchronizer();
+
+        CompletableFuture<U> future = acquired.get().thenApply(transform);
+        RefFuture<U> result = RefFutureImpl.wrap(RefImpl.create(future, synchronizer, acquired::close));
+        return result;
+    }
+
+    /** As {@link #acquireTransformed(Function)}, but additionally closes this reference. */
+    default <U> RefFuture<U> acquireTransformedAndCloseThis(Function<? super T, ? extends U> transform) {
+        RefFuture<U> result = acquireTransformed(transform);
+        this.close();
+        return result;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/RefFutureImpl.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/RefFutureImpl.java
new file mode 100644
index 0000000000..b8122c95e5
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/RefFutureImpl.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.claimingcache;
+
+import java.util.concurrent.CancellationException;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class RefFutureImpl<T>
+    extends RefDelegateBase<CompletableFuture<T>, Ref<CompletableFuture<T>>>
+    implements RefFuture<T>
+{
+    private static final Logger logger = LoggerFactory.getLogger(RefFutureImpl.class);
+
+    public RefFutureImpl(Ref<CompletableFuture<T>> delegate) {
+        super(delegate);
+    }
+
+    @Override
+    public RefFuture<T> acquire() {
+        return wrap(getDelegate().acquire());
+    }
+
+    /**
+     * A simple wrapping of an instance of {@code Ref<CompletableFuture<T>>}
+     * as a more handy instance of {@code RefFuture<T>}.
+     * All methods of the returned RefFuture delegate to the original Ref.
+     *
+     * Note, that {@code RefFuture<T>} is a sub-interface of
+     * {@code Ref<CompletableFuture<T>>}.
+     */
+    public static <T> RefFuture<T> wrap(Ref<CompletableFuture<T>> delegate) {
+        return new RefFutureImpl<>(delegate);
+    }
+
+    /** Wrap an existing ref with a completed future */
+    public static <T> RefFuture<T> fromRef(Ref<T> ref) {
+        RefFuture<T> result = RefFutureImpl.fromFuture(CompletableFuture.completedFuture(ref), ref.getSynchronizer());
+        return result;
+    }
+
+    /** Create a ref that upon close cancels the future, or closes the contained ref once it becomes available */
+    public static <T> RefFuture<T> fromFuture(CompletableFuture<Ref<T>> future, Object synchronizer) {
+      return wrap(RefImpl.create(future.thenApply(Ref::get), synchronizer, () -> cancelFutureOrCloseRef(future), null));
+    }
+
+    public static void cancelFutureOrCloseRef(CompletableFuture<? extends Ref<?>> future) {
+        cancelFutureOrCloseValue(future, Ref::close);
+    }
+
+    /** Registers a whenComplete action that closes the value if loaded. Then immediately attempts to cancel the future. */
+    public static <T> void cancelFutureOrCloseValue(CompletableFuture<T> future, Consumer<? super T> valueCloseAction) {
+
+        AtomicBoolean closeActionRun = new AtomicBoolean(false);
+
+        BiConsumer<T, Throwable> closeAction = (value, t) -> {
+            // Beware of short circuit evaluation of getAndSet!
+            if (!closeActionRun.getAndSet(true) && value != null && valueCloseAction != null) {
+                valueCloseAction.accept(value);
+            }
+
+            if (t != null) {
+                logger.warn("Exception encountered during close", t);
+            }
+        };
+
+        CompletableFuture<T> derived = future.whenComplete(closeAction);
+
+        try {
+            if (!derived.isDone()) {
+                future.cancel(true);
+                // Wait for exception (possibly due to cancel) or normal completion
+                derived.get();
+            }
+        } catch (CancellationException | InterruptedException | ExecutionException e) {
+            logger.warn("Exception raised during close", e);
+        }
+    }
+}
\ No newline at end of file
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/RefImpl.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/RefImpl.java
new file mode 100644
index 0000000000..50dba90a77
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/claimingcache/RefImpl.java
@@ -0,0 +1,287 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.claimingcache;
+
+import java.util.Map;
+import java.util.WeakHashMap;
+import java.util.function.Consumer;
+
+import org.apache.jena.sparql.service.enhancer.impl.util.StackTraceUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Reference-counted implementation of a {@link Ref}.
+ */
+public class RefImpl<T>
+    implements Ref<T>
+{
+    private static final Logger logger = LoggerFactory.getLogger(RefImpl.class);
+
+    protected boolean traceAcquisitions = true;
+
+    protected T value;
+
+    /**
+     * The release action is run once immediately when the isAlive() state changes to false.
+     * The release action cannot 'revive' a reference as the reference is already 'dead'.
+     *
+     * The release action differs depending on how a reference was created:
+     * On the root reference, the releaseAction releases the wrapped resource
+     * On a child reference, the releaseAction releases itself (the child) from the parent one.
+     *
+     */
+    protected AutoCloseable releaseAction;
+
+    // TODO Would it be worthwhile to add a pre-release action that is run immediately before
+    //      a ref would become dead?
+    // protected AutoCloseable preReleaseAction;
+
+    /**
+     * Object on which to synchronize before any change of state of this reference.
+     * This allows for e.g. synchronizing on a {@code Map<K, Reference<V>>}, such that
+     * closing a reference removes the map entry before it can be accessed and conversely,
+     * synchronizing on the map prevents the reference from becoming released.
+     */
+    protected Object synchronizer;
+
+    protected Object comment; // An attribute which can be used for debugging reference chains
+    protected RefImpl<T> parent;
+    protected volatile boolean isClosed = false;
+
+    protected StackTraceElement[] acquisitionStackTrace;
+    protected StackTraceElement[] closeStackTrace;
+    protected StackTraceElement[] closeTriggerStackTrace;
+
+    // A child ref is active as long as its close() method has not been called
+    // The WeakHashMap nature may 'hide' entries whose key is about to be GC'd.
+    // This can lead to the situation that childRefs.isEmpty() may return true even
+    // if there are active child refs (whose close method has not yet been called)
+
+    // TODO The map is only for debugging / reporting - remove?
+    protected Map<Ref<T>, Object> childRefs = new WeakHashMap<>();
+    protected volatile int activeChildRefs = 0;
+
+    public RefImpl(
+            RefImpl<T> parent,
+            T value,
+            Object synchronizer,
+            AutoCloseable releaseAction,
+            Object comment) {
+        super();
+        this.parent = parent;
+        this.value = value;
+        this.releaseAction = releaseAction;
+        this.synchronizer = synchronizer == null ? this : synchronizer;
+        this.comment = comment;
+
+        if (traceAcquisitions) {
+            acquisitionStackTrace = StackTraceUtils.getStackTraceIfEnabled();
+        }
+    }
+
+    /**
+     * Note: Actually this method should be replaced with an approach using Java 9 Cleaner
+     * however I couldn't get the cleaner to run.
+     */
+    @SuppressWarnings("deprecation")
+    @Override
+    protected void finalize() throws Throwable {
+        try {
+            if (!isClosed) {
+                synchronized (synchronizer) {
+                    if (!isClosed) {
+                        String msg = "Ref released by GC rather than user logic - indicates resource leak."
+                                + "Acquired at " + StackTraceUtils.toString(acquisitionStackTrace);
+                        logger.warn(msg);
+
+                        close();
+                    }
+                }
+            }
+        } finally {
+            super.finalize();
+        }
+    }
+
+    public Object getComment() {
+        return comment;
+    }
+
+    @Override
+    public Object getSynchronizer() {
+        return synchronizer;
+    }
+
+    @Override
+    public T get() {
+        if (isClosed) {
+            String msg = "Cannot get value of a closed reference:\n"
+                    + "Acquired at " + StackTraceUtils.toString(acquisitionStackTrace) + "\n"
+                    + "Closed at " + StackTraceUtils.toString(closeStackTrace) + "\n"
+                    + "Close Triggered at " + StackTraceUtils.toString(closeTriggerStackTrace);
+            logger.warn(msg);
+
+            throw new RuntimeException("Cannot get value of a closed reference");
+        }
+
+        return value;
+    }
+
+    @Override
+    public Ref<T> acquire(Object comment) {
+        synchronized (synchronizer) {
+            if (!isAlive()) {
+                String msg = "Cannot acquire from a reference with status 'isAlive=false'"
+                        + "\nClose triggered at: " + StackTraceUtils.toString(closeTriggerStackTrace);
+                throw new RuntimeException(msg);
+            }
+
+            // A bit of ugliness to allow the reference to release itself
+            @SuppressWarnings("rawtypes")
+            Ref[] tmp = new Ref[1];
+            tmp[0] = new RefImpl<>(this, value, synchronizer, () -> release(tmp[0]), comment);
+
+            @SuppressWarnings("unchecked")
+            Ref<T> result = tmp[0];
+            childRefs.put(result, comment);
+            ++activeChildRefs;
+            //activeChildRefs.incrementAndGet();
+            return result;
+        }
+    }
+
+    protected void release(Object childRef) {
+        boolean isContained = childRefs.containsKey(childRef);
+        if (isContained) {
+            childRefs.remove(childRef);
+            --activeChildRefs;
+        } else {
+            throw new RuntimeException("An unknown reference requested to release itself. Should not happen");
+        }
+
+        checkRelease();
+    }
+
+    @Override
+    public boolean isAlive() {
+        boolean result;
+        result = !isClosed || activeChildRefs != 0;
+        return result;
+    }
+
+    @Override
+    public void close() {
+        synchronized (synchronizer) {
+            if (isClosed) {
+                String msg = "Reference was already closed." +
+                        "\nReleased at: " + StackTraceUtils.toString(closeStackTrace) +
+                        "\nAcquired at: " + StackTraceUtils.toString(acquisitionStackTrace);
+
+                logger.debug(msg);
+                // Alternatively throw new RuntimeException(msg)?
+            } else {
+                if (traceAcquisitions) {
+                    closeStackTrace = StackTraceUtils.getStackTraceIfEnabled();
+                }
+
+                isClosed = true;
+
+                checkRelease();
+            }
+        }
+    }
+
+    protected void checkRelease() {
+
+        if (!isAlive()) {
+            if (traceAcquisitions) {
+                closeTriggerStackTrace = StackTraceUtils.getStackTraceIfEnabled();
+            }
+
+            if (releaseAction != null) {
+                try {
+                    releaseAction.close();
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
+            }
+        }
+    }
+
+    public static <T extends AutoCloseable> Ref<T> fromCloseable(T value, Object synchronizer) {
+        return create(value, synchronizer, value);
+    }
+
+    /** Create method where the close action is created from a provided lambda that accepts the value */
+    public static <T> Ref<T> create2(T value, Object synchronizer, Consumer<? super T> closer) {
+        return create(value, synchronizer, () -> closer.accept(value), null);
+    }
+
+    public static <T> Ref<T> create(T value, Object synchronizer, AutoCloseable releaseAction) {
+        return create(value, synchronizer, releaseAction, null);
+    }
+
+    public static <T> Ref<T> create(T value, Object synchronizer, AutoCloseable releaseAction, Object comment) {
+        return new RefImpl<>(null, value, synchronizer, releaseAction, comment);
+    }
+
+    public static <T> Ref<T> createClosed() {
+        RefImpl<T> result = new RefImpl<>(null, null, null, null, null);
+        result.isClosed = true;
+        return result;
+    }
+
+    @Override
+    public boolean isClosed() {
+        return isClosed;
+    }
+
+    @SuppressWarnings("resource")
+    @Override
+    public Ref<T> getRootRef() {
+        RefImpl<T> result = this;
+        while (result.parent != null) {
+            result = result.parent;
+        }
+        return result;
+    }
+
+    @Override
+    public StackTraceElement[] getAcquisitionStackTrace() {
+        return acquisitionStackTrace;
+    }
+
+    @Override
+    public StackTraceElement[] getCloseStackTrace() {
+        return closeStackTrace;
+    }
+
+    @Override
+    public StackTraceElement[] getCloseTriggerStackTrace() {
+        return closeTriggerStackTrace;
+    }
+
+    @Override
+    public String toString() {
+        String result = String.format("Ref %s, active(self, #children)=(%b, %d), aquired at %s",
+                comment, !isClosed, activeChildRefs, StackTraceUtils.toString(acquisitionStackTrace));
+        return result;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/example/ServiceCachingExamples.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/example/ServiceCachingExamples.java
new file mode 100644
index 0000000000..6ad6bb57e4
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/example/ServiceCachingExamples.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.example;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.jena.atlas.logging.LogCtl;
+import org.apache.jena.ext.com.google.common.base.Stopwatch;
+import org.apache.jena.query.QueryExecution;
+import org.apache.jena.query.QueryExecutionFactory;
+import org.apache.jena.query.QueryFactory;
+import org.apache.jena.query.ResultSetFormatter;
+import org.apache.jena.rdf.model.Model;
+import org.apache.jena.rdf.model.ModelFactory;
+import org.apache.jena.riot.ResultSetMgr;
+import org.apache.jena.riot.resultset.ResultSetLang;
+import org.apache.jena.sparql.exec.http.QueryExecutionHTTP;
+import org.apache.jena.sparql.service.enhancer.init.ServiceEnhancerConstants;
+import org.apache.jena.sparql.service.enhancer.init.ServiceEnhancerInit;
+
+/** Examples for setting up and using SERVICE caching */
+public class ServiceCachingExamples {
+
+    static { LogCtl.setLogging(); }
+
+    public static void main(String[] args) {
+        basicCachingExample();
+
+        moreExamples();
+    }
+
+    public static void basicCachingExample() {
+        Model model = ModelFactory.createDefaultModel();
+
+        try (QueryExecution qe = QueryExecutionFactory.create(String.join("\n"
+                , "SELECT * {"
+                + "  SERVICE <loop:cache:bulk+3:http://dbpedia.org/sparql> {"
+                + "    SELECT DISTINCT ?p { ?s a <http://dbpedia.org/ontology/MusicalArtist> ; ?p ?o }"
+                + "  }"
+                + "}"),
+                model)) {
+
+            ServiceEnhancerInit.wrapOptimizer(qe.getContext());
+            benchmark(() -> ResultSetFormatter.consume(qe.execSelect()));
+        }
+
+        // The query below makes use of the cache and performs additional filtering
+        // Its execution time should be significantly lower than that of the prior query
+        try (QueryExecution qe = QueryExecutionFactory.create(String.join("\n"
+                , "SELECT * {"
+                + "  SERVICE <loop:cache:http://dbpedia.org/sparql> {"
+                + "    SELECT DISTINCT ?p { ?s a <http://dbpedia.org/ontology/MusicalArtist> ; ?p ?o }"
+                + "  }"
+                + "  FILTER(CONTAINS(STR(?p), 'tim'))"
+                + "}"),
+                model)) {
+            ServiceEnhancerInit.wrapOptimizer(qe.getContext());
+            benchmark(() -> ResultSetFormatter.consume(qe.execSelect()));
+        }
+    }
+
+    // TODO needs cleanup
+
+
+    public static void testDbpedia() {
+        String queryStr = String.join("\n",
+            "PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>",
+            "PREFIX dbo: <http://dbpedia.org/ontology/>",
+            "SELECT * WHERE {",
+            "  SERVICE <cache:https://dbpedia.org/sparql> {",
+            "    SELECT * {",
+            "      ?s a dbo:MusicalArtist",
+            "    } ORDER BY ?s LIMIT 10 OFFSET 20",
+            "  }",
+            "  SERVICE <cache:loop:bulk+10:https://dbpedia.org/sparql> {",
+            "    ?s rdfs:label ?l",
+            "  }",
+            "}");
+
+        // Model model = ModelFactory.createDefaultModel();
+        QueryFactory.create(queryStr);
+        // TS_ResultSetLimits.testWithCleanCaches(model, queryStr, 10000);
+    }
+
+    public static void moreExamples() {
+        Model model;
+
+//    	ServiceExecutorRegistryBulk.get().prepend(new ChainingServiceExecutorBulkSpecial());
+        //ServiceExecutorRegistryBulk.get().chain(new ChainingServiceExecutorBulkCache());
+
+//    	ServiceResponseCache serviceCache = new ServiceResponseCache();
+//    	ARQ.getContext().set(ServicePlugin.serviceCache, serviceCache);
+
+
+        try (QueryExecution qe = QueryExecutionHTTP.newBuilder()
+            .endpoint("https://dbpedia.org/sparql")
+            .query("CONSTRUCT { ?s ?p ?o } WHERE { { SELECT DISTINCT ?s { ?s a <http://dbpedia.org/ontology/Person> } LIMIT 10 } ?s ?p ?o }")
+            .build()) {
+            model = qe.execConstruct();
+        }
+
+        System.out.println("Backend request spo all");
+        if (true) {
+            try (QueryExecution qe = QueryExecutionFactory.create(
+                    "SELECT * { { SELECT DISTINCT ?s { ?s a <http://dbpedia.org/ontology/Person> } LIMIT 3 } SERVICE <loop:cache:bulk+3> { { SELECT * { ?s ?p ?o } } } }",
+                    model)) {
+                ServiceEnhancerInit.wrapOptimizer(qe.getContext());
+                ResultSetMgr.write(System.out, qe.execSelect(), ResultSetLang.RS_JSON);
+            }
+        }
+
+        System.out.println("Backend request spo 1");
+        if (true) {
+            try (QueryExecution qe = QueryExecutionFactory.create(
+                    "SELECT * { { SELECT DISTINCT ?s { ?s a <http://dbpedia.org/ontology/Person> } LIMIT 3 } SERVICE <loop:cache:bulk+3> { { SELECT * { ?s ?p ?o } LIMIT 1 } } }",
+                    model)) {
+                ServiceEnhancerInit.wrapOptimizer(qe.getContext());
+                ResultSetMgr.write(System.out, qe.execSelect(), ResultSetLang.RS_JSON);
+            }
+        }
+
+        System.out.println("Backend request:");
+        if (true) {
+            try (QueryExecution qe = QueryExecutionFactory.create(
+                    "SELECT * { { SELECT DISTINCT ?s { ?s a <http://dbpedia.org/ontology/Person> } LIMIT 3 } SERVICE <loop:cache:bulk+3> { { SELECT * { ?s ?p ?o . FILTER(?p = <http://www.w3.org/2000/01/rdf-schema#label>) } ORDER BY ?p LIMIT 1 } } }",
+                    model)) {
+                ServiceEnhancerInit.wrapOptimizer(qe.getContext());
+                ResultSetMgr.write(System.out, qe.execSelect(), ResultSetLang.RS_JSON);
+            }
+        }
+
+        System.out.println("Serving from cache:");
+        if (true) {
+            try (QueryExecution qe = QueryExecutionFactory.create(
+                    "SELECT * { { SELECT DISTINCT ?s { ?s a <http://dbpedia.org/ontology/Person> } LIMIT 3 } SERVICE <loop:cache:bulk+3> { { SELECT * { ?s ?p ?o . FILTER(?p = <http://www.w3.org/2000/01/rdf-schema#label>) } ORDER BY ?p LIMIT 1 } } }",
+                    model)) {
+                ServiceEnhancerInit.wrapOptimizer(qe.getContext());
+                ResultSetMgr.write(System.out, qe.execSelect(), ResultSetLang.RS_JSON);
+            }
+        }
+
+        System.out.println("Fetching one more binding per item:");
+        if (true) {
+            try (QueryExecution qe = QueryExecutionFactory.create(
+                    "SELECT * { { SELECT DISTINCT ?s { ?s a <http://dbpedia.org/ontology/Person> } LIMIT 3 } SERVICE <loop:cache:bulk+3> { { SELECT * { ?s ?p ?o . FILTER(?p = <http://www.w3.org/2000/01/rdf-schema#label>) } ORDER BY ?p LIMIT 2 } } }",
+                    model)) {
+                ServiceEnhancerInit.wrapOptimizer(qe.getContext());
+                ResultSetMgr.write(System.out, qe.execSelect(), ResultSetLang.RS_JSON);
+            }
+        }
+
+        if (true) {
+            // Test for nested loop
+            // - Special emphasis of this test: Injected idxVars (references to lhs input bindings) must not clash
+            try (QueryExecution qe = QueryExecutionFactory.create(
+                    "SELECT * { BIND('x' AS ?x) SERVICE <loop:> { BIND(?x AS ?y) SERVICE <loop:> { BIND(?y AS ?z) } } }",
+                    model)) {
+                ServiceEnhancerInit.wrapOptimizer(qe.getContext());
+                ResultSetMgr.write(System.out, qe.execSelect(), ResultSetLang.RS_JSON);
+            }
+        }
+
+        // System.out.println(Algebra.compile(QueryFactory.create("SELECT * { ?s a <http://dbpedia.org/ontology/Person> SERVICE <https://dbpedia.org/sparql> { { SELECT ?s ?p { ?s ?p ?o . FILTER(?p = <http://www.w3.org/2000/01/rdf-schema#label>) } ORDER BY ?p } } }")));
+        // System.out.println(Algebra.compile(QueryFactory.create("SELECT * { ?s a <http://dbpedia.org/ontology/Person> SERVICE <https://dbpedia.org/sparql> { BIND(?s AS ?x) } }")));
+
+//        if (false) {
+//            try (QueryExecution qe = QueryExecutionFactory.create(
+//                    //"SELECT * { ?s a <http://dbpedia.org/ontology/Person> SERVICE <http://dbpedia.org/sparql> { { SELECT * { { BIND(?s AS ?x) } UNION { BIND(?s AS ?y) } UNION { ?s <urn:dummy> ?s } } } } }",
+//                    "SELECT * { ?s a <http://dbpedia.org/ontology/Person> SERVICE <https://dbpedia.org/sparql> { { SELECT ?x ?y { { BIND(?s AS ?x) } UNION { BIND(?s AS ?y) } } } } }",
+//                    //"SELECT * { ?s a <http://dbpedia.org/ontology/Person> SERVICE <https://dbpedia.org/sparql> { { BIND(?s AS ?x) } UNION { BIND(?s AS ?y) } } }",
+//                    model)) {
+//                 qe.getContext().set(InitServiceEnhancer.serviceBulkMaxBindingCount, 10);
+//                qe.getContext().set(InitServiceEnhancer.serviceBulkRequestMaxByteSize, 1500);
+//                ResultSetMgr.write(System.out, qe.execSelect(), ResultSetLang.RS_JSON);
+//            }
+//        }
+
+        //		"SELECT * { ?s a <http://dbpedia.org/ontology/Person> SERVICE <https://dbpedia.org/sparql> { { SELECT ?s (COUNT(*) AS ?c) { ?s ?p ?o } GROUP BY ?s } } }",
+
+        if (true) {
+            try (QueryExecution qe = QueryExecutionFactory.create(
+                    "SELECT * { { SELECT ?s { ?s a <http://dbpedia.org/ontology/Person> } OFFSET 1 LIMIT 1 } SERVICE <cache:bulk+20:https://dbpedia.org/sparql> { { SELECT ?s ?p ?o { ?s ?p ?o . FILTER(?p = <http://www.w3.org/2000/01/rdf-schema#label>) } ORDER BY ?p } } }",
+                    model)) {
+                ServiceEnhancerInit.wrapOptimizer(qe.getContext());
+                qe.getContext().set(ServiceEnhancerConstants.serviceBulkMaxBindingCount, 10);
+                // qe.getContext().set(ServiceEnhancerConstants.serviceBulkRequestMaxByteSize, 1500);
+                ResultSetMgr.write(System.out, qe.execSelect(), ResultSetLang.RS_JSON);
+            }
+        }
+
+        if (true) {
+            try (QueryExecution qe = QueryExecutionFactory.create(
+                    "SELECT * { { SELECT ?s { ?s a <http://dbpedia.org/ontology/Person> } OFFSET 0 LIMIT 3 } SERVICE <https://dbpedia.org/sparql> { { SELECT ?s ?p ?o { ?s ?p ?o . FILTER(?p = <http://www.w3.org/2000/01/rdf-schema#label>) } ORDER BY ?p } } }",
+                    model)) {
+                ServiceEnhancerInit.wrapOptimizer(qe.getContext());
+                qe.getContext().set(ServiceEnhancerConstants.serviceBulkMaxBindingCount, 10);
+                // qe.getContext().set(ServiceEnhancerConstants.serviceBulkRequestMaxByteSize, 1500);
+                ResultSetMgr.write(System.out, qe.execSelect(), ResultSetLang.RS_JSON);
+            }
+        }
+
+        if (true) {
+            try (QueryExecution qe = QueryExecutionFactory.create(
+                    "SELECT * { ?s a <http://dbpedia.org/ontology/Person> SERVICE <https://dbpedia.org/sparql> { { SELECT * { ?s ?p ?o } LIMIT 3 OFFSET 5 } } }",
+                    model)) {
+                // qe.getContext().set(ServicePlugin.serviceBulkRequestMaxItemCount, 1);
+                // qe.getContext().set(ServiceEnhancerConstants.serviceBulkRequestMaxByteSize, 1500);
+                ResultSetMgr.write(System.out, qe.execSelect(), ResultSetLang.RS_JSON);
+            }
+        }
+
+        if (true) {
+            try (QueryExecution qe = QueryExecutionFactory.create(
+                    "SELECT * { SERVICE <https://dbpedia.org/sparql> { { SELECT DISTINCT ?p { ?s a <http://dbpedia.org/ontology/Company> ; ?p ?o } ORDER BY ?p } } }",
+                    model)) {
+                ServiceEnhancerInit.wrapOptimizer(qe.getContext());
+                qe.getContext().set(ServiceEnhancerConstants.serviceBulkMaxBindingCount, 10);
+                // qe.getContext().set(ServiceEnhancerConstants.serviceBulkRequestMaxByteSize, 1500);
+                ResultSetMgr.write(System.out, qe.execSelect(), ResultSetLang.RS_JSON);
+            }
+        }
+
+        if (true) {
+            try (QueryExecution qe = QueryExecutionFactory.create(
+                    "SELECT * { SERVICE <https://dbpedia.org/sparql> { { SELECT DISTINCT ?p { ?s a <http://dbpedia.org/ontology/Company> ; ?p ?o } ORDER BY ?p } } FILTER (CONTAINS(STR(?p), 'rdf'))}",
+                    model)) {
+                ServiceEnhancerInit.wrapOptimizer(qe.getContext());
+                qe.getContext().set(ServiceEnhancerConstants.serviceBulkMaxBindingCount, 10);
+                // qe.getContext().set(ServiceEnhancerConstants.serviceBulkRequestMaxByteSize, 1500);
+                ResultSetMgr.write(System.out, qe.execSelect(), ResultSetLang.RS_JSON);
+            }
+        }
+    }
+
+    /**
+     * Utility method to measure the given callable's execution time and display a message on stdout.
+     *
+     * @param callable the action to time; its return value is printed together with the elapsed time
+     * @throws RuntimeException wrapping any exception raised by the callable (the elapsed time is printed first)
+     */
+    public static void benchmark(Callable<?> callable) {
+        Stopwatch sw = Stopwatch.createStarted();
+        Object result;
+        try {
+            result = callable.call();
+        } catch (Exception e) {
+            System.out.println("Failed in " + sw.elapsed(TimeUnit.MILLISECONDS) + "ms");
+            throw new RuntimeException(e);
+        }
+        System.out.println("Obtained value [" + result + "] in " + sw.elapsed(TimeUnit.MILLISECONDS) + "ms");
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/example/ServicePluginExamples.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/example/ServicePluginExamples.java
new file mode 100644
index 0000000000..552639dbfe
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/example/ServicePluginExamples.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.example;
+
+import org.apache.jena.query.ARQ;
+import org.apache.jena.query.Dataset;
+import org.apache.jena.query.DatasetFactory;
+import org.apache.jena.sparql.exec.QueryExec;
+import org.apache.jena.sparql.exec.QueryExecDatasetBuilder;
+import org.apache.jena.sparql.service.enhancer.init.ServiceEnhancerInit;
+import org.apache.jena.sparql.util.Context;
+import org.apache.jena.sparql.util.QueryExecUtils;
+
+public class ServicePluginExamples {
+
+    /** Demo entry point; runs the lateral-join example against an empty dataset. */
+    public static void main(String[] args) {
+        customLinearJoin(DatasetFactory.empty());
+    }
+
+    /**
+     * Demonstrates the {@code loop:} service option: the binding of {@code ?s}
+     * produced by the BIND is made visible inside the SERVICE block.
+     * The expected output is shown in the comment at the end of this method.
+     */
+    public static void customLinearJoin(Dataset dataset) {
+        // Copy the global context so the optimizer wrapping does not leak into other queries
+        Context cxt = ARQ.getContext().copy();
+        ServiceEnhancerInit.wrapOptimizer(cxt);
+
+        String queryStr = "SELECT * {\n"
+                + "  BIND(<urn:foo> AS ?s)\n"
+                + "  SERVICE <loop:urn:arq:self> {\n"
+                + "      { BIND(?s AS ?x) } UNION { BIND(?s AS ?y) }\n"
+                + "  }\n"
+                + "}";
+        execQueryAndShowResult(dataset, queryStr, cxt);
+
+        /*
+         * -------------------------------------
+         * | s         | x         | y         |
+         * =====================================
+         * | <urn:foo> | <urn:foo> |           |
+         * | <urn:foo> |           | <urn:foo> |
+         * -------------------------------------
+         */
+    }
+
+    /**
+     * Executes the given query string against the dataset using the supplied context
+     * and prints the result. Exceptions are only printed because this is example code.
+     */
+    public static void execQueryAndShowResult(
+            Dataset dataset,
+            String queryStr,
+            Context cxt) {
+        try {
+            try (QueryExec exec = QueryExecDatasetBuilder.create()
+                    .dataset(dataset.asDatasetGraph())
+                    .query(queryStr)
+                    .context(cxt)
+                    .build()) {
+                QueryExecUtils.exec(exec);
+            }
+        } catch (Exception e) {
+            e.printStackTrace();
+        }
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/function/cacheRm.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/function/cacheRm.java
new file mode 100644
index 0000000000..6d0030e4bf
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/function/cacheRm.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.function;
+
+import java.math.BigInteger;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
+import org.apache.jena.query.QueryExecException;
+import org.apache.jena.sparql.expr.ExprList;
+import org.apache.jena.sparql.expr.NodeValue;
+import org.apache.jena.sparql.function.FunctionBase;
+import org.apache.jena.sparql.function.FunctionEnv;
+import org.apache.jena.sparql.service.enhancer.assembler.ServiceEnhancerVocab;
+import org.apache.jena.sparql.service.enhancer.impl.ServiceCacheKey;
+import org.apache.jena.sparql.service.enhancer.impl.ServiceResponseCache;
+import org.apache.jena.sparql.service.enhancer.init.ServiceEnhancerConstants;
+import org.apache.jena.sparql.util.Context;
+
+/**
+ * Invalidate the given keys (or all if none are given). Returns the number of invalidated cache entries.
+ * This function only works if {@link ServiceEnhancerConstants#enableMgmt} is set to true in the context.
+ */
+public class cacheRm
+    extends FunctionBase
+{
+    /** IRI under which this function is typically registered */
+    public static final String DEFAULT_IRI = ServiceEnhancerVocab.NS + "cacheRm";
+
+    /** This method must be implemented but it is only called from the base implementation of
+     * {@link #exec(List, FunctionEnv)} which is overridden here too */
+    @Override
+    public NodeValue exec(List<NodeValue> args) {
+        throw new IllegalStateException("Should never be called");
+    }
+
+    @Override
+    protected NodeValue exec(List<NodeValue> args, FunctionEnv env) {
+        Context cxt = env.getContext();
+
+        // Management functions are opt-in; refuse to run unless explicitly enabled
+        if (!cxt.isTrue(ServiceEnhancerConstants.enableMgmt)) {
+            throw new QueryExecException("Service enhancer management functions have not been enabled for this dataset");
+        }
+
+        ServiceResponseCache cache = ServiceResponseCache.get(cxt);
+
+        long resultCount = 0;
+
+        if (cache != null) {
+            Map<Long, ServiceCacheKey> idToKey = cache.getIdToKey();
+
+            Collection<ServiceCacheKey> keys;
+
+            if (!args.isEmpty()) {
+                // Resolve each integer argument to a cache key; non-integer arguments,
+                // nulls and unknown ids are silently skipped
+                keys = args.stream()
+                        .filter(Objects::nonNull)
+                        .filter(NodeValue::isInteger)
+                        .map(NodeValue::getInteger)
+                        .map(BigInteger::longValue)
+                        .map(idToKey::get)
+                        .filter(Objects::nonNull)
+                        .collect(Collectors.toSet());
+            } else {
+                // No arguments: invalidate every key currently present in the cache
+                keys = cache.getCache().getPresentKeys();
+            }
+
+            resultCount = keys.size();
+            cache.getCache().invalidateAll(keys);
+
+        } else {
+            // If there is no cache always return 0
+            // Alternatively: throw new ExprEvalException("");
+        }
+
+        return NodeValue.makeInteger(resultCount);
+    }
+
+
+    @Override
+    public void checkBuild(String uri, ExprList args) {
+        // Nothing to do
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/Batch.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/Batch.java
new file mode 100644
index 0000000000..6935ad63d7
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/Batch.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.util.NavigableMap;
+
+/**
+ * A mapping of unique comparable keys of type K to items of type T.
+ * Any add operation must be performed with a key that is strictly greater than
+ * any other key already in the batch. Keys need not be consecutive.
+ */
+interface Batch<K extends Comparable<K>, T> {
+    /** The key-to-item mappings of this batch (implementations may return an unmodifiable view) */
+    NavigableMap<K, T> getItems();
+    /** Add an item; the index must not be lower than {@link #getNextValidIndex()} */
+    void put(K index, T item);
+    /** The smallest key that may be passed to {@link #put(Comparable, Object)} */
+    K getNextValidIndex();
+    /** Whether this batch contains no items */
+    boolean isEmpty();
+    /** The number of items in this batch */
+    int size();
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/BatchImpl.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/BatchImpl.java
new file mode 100644
index 0000000000..f59a698904
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/BatchImpl.java
@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.util.Collections;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+
+import org.apache.jena.ext.com.google.common.collect.DiscreteDomain;
+
+/**
+ * Batch implementation backed by a navigable map.
+ */
+public class BatchImpl<K extends Comparable<K>, T>
+    implements Batch<K, T>
+{
+    // Lowest admissible key while the batch is empty
+    protected K firstKey;
+    // Used to derive the successor of the highest key added so far
+    protected DiscreteDomain<K> discreteDomain;
+    protected NavigableMap<K, T> items;
+
+    // Unmodifiable view over 'items' handed out by getItems()
+    protected NavigableMap<K, T> unmodifiableItems;
+
+    public BatchImpl(K firstKey, DiscreteDomain<K> discreteDomain) {
+        super();
+        this.firstKey = firstKey;
+        this.discreteDomain = discreteDomain;
+        this.items = new TreeMap<>();
+        this.unmodifiableItems = Collections.unmodifiableNavigableMap(items);
+    }
+
+    /** Create a batch over the integer domain starting at index 0 */
+    public static <T> Batch<Integer, T> forInteger() {
+        return new BatchImpl<>(0, DiscreteDomain.integers());
+    }
+
+    /** Create a batch over the long domain starting at index 0 */
+    public static <T> Batch<Long, T> forLong() {
+        return new BatchImpl<>(0l, DiscreteDomain.longs());
+    }
+
+    /**
+     * Items must be added with ascending indexes.
+     * Adding an item whose index is below {@link #getNextValidIndex()} - i.e. not
+     * strictly greater than the last added key - raises an IllegalArgumentException
+     */
+    @Override public void put(K index, T item) {
+        K nextValidIndex = getNextValidIndex();
+        int cmp = index.compareTo(nextValidIndex);
+        if (cmp < 0) {
+            throw new IllegalArgumentException("Index is lower than an existing one");
+        }
+
+        items.put(index, item);
+    }
+
+    /** The first key while the batch is empty; otherwise the successor of the last key */
+    @Override
+    public K getNextValidIndex() {
+        K result = items.isEmpty()
+                ? firstKey
+                : discreteDomain.next(items.lastKey());
+        return result;
+    }
+
+    /** Returns an immutable view of the items in the batch */
+    @Override
+    public NavigableMap<K, T> getItems() {
+        return unmodifiableItems;
+    }
+
+    @Override
+    public boolean isEmpty() {
+        return items.isEmpty();
+    }
+
+    @Override
+    public int size() {
+        return items.size();
+    }
+
+    @Override
+    public String toString() {
+        return "Batch [size=" + size() + ", itemRanges=" + items + "]";
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/BatchQueryRewriteResult.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/BatchQueryRewriteResult.java
new file mode 100644
index 0000000000..4b6938f70d
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/BatchQueryRewriteResult.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.util.Map;
+
+import org.apache.jena.sparql.algebra.Op;
+import org.apache.jena.sparql.core.Var;
+
+/**
+ * Rewrite result of a bulk service request. The 'renames' mapping
+ * may turn publicized variables back to internal/anonymous ones.
+ * For instance, running <pre>{@code SERVICE <foo> { { SELECT COUNT(*) {...} } }}</pre>
+ * will allocate an internal variable for count.
+ */
+public class BatchQueryRewriteResult {
+    // Algebra expression of the rewritten bulk request
+    protected Op op;
+    // Maps publicized variables back to internal ones (see class comment)
+    protected Map<Var, Var> renames;
+
+    public BatchQueryRewriteResult(Op op, Map<Var, Var> renames) {
+        super();
+        this.op = op;
+        this.renames = renames;
+    }
+
+    public Op getOp() {
+        return op;
+    }
+
+    public Map<Var, Var> getRenames() {
+        return renames;
+    }
+
+    @Override
+    public String toString() {
+        return "BatchQueryRewriteResult [op=" + op + ", renames=" + renames + "]";
+    }
+}
\ No newline at end of file
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/BatchQueryRewriter.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/BatchQueryRewriter.java
new file mode 100644
index 0000000000..35b8bcbcf6
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/BatchQueryRewriter.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+
+import org.apache.jena.atlas.logging.Log;
+import org.apache.jena.query.Query;
+import org.apache.jena.query.SortCondition;
+import org.apache.jena.sparql.algebra.Algebra;
+import org.apache.jena.sparql.algebra.Op;
+import org.apache.jena.sparql.algebra.OpAsQuery;
+import org.apache.jena.sparql.algebra.op.OpExtend;
+import org.apache.jena.sparql.algebra.op.OpOrder;
+import org.apache.jena.sparql.algebra.op.OpSlice;
+import org.apache.jena.sparql.algebra.op.OpTable;
+import org.apache.jena.sparql.algebra.op.OpUnion;
+import org.apache.jena.sparql.core.Var;
+import org.apache.jena.sparql.engine.binding.Binding;
+import org.apache.jena.sparql.engine.main.QC;
+import org.apache.jena.sparql.expr.ExprVar;
+import org.apache.jena.sparql.expr.NodeValue;
+import org.apache.jena.sparql.service.enhancer.impl.util.BindingUtils;
+
+/**
+ * Rewriter for instantiating a query such that a list of initial bindings are injected.
+ * In general, there are several rewriting strategies for that purpose and their applicability
+ * depends on the operations used in the query:
+ *
+ * <ul>
+ * <li>Union/Substitution strategy: This is perhaps the most reliable (and also most verbose) strategy:
+ * It creates a union query where for every input binding a union member is obtained by
+ * substituting the original query with it</li>
+ * <li>Join strategy: The input bindings are collected into a VALUES block and placed on the left hand side
+ * of a join with an adjusted version of the original query - not yet supported</li>
+ * <li>Filter strategy: Input bindings are turned into a disjunctive filter expression - not yet supported</li>
+ * </ul>
+ */
+public class BatchQueryRewriter {
+    protected OpServiceInfo serviceInfo;
+    protected Var idxVar;
+
+    /** Whether it can be assumed that union yields the bindings of the members in the
+     * order those members are specified.
+     * If false then ORDER BY ASC(?__idx__) is appended to the created query */
+    protected boolean sequentialUnion;
+
+    /**
+     * Whether bindings returned by union members with ORDER BY remain sorted.
+     * If false then sort conditions are added to the outer query
+     */
+    protected boolean orderRetainingUnion;
+
+
+    /** Whether to omit the end marker */
+    protected boolean omitEndMarker;
+
+    /** Constant to mark end of a batch (could also be dynamically set to one higher than the idx in a batch) */
+    static int REMOTE_END_MARKER = 1000000000;
+    static NodeValue NV_REMOTE_END_MARKER = NodeValue.makeInteger(REMOTE_END_MARKER);
+
+    /** True if either local or remote end marker */
+//    public static boolean isLocalOrRemoteEndMarker(int id) {
+//        return isRemoteEndMarker(id) || isLocalEndMarker(id);
+//    }
+
+    public static boolean isRemoteEndMarker(int id) {
+        return id == REMOTE_END_MARKER;
+    }
+
+    public static boolean isRemoteEndMarker(Integer id) {
+        return Objects.equals(id, REMOTE_END_MARKER);
+    }
+
+
+
+    // Local end marker is not returned by the remote service
+//    static int LOCAL_END_MARKER = 1000000001;
+//    static NodeValue NV_LOCAL_END_MARKER = NodeValue.makeInteger(LOCAL_END_MARKER);
+//
+//    public static boolean isLocalEndMarker(int id) {
+//        return id == LOCAL_END_MARKER;
+//    }
+//
+//    public static boolean isLocalEndMarker(Integer id) {
+//        return Objects.equals(id, LOCAL_END_MARKER);
+//    }
+
+
+    public BatchQueryRewriter(OpServiceInfo serviceInfo, Var idxVar,
+            boolean sequentialUnion, boolean orderRetainingUnion,
+            boolean omitEndMarker) {
+        super();
+        this.serviceInfo = serviceInfo;
+        this.idxVar = idxVar;
+        this.sequentialUnion = sequentialUnion;
+        this.orderRetainingUnion = orderRetainingUnion;
+        this.omitEndMarker = omitEndMarker;
+    }
+
+    /** The index var used by this rewriter */
+    public Var getIdxVar() {
+        return idxVar;
+    }
+
+    /** All variables mentioned across the partition keys of the given requests */
+    public static Set<Var> seenVars(Collection<PartitionRequest<Binding>> batchRequest) {
+        Set<Var> result = new LinkedHashSet<>();
+        batchRequest.forEach(br -> BindingUtils.addAll(result, br.getPartitionKey()));
+        return result;
+    }
+
+    /** Build the union-of-substitutions query for the given batch of partition requests */
+    public BatchQueryRewriteResult rewrite(Batch<Integer, PartitionRequest<Binding>> batchRequest) {
+
+        Op newOp = null;
+        // Iterate the entries in reverse so the union is assembled in ascending index order
+        List<Entry<Integer, PartitionRequest<Binding>>> es = new ArrayList<>(batchRequest.getItems().entrySet());
+        Collections.reverse(es);
+
+        Query normQuery = serviceInfo.getNormedQuery();
+        Op normOp = serviceInfo.getNormedQueryOp();
+
+        // Prepare the sort conditions
+        List<SortCondition> sortConditions = new ArrayList<>();
+        List<SortCondition> localSortConditions =
+                Optional.ofNullable(normQuery.getOrderBy()).orElse(Collections.emptyList());
+
+        boolean noOrderNeeded =
+                orderRetainingUnion || sequentialUnion && localSortConditions.isEmpty();
+
+        boolean orderNeeded = !noOrderNeeded;
+
+        // Sort by the index var unless member order is already guaranteed
+        if (orderNeeded) {
+            SortCondition sc = new SortCondition(new ExprVar(idxVar), Query.ORDER_ASCENDING);
+            sortConditions.add(sc);
+        }
+
+        sortConditions.addAll(localSortConditions);
+
+        for (Entry<Integer, PartitionRequest<Binding>> e : es) {
+
+            PartitionRequest<Binding> req = e.getValue();
+            long idx = e.getKey();
+            Binding scopedBinding = req.getPartitionKey();
+
+            Set<Var> scopedBindingVars = BindingUtils.varsMentioned(scopedBinding);
+
+            Map<Var, Var> varMapScopedToNormed = ServiceCacheKeyFactory
+                    .createJoinVarMapScopedToNormed(serviceInfo, scopedBindingVars);
+
+            // Rename the binding's variables from the scoped level to the normalized level
+            Binding normedBinding = BindingUtils.renameKeys(scopedBinding, varMapScopedToNormed);
+
+            Op op = normOp;
+
+            // Note: QC.substitute does not remove variables being substituted from projections
+            //   This may cause unbound variables to be projected
+
+            // If the union is sequential and order retaining we can retain the order on the members
+            // otherwise, we can remove any ordering on the member
+            if ((sequentialUnion && orderRetainingUnion) || localSortConditions.isEmpty()) {
+                // If the union is sequential and order retaining we can retain the order on the members
+                // otherwise, we can remove any ordering on the member
+            } else {
+                // Member order may not be retained - remove it from the query
+
+                // TODO This should be done once in OpServiceInfo
+                Query tmp = normQuery.cloneQuery();
+                if (tmp.hasOrderBy()) {
+                    tmp.getOrderBy().clear();
+                }
+
+                op = Algebra.compile(tmp);
+                // TODO Something is odd with ordering here
+                // Add the sort conditions
+                // op = new OpOrder(op, localSortConditions);
+            }
+
+            op = QC.substitute(op, normedBinding);
+            // Tag the member's bindings with the index of the input binding they stem from
+            op = OpExtend.create(op, idxVar, NodeValue.makeInteger(idx));
+
+            long o = req.hasOffset() ? req.getOffset() : Query.NOLIMIT;
+            long l = req.hasLimit() ? req.getLimit() : Query.NOLIMIT;
+
+            if (o != Query.NOLIMIT || l != Query.NOLIMIT) {
+                op = new OpSlice(op, o, l);
+            }
+
+            newOp = newOp == null ? op : OpUnion.create(op, newOp);
+        }
+
+        if (!omitEndMarker) {
+            // Append a unit table bound to the end-marker index so consumers can detect batch end
+            Op endMarker = OpExtend.create(OpTable.unit(), idxVar, NV_REMOTE_END_MARKER);
+            newOp = newOp == null ? endMarker : OpUnion.create(newOp, endMarker);
+        }
+
+        if (orderNeeded) {
+            newOp = new OpOrder(newOp, sortConditions);
+        }
+
+        Query q = OpAsQuery.asQuery(newOp);
+
+        // NOTE(review): logs every rewritten bulk query at INFO level - consider DEBUG
+        Log.info(BatchQueryRewriter.class, "Rewritten bulk query: " + q);
+
+        // Add a rename for idxVar so that QueryIter.map does not omit it
+        Map<Var, Var> renames = new HashMap<>(serviceInfo.getVisibleSubOpVarsNormedToScoped());
+        renames.put(idxVar, idxVar);
+        return new BatchQueryRewriteResult(newOp, renames);
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/BatchQueryRewriterBuilder.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/BatchQueryRewriterBuilder.java
new file mode 100644
index 0000000000..8fbad9b0c5
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/BatchQueryRewriterBuilder.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import org.apache.jena.sparql.core.Var;
+
+/** Builder for {@link BatchQueryRewriter} instances; see that class for the meaning of the flags. */
+public class BatchQueryRewriterBuilder {
+    protected OpServiceInfo serviceInfo;
+    protected Var idxVar;
+    protected boolean sequentialUnion;
+    protected boolean orderRetainingUnion;
+    protected boolean omitEndMarker;
+
+    public BatchQueryRewriterBuilder(OpServiceInfo serviceInfo, Var idxVar) {
+        super();
+        this.serviceInfo = serviceInfo;
+        this.idxVar = idxVar;
+    }
+
+    public boolean isSequentialUnion() {
+        return sequentialUnion;
+    }
+
+    // NOTE(review): parameter is named 'linearUnion' while the property is 'sequentialUnion'
+    public BatchQueryRewriterBuilder setSequentialUnion(boolean linearUnion) {
+        this.sequentialUnion = linearUnion;
+        return this;
+    }
+
+    public boolean isOrderRetainingUnion() {
+        return orderRetainingUnion;
+    }
+
+    public BatchQueryRewriterBuilder setOrderRetainingUnion(boolean orderRetainingUnion) {
+        this.orderRetainingUnion = orderRetainingUnion;
+        return this;
+    }
+
+    public boolean isOmitEndMarker() {
+        return omitEndMarker;
+    }
+
+    public BatchQueryRewriterBuilder setOmitEndMarker(boolean omitEndMarker) {
+        this.omitEndMarker = omitEndMarker;
+        return this;
+    }
+
+    /** Static factory; equivalent to calling the constructor */
+    public static BatchQueryRewriterBuilder from(OpServiceInfo serviceInfo, Var idxVar) {
+        return new BatchQueryRewriterBuilder(serviceInfo, idxVar);
+    }
+
+    public BatchQueryRewriter build() {
+        return new BatchQueryRewriter(serviceInfo, idxVar, sequentialUnion, orderRetainingUnion, omitEndMarker);
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/CacheMode.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/CacheMode.java
new file mode 100644
index 0000000000..3e428356b8
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/CacheMode.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
/** Modes controlling how the service cache is used for a request. */
public enum CacheMode {
    /** Caching disabled. */
    OFF,
    /** Read if cached, write if not yet cached. */
    DEFAULT,
    // REFRESH, // Refresh caches; never read from cache but overwrite affected ranges in the cache
    /** Like refresh but first clear all ranges of the cache entry. */
    CLEAR;

    /**
     * Null-safe normalization: returns the given mode, or {@link #OFF} when the
     * argument is null (i.e. no cache mode was specified).
     */
    public static CacheMode effectiveMode(CacheMode cacheMode) {
        return cacheMode != null ? cacheMode : OFF;
    }
}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/CacheProviderRegistry.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/CacheProviderRegistry.java
new file mode 100644
index 0000000000..55c625baf4
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/CacheProviderRegistry.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import org.apache.jena.sparql.SystemARQ;
+import org.apache.jena.sparql.util.Symbol;
+
// TODO Allow custom cache providers via a registry
public class CacheProviderRegistry {
    // Context symbol under which a custom cache provider may be registered.
    // NOTE(review): field is public static and non-final; consider making it final
    // unless reassignment is intended once the registry mechanism is implemented.
    public static Symbol cacheProvider = SystemARQ.allocSymbol("cacheProvider");

}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ChainingServiceExecutorBulkCache.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ChainingServiceExecutorBulkCache.java
new file mode 100644
index 0000000000..7c385cb678
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ChainingServiceExecutorBulkCache.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.util.Optional;
+
+import org.apache.jena.atlas.iterator.IteratorCloseable;
+import org.apache.jena.graph.Node;
+import org.apache.jena.sparql.algebra.op.OpService;
+import org.apache.jena.sparql.engine.ExecutionContext;
+import org.apache.jena.sparql.engine.QueryIterator;
+import org.apache.jena.sparql.engine.binding.Binding;
+import org.apache.jena.sparql.service.bulk.ChainingServiceExecutorBulk;
+import org.apache.jena.sparql.service.bulk.ServiceExecutorBulk;
+import org.apache.jena.sparql.service.enhancer.init.ServiceEnhancerConstants;
+import org.apache.jena.sparql.util.Context;
+
+/** Do not register directly - use {@link ChainingServiceExecutorBulkServiceEnhancer} which gives more control over
+ * when to use this in a service executor chain */
+public class ChainingServiceExecutorBulkCache
+    implements ChainingServiceExecutorBulk {
+
+    public static final int DEFAULT_BULK_SIZE = 30;
+    public static final int MAX_BULK_SIZE = 100;
+    public static final int DEFAULT_MAX_BYTE_SIZE = 5000;
+
+    protected int bulkSize;
+    protected CacheMode cacheMode;
+
+    public ChainingServiceExecutorBulkCache(int bulkSize, CacheMode cacheMode) {
+        super();
+        this.cacheMode = cacheMode;
+        this.bulkSize = bulkSize;
+    }
+
+    @Override
+    public QueryIterator createExecution(OpService original, QueryIterator input, ExecutionContext execCxt,
+            ServiceExecutorBulk chain) {
+
+        Context cxt = execCxt.getContext();
+        // int bulkSize = cxt.getInt(InitServiceEnhancer.serviceBulkMaxBindingCount, DEFAULT_BULK_SIZE);
+        ServiceResponseCache serviceCache = CacheMode.OFF.equals(cacheMode)
+                ? null
+                : ServiceResponseCache.get(cxt);
+
+        OpServiceInfo serviceInfo = new OpServiceInfo(original);
+
+        ServiceResultSizeCache resultSizeCache = Optional.ofNullable(cxt.<ServiceResultSizeCache>
+                get(ServiceEnhancerConstants.serviceResultSizeCache))
+                .orElseGet(ServiceResultSizeCache::new);
+
+        OpServiceExecutorImpl opExecutor = new OpServiceExecutorImpl(serviceInfo.getOpService(), execCxt, chain);
+
+        RequestScheduler<Node, Binding> scheduler = new RequestScheduler<>(serviceInfo::getSubstServiceNode, bulkSize);
+        IteratorCloseable<GroupedBatch<Node, Long, Binding>> inputBatchIterator = scheduler.group(input);
+
+        RequestExecutor exec = new RequestExecutor(opExecutor, serviceInfo, resultSizeCache, serviceCache, cacheMode, inputBatchIterator);
+
+        return exec;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ChainingServiceExecutorBulkServiceEnhancer.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ChainingServiceExecutorBulkServiceEnhancer.java
new file mode 100644
index 0000000000..935b1c772d
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ChainingServiceExecutorBulkServiceEnhancer.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.util.List;
+import java.util.Map.Entry;
+
+import org.apache.jena.graph.Node;
+import org.apache.jena.graph.NodeFactory;
+import org.apache.jena.query.QueryExecException;
+import org.apache.jena.sparql.algebra.Op;
+import org.apache.jena.sparql.algebra.op.OpService;
+import org.apache.jena.sparql.engine.ExecutionContext;
+import org.apache.jena.sparql.engine.QueryIterator;
+import org.apache.jena.sparql.service.bulk.ChainingServiceExecutorBulk;
+import org.apache.jena.sparql.service.bulk.ServiceExecutorBulk;
+import org.apache.jena.sparql.service.enhancer.init.ServiceEnhancerConstants;
+import org.apache.jena.sparql.util.Context;
+
/**
 * Chaining service executor that inspects the options encoded in the SERVICE IRI
 * (such as loop, cache and bulk) and, when bulk retrieval or caching is requested,
 * routes execution through {@link ChainingServiceExecutorBulkCache}. Otherwise it
 * forwards the (rewritten) operation to the remainder of the chain.
 */
public class ChainingServiceExecutorBulkServiceEnhancer
    implements ChainingServiceExecutorBulk
{
    @Override
    public QueryIterator createExecution(OpService opService, QueryIterator input, ExecutionContext execCxt,
            ServiceExecutorBulk chain) {

        QueryIterator result;
        Node node = opService.getService();
        // Decompose the service node into a prefix of key/value options plus the remaining IRI
        List<Entry<String, String>> opts = ServiceOpts.parseAsOptions(node);

        boolean enableBulk = false;

        // Effective bulk size; remains 1 unless the bulk option is present
        int bulkSize = 1;

        // null means the cache option was absent (distinct from an explicit CacheMode.OFF)
        CacheMode cacheMode = null;
        Context cxt = execCxt.getContext();
        int n = opts.size();
        int i = 0;
        // Consume recognized options from the front of the list; stop at the first unknown key
        outer: for (; i < n; ++i) {
            Entry<String, String> opt = opts.get(i);
            String key = opt.getKey();
            String val = opt.getValue();

            switch (key) {
            case ServiceOpts.SO_LOOP:
                // Loop (lateral join) is handled on the algebra level
                // nothing to do here except for suppressing forwarding
                // to the remainder of the chain
                break;
            case ServiceOpts.SO_CACHE: // Enables caching
                String v = val == null ? "" : val.toLowerCase();

                switch (v) {
                case "off": cacheMode = CacheMode.OFF; break;
                case "clear": cacheMode = CacheMode.CLEAR; break;
                default: cacheMode = CacheMode.DEFAULT; break;
                }

                break;
            case ServiceOpts.SO_BULK: // Enables bulk requests
                enableBulk = true;

                // The context supplies the default bulk size and an upper bound for it
                int maxBulkSize = cxt.get(ServiceEnhancerConstants.serviceBulkMaxBindingCount, ChainingServiceExecutorBulkCache.MAX_BULK_SIZE);
                bulkSize = cxt.get(ServiceEnhancerConstants.serviceBulkBindingCount, ChainingServiceExecutorBulkCache.DEFAULT_BULK_SIZE);
                try {
                    if (val == null || val.isBlank()) {
                        // Ignored
                    } else {
                        bulkSize = Integer.parseInt(val);
                    }
                } catch (Exception e) {
                    throw new QueryExecException("Failed to configure bulk size", e);
                }
                // Clamp the requested size to the range [1, maxBulkSize]
                bulkSize = Math.max(Math.min(bulkSize, maxBulkSize), 1);
                break;
            default:
                break outer;
            }
        }

        // Rebuild the service IRI from the options that were not consumed above
        List<Entry<String, String>> subList = opts.subList(i, n);
        String serviceStr = ServiceOpts.unparse(subList);
        OpService newOp = null;
        if (serviceStr.isEmpty()) {
            // All options consumed: unwrap a directly nested SERVICE if present,
            // otherwise target the SELF reference
            Op subOp = opService.getSubOp();
            if (subOp instanceof OpService) {
                newOp = (OpService)subOp;
            } else {
                serviceStr = ServiceEnhancerConstants.SELF.getURI();
            }
        }

        if (newOp == null) {
            node = NodeFactory.createURI(serviceStr);
            newOp = new OpService(node, opService.getSubOp(), opService.getSilent());
        }

        CacheMode effCacheMode = CacheMode.effectiveMode(cacheMode);

        boolean enableSpecial = effCacheMode != CacheMode.OFF || enableBulk; // || enableLoopJoin; // || !overrides.isEmpty();

        if (enableSpecial) {
            // NOTE(review): the raw (possibly null) cacheMode is passed rather than effCacheMode;
            // ChainingServiceExecutorBulkCache treats null differently from an explicit OFF
            // (it still resolves the response cache) - confirm this is intended.
            ChainingServiceExecutorBulkCache exec = new ChainingServiceExecutorBulkCache(bulkSize, cacheMode);
            result = exec.createExecution(newOp, input, execCxt, chain);
        } else {
            result = chain.createExecution(newOp, input, execCxt);
        }

        return result;
    }
}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/Estimate.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/Estimate.java
new file mode 100644
index 0000000000..53e0969bd8
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/Estimate.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.io.Serializable;
+import java.util.Objects;
+
+/** An estimated value with a flag that indicates whether it's exact */
+public class Estimate<T>
+    implements Serializable
+{
+    private static final long serialVersionUID = 1L;
+
+    protected boolean isExact;
+    protected T value;
+
+    public Estimate(T value, boolean isExact) {
+        super();
+        this.value = value;
+        this.isExact = isExact;
+    }
+
+    public boolean isExact() {
+        return isExact;
+    }
+
+    public T getValue() {
+        return value;
+    }
+
+    @Override
+    public String toString() {
+        return "Estimate [isExact=" + isExact + ", value=" + value + "]";
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(isExact, value);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        Estimate<?> other = (Estimate<?>) obj;
+        return isExact == other.isExact && Objects.equals(value, other.value);
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/GroupedBatch.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/GroupedBatch.java
new file mode 100644
index 0000000000..5d7350fe55
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/GroupedBatch.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
/** Interface that combines a group key with a {@link Batch} */
public interface GroupedBatch<G, K extends Comparable<K>, V> {
    /** The key identifying the group this batch belongs to. */
    G getGroupKey();
    /** The batch of items of type V, indexed by keys of type K. */
    Batch<K, V> getBatch();
}
\ No newline at end of file
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/GroupedBatchImpl.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/GroupedBatchImpl.java
new file mode 100644
index 0000000000..1853ed0d63
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/GroupedBatchImpl.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+/**
+ * Implementation that combines a batch with a group key.
+ */
+public class GroupedBatchImpl<G, K extends Comparable<K>, V>
+    implements GroupedBatch<G, K, V>
+{
+    protected G groupKey;
+    protected Batch<K, V> batch;
+
+    public GroupedBatchImpl(G groupKey, Batch<K, V> batch) {
+        super();
+        this.groupKey = groupKey;
+        this.batch = batch;
+    }
+
+    @Override
+    public G getGroupKey() {
+        return groupKey;
+    }
+
+    @Override
+    public Batch<K, V> getBatch() {
+        return batch;
+    }
+
+    @Override
+    public String toString() {
+        return "GroupedBatchImpl [groupKey=" + groupKey + ", batch=" + batch + "]";
+    }
+}
\ No newline at end of file
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/IteratorFactoryWithBuffer.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/IteratorFactoryWithBuffer.java
new file mode 100644
index 0000000000..381b917a97
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/IteratorFactoryWithBuffer.java
@@ -0,0 +1,293 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.SortedSet;
+import java.util.stream.IntStream;
+
+import org.apache.jena.ext.com.google.common.collect.MultimapBuilder;
+import org.apache.jena.ext.com.google.common.collect.PeekingIterator;
+import org.apache.jena.ext.com.google.common.collect.SetMultimap;
+import org.apache.jena.ext.com.google.common.primitives.Ints;
+import org.apache.jena.sparql.service.enhancer.impl.util.SinglePrefetchIterator;
+
/**
 * Buffering iterator. Can buffer an arbitrary amount ahead.
 *
 * Single producer multi consumer style.
 *
 * @param <T> The item type of this iterator
 * @param <I> The type of the underlying iterator
 */
public class IteratorFactoryWithBuffer<T, I extends Iterator<T>>
{
    // Guards all access to buffer, absBufferOffset and offsetToChild
    protected final Object lock = new Object();

    // The shared underlying iterator; each of its items is read at most once
    protected I delegate;
    // Items read from the delegate that a lagging sub-iterator may still need; null while no buffering is active
    protected List<T> buffer = null;
    // Absolute offset (counted from the delegate's start) of the first element in 'buffer'
    protected long absBufferOffset = 0;
    // Registry of open sub-iterators keyed by their current absolute offset; tree keys keep offsets sorted
    protected SetMultimap<Long, Iterator<T>> offsetToChild = MultimapBuilder.treeKeys().hashSetValues().build();

    public IteratorFactoryWithBuffer(I delegate) {
        this.delegate = delegate;
    }

    /** Wrap an iterator; returns the initial sub-iterator positioned at the delegate's current element. */
    public static <T, I extends Iterator<T>> SubIterator<T, I> wrap(I delegate) {
        SubIterator<T, I> result = new IteratorFactoryWithBuffer<>(delegate).createSubIterator(false);
        return result;
    }

    // Create a sub-iterator at the given absolute offset and register it as open
    protected SubIterator<T, I> createSubIterator(long offset) {
        SubIterator<T, I> result;
        synchronized (lock) {
            result = new SubIteratorImpl(offset);
            offsetToChild.put(offset, result);
        }
        return result;
    }

    /**
     * Create a new sub-iterator.
     *
     * @param startAtUnbuffered If true, start after the currently buffered items (at the delegate's
     *        position); otherwise start at the beginning of the buffer.
     */
    public SubIterator<T, I> createSubIterator(boolean startAtUnbuffered) {
        SubIterator<T, I> result;
        synchronized (lock) {
            long offset;
            offset = absBufferOffset;
            if (buffer != null && startAtUnbuffered) {
                offset += buffer.size();
            }

            // If the delegate is exhausted then position at the next element
//            if (!delegate.hasNext()) {
//                ++offset;
//            }

            result = createSubIterator(offset);
        }
        return result;
    }

    /** A positioned view over the shared delegate iterator. Must be closed to release its buffer claim. */
    public interface SubIterator<T, I extends Iterator<T>>
        extends PeekingIterator<T>, AutoCloseable {

        I getDelegate();

        /**
         * Should return the absolute offset (starting at 0) of the next item being returned by a call to next();
         * ISSUE Guava AbstractIterator doesn't allow for checking whether hasNext has been called... - so
         * if hasNext has been called then the offset will point to the next element!
         */
        long getOffset();

        /**
         * Return how far this iterator is ahead of the iterator with the lowest offset.
         * If there is no other iterator than the distance is 0.
         *
         * This is also the amount of buffering used.
         */
        long getDistanceToLowestOffset();

        @Override
        void close();

        /** Create an iterator with the same next item as this one. Items are buffered as long as there exists an
         *  open iterator with a lower offset */
        SubIterator<T, I> createSubIterator(boolean startAtUnbuffered);

        default SubIterator<T, I> subIteratorAtStartOfBuffer() { return createSubIterator(false); }
        default SubIterator<T, I> subIteratorAtEndOfBuffer() { return createSubIterator(true); }
    }

    protected class SubIteratorImpl
        extends SinglePrefetchIterator<T>
        implements SubIterator<T, I>
    {
        // This sub-iterator's current absolute position in the delegate's item sequence
        protected long absOffset;

        @Override
        public I getDelegate() {
            return delegate;
        }

        public SubIteratorImpl(long absOffset) {
            super();
            this.absOffset = absOffset;
        }

        @Override
        public SubIterator<T, I> createSubIterator(boolean startAtUnbuffered) {
            @SuppressWarnings("resource") // The result must be closed by the caller
            SubIterator<T, I> result = isOpen()
                    ? startAtUnbuffered
                            ? IteratorFactoryWithBuffer.this.createSubIterator(startAtUnbuffered)
                            : IteratorFactoryWithBuffer.this.createSubIterator(absOffset)
                    : new SubIteratorImpl(absOffset); // Create an iterator without registration => considered closed

            return result;
        }

        @Override
        public long getOffset() {
            // If an item was prefetched but not picked up via next() then return the previous index
            long d = wasHasNextCalled() ? 1 : 0;
            return absOffset - d;
        }

        // An iterator counts as open while it is registered under its current offset
        protected boolean isOpen() {
            return offsetToChild.containsEntry(absOffset, this);
        }

        // Fetch the next item: serve it from the buffer if this iterator lags behind,
        // otherwise advance the delegate (buffering the item if other iterators exist).
        @Override
        protected T prefetch() {

            boolean isEndOfData = false;
            T result = null;
            synchronized (lock) {
                // If closed
                if (!isOpen()) {
                    return finish();
                }

                // A sub-iterator must never be positioned before the buffer's start
                if (absOffset < absBufferOffset) {
                    throw new IllegalStateException();
                }

                int relOffset = Ints.checkedCast(absOffset - absBufferOffset);

                long bufferSize = buffer == null ? 0 : buffer.size();

                if (relOffset < bufferSize) {
                    // Serve item from the buffer
                    result = buffer.get(relOffset);
                    ++absOffset;
                } else if (relOffset == bufferSize) {
                    if (delegate.hasNext()) {
                        result = delegate.next();

                        // If there is another child iterator then buffer the item
                        if (offsetToChild.size() > 1) {
                            if (buffer == null) {
                                buffer = new ArrayList<>();
                                absBufferOffset = absOffset;
                            }
                            buffer.add(result);
                        } else {
                            // Buffer exhausted - clear it
                            buffer = null;
                        }

                        ++absOffset;
                    } else {
                        isEndOfData = true;
                    }
                } else {
                    // If sub iterator is created from a finished iterator it comes here
                    isEndOfData = true;
                    // throw new IllegalStateException();
                }

                if (isEndOfData) {
                    result = finish();
                    close();
                } else {
                    // Re-register this iterator under its advanced offset
                    offsetToChild.remove(absOffset - 1, this);
                    offsetToChild.put(absOffset, this);

                    if (buffer == null) {
                        absBufferOffset = absOffset;
                    }
                    checkShrink();
                }
            }

            return result;
        }

        @Override
        public void close() {
            synchronized (lock) {
                // Deregister; once no iterator lags behind, buffered items can be released
                offsetToChild.remove(absOffset, this);
                checkShrink();
            }
        }

        @Override
        public long getDistanceToLowestOffset() {
            // Tree keys make the key set sorted; its first entry is the lowest registered offset
            SortedSet<Long> keys = (SortedSet<Long>)offsetToChild.asMap().keySet();

            long first = keys.isEmpty()
                ? absOffset
                : keys.first();

            long result = absOffset - first;
            return result;
        }

        @Override
        public T peek() {
            return current();
        }
    }

    protected void checkShrink() {
        // TODO For completeness shrink e.g. if the needed size of the buffer has halved
        // SortedSet<Long> keys = (SortedSet<Long>)offsetToChild.asMap().keySet();
        // if (keys.first()) {
        // }
    }

    // NOTE(review): ad-hoc demo of the buffering behavior; consider moving this into a unit test
    // and removing the main method from the library class.
    public static void main(String[] args) {
        Iterator<Integer> base = IntStream.range(0, 5).iterator();
        IteratorFactoryWithBuffer<Integer, ?> factory = new IteratorFactoryWithBuffer<>(base);

        try (SubIterator<Integer, ?> primary = factory.createSubIterator(false)) {

            for (int i = 0; i < 2; ++i) {
                System.out.println("primary: " + primary.next());
            }

            try (SubIterator<Integer, ?> secondary = primary.createSubIterator(false)) {
                for (int i = 0; i < 2; ++i) {
                    System.out.println("secondary: " + secondary.next());
                }

                // secondary.close();
                try (SubIterator<Integer, ?> ternary = secondary.createSubIterator(false)) {
                    while (ternary.hasNext()) {
                        System.out.println("ternary: " + ternary.next());
                    }

                    while (primary.hasNext()) {
                        System.out.println("primary: " + primary.next());
                    }

                    while (secondary.hasNext()) {
                        System.out.println("secondary: " + secondary.next());
                    }

                    System.out.println(primary.getOffset());
                    System.out.println(secondary.getOffset());
                    System.out.println(ternary.getOffset());
                }
            }
        }

    }
}
\ No newline at end of file
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/OpServiceExecutor.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/OpServiceExecutor.java
new file mode 100644
index 0000000000..aafdc17995
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/OpServiceExecutor.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import org.apache.jena.sparql.algebra.op.OpService;
+import org.apache.jena.sparql.engine.QueryIterator;
+
/** Interface for directly executing {@link OpService} instances */
@FunctionalInterface
public interface OpServiceExecutor {
    /** Execute the given service operation and return an iterator over the resulting bindings. */
    QueryIterator exec(OpService opService);
}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/OpServiceExecutorImpl.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/OpServiceExecutorImpl.java
new file mode 100644
index 0000000000..935d92350e
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/OpServiceExecutorImpl.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import org.apache.jena.atlas.logging.Log;
+import org.apache.jena.query.QueryExecException;
+import org.apache.jena.riot.out.NodeFmtLib;
+import org.apache.jena.sparql.algebra.op.OpService;
+import org.apache.jena.sparql.engine.ExecutionContext;
+import org.apache.jena.sparql.engine.QueryIterator;
+import org.apache.jena.sparql.engine.binding.Binding;
+import org.apache.jena.sparql.engine.binding.BindingFactory;
+import org.apache.jena.sparql.engine.iterator.QueryIter;
+import org.apache.jena.sparql.engine.iterator.QueryIterSingleton;
+import org.apache.jena.sparql.service.bulk.ServiceExecutorBulk;
+
+/** Helper class to simplify executing concrete OpService instances */
+public class OpServiceExecutorImpl
+    implements OpServiceExecutor
+{
+    protected OpService originalOp;
+    protected ExecutionContext execCxt;
+    protected ServiceExecutorBulk delegate;
+
+    public OpServiceExecutorImpl(OpService opService, ExecutionContext execCxt, ServiceExecutorBulk delegate) {
+        this.originalOp = opService;
+        this.execCxt = execCxt;
+        this.delegate = delegate;
+    }
+
+    public ExecutionContext getExecCxt() {
+        return execCxt;
+    }
+    
+    @Override
+    public QueryIterator exec(OpService substitutedOp) {
+        QueryIterator result;
+        Binding input = BindingFactory.binding();
+        boolean silent = originalOp.getSilent();
+
+        try {
+            QueryIterator singleton = QueryIterSingleton.create(BindingFactory.root(), execCxt);
+            result = delegate.createExecution(substitutedOp, singleton, execCxt);
+
+            // ---- Execute
+            if (result == null) {
+                throw new QueryExecException("No SERVICE handler");
+            }
+
+            result = QueryIter.makeTracked(result, execCxt);
+            // Need to put the outerBinding as parent to every binding of the service call.
+            // There should be no variables in common because of the OpSubstitute.substitute
+            // return new QueryIterCommonParent(qIter, outerBinding, getExecContext());
+        } catch (RuntimeException ex) {
+            if ( silent ) {
+                Log.warn(this, "SERVICE " + NodeFmtLib.strTTL(substitutedOp.getService()) + " : " + ex.getMessage());
+                // Return the input
+                result = QueryIterSingleton.create(input, execCxt);
+
+            }
+            throw ex;
+        }
+
+        return result;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/OpServiceInfo.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/OpServiceInfo.java
new file mode 100644
index 0000000000..adcb7e9eb5
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/OpServiceInfo.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.jena.ext.com.google.common.collect.BiMap;
+import org.apache.jena.graph.Node;
+import org.apache.jena.query.Query;
+import org.apache.jena.sparql.algebra.Op;
+import org.apache.jena.sparql.algebra.OpAsQuery;
+import org.apache.jena.sparql.algebra.OpVars;
+import org.apache.jena.sparql.algebra.op.OpService;
+import org.apache.jena.sparql.algebra.op.OpSlice;
+import org.apache.jena.sparql.core.Var;
+import org.apache.jena.sparql.core.VarExprList;
+import org.apache.jena.sparql.engine.binding.Binding;
+import org.apache.jena.sparql.expr.Expr;
+import org.apache.jena.sparql.graph.NodeTransformLib;
+import org.apache.jena.sparql.service.enhancer.impl.util.VarScopeUtils;
+import org.apache.jena.sparql.syntax.syntaxtransform.NodeTransformSubst;
+
/**
 * Class used to map a given scoped OpService to a normalized form. Several methods abbreviate
 * normalized with normed.
 * A normalized query with a non-empty set of variables always has variables at scope level 0.
 */
public class OpServiceInfo {
    // The original opService which is assumed to make use of scoped variables
    protected OpService opService;

    // Cache of the service node / var for easier access
    protected Node serviceNode;
    protected Var serviceVar;

    // Maps every variable mentioned in the sub-op (scoped form) to its normalized form
    protected BiMap<Var, Var> mentionedSubOpVarsScopedToNormed;

    // The restored query of opService.getSubOp() without scoping and without slice
    protected Query normedQuery;

    // Compiled algebra of rawQuery; Algebra.compile(rawQuery)
    protected Op normedQueryOp;

    // Limit and offset that effectively applies to rawQuery
    protected long limit;
    protected long offset;

    // Mapping of visible variables of rawQuery to the visible (possibly scoped) ones in opService
    protected BiMap<Var, Var> visibleSubOpVarsScopedToNorm;

    /**
     * Analyze the given (scoped) OpService: peel off a top-level slice (capturing
     * limit/offset), normalize the variable scopes of the sub-op, and derive a
     * plain Query form plus the scoped&lt;-&gt;normalized variable mappings.
     */
    public OpServiceInfo(OpService opService) {
        this.opService = opService ;

        this.serviceNode = opService.getService();
        this.serviceVar = serviceNode.isVariable() ? (Var)serviceNode: null;

        // Get the variables used in the service clause (excluding the possible one for the service iri)
        Op baseSubOp = opService.getSubOp();

        if (baseSubOp instanceof OpSlice) {
            // A top-level slice is captured here and stripped from the normalized op
            OpSlice slice = (OpSlice)baseSubOp;
            baseSubOp = slice.getSubOp();
            this.offset = slice.getStart();
            this.limit = slice.getLength();
        } else {
            this.limit = Query.NOLIMIT;
            // NOTE(review): offset also defaults to Query.NOLIMIT (-1) rather than 0;
            // presumably consumers treat any value <= 0 as "no offset" - confirm.
            this.offset = Query.NOLIMIT;
        }

        Collection<Var> mentionedSubOpVars = OpVars.mentionedVars(baseSubOp);
        // mentionedSubOpVarsScopedToNormed = VarUtils.normalizeVarScopesGlobal(mentionedSubOpVars);
        mentionedSubOpVarsScopedToNormed = VarScopeUtils.normalizeVarScopes(mentionedSubOpVars);

        // Rewrite the sub-op so that all scoped variables are replaced by their normalized form
        normedQueryOp = NodeTransformLib.transform(new NodeTransformSubst(mentionedSubOpVarsScopedToNormed), baseSubOp);

        // Handling of a null subOp - can that happen?
        Set<Var> visibleSubOpVars = OpVars.visibleVars(baseSubOp);
        this.visibleSubOpVarsScopedToNorm = VarScopeUtils.normalizeVarScopesGlobal(visibleSubOpVars);

        this.normedQuery = OpAsQuery.asQuery(normedQueryOp);

        // Replace any allocated (internal) variables in the projection with
        // freshly named "__avN__" variables and update the mappings accordingly.
        VarExprList vel = normedQuery.getProject();
        VarExprList newVel = new VarExprList();

        int allocId = 0;
        for (Var var : vel.getVars()) {
            Expr expr = vel.getExpr(var);
            if (Var.isAllocVar(var)) {
                Var tmp = Var.alloc("__av" + (++allocId) + "__");
                mentionedSubOpVarsScopedToNormed.put(var, tmp);
                visibleSubOpVarsScopedToNorm.put(tmp, tmp);
                // visibleSubOpVarsScopedToPlain.put(var, tmp);
                // mentionedSubOpVarsScopedToPlain.put(var, tmp);
                var = tmp;
            }
            newVel.add(var, expr);
        }
        // Replace the query's projection in place with the rewritten one
        vel.clear();
        vel.addAll(newVel);
    }

    public OpService getOpService() {
        return opService;
    }

    public Node getServiceNode() {
        return serviceNode;
    }

    /** Resolve the concrete service node: the fixed node, or the binding's value when the service is a variable. */
    public Node getSubstServiceNode(Binding binding) {
        Node result = serviceVar == null ? serviceNode : binding.get(serviceVar);
        return result;
    }

    public Var getServiceVar() {
        return serviceVar;
    }

    public Query getNormedQuery() {
        return normedQuery;
    }

    public Op getNormedQueryOp() {
        return normedQueryOp;
    }

    public long getLimit() {
        return limit;
    }

    public long getOffset() {
        return offset;
    }

    /** The scoped forms of the variables visible in the sub-op. */
    public Set<Var> getVisibleSubOpVarsScoped() {
        return mentionedSubOpVarsScopedToNormed;
    }

    public Map<Var, Var> getMentionedSubOpVarsScopedToNormed() {
        return mentionedSubOpVarsScopedToNormed;
    }

    /** Inverse view: normalized visible variables back to their scoped forms. */
    public Map<Var, Var> getVisibleSubOpVarsNormedToScoped() {
        return visibleSubOpVarsScopedToNorm.inverse();
    }
}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/PartitionRequest.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/PartitionRequest.java
new file mode 100644
index 0000000000..2c08334955
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/PartitionRequest.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+/** Helper class to capture a range of data w.r.t. a partition key (typically a binding) and assign it an id */
+public class PartitionRequest<I>
+{
+    protected long outputId;
+    protected I partitionKey;
+    protected long offset;
+    protected long limit;
+
+    public PartitionRequest(
+            long outputId,
+            I partition,
+            long offset,
+            long limit) {
+        super();
+        this.outputId = outputId;
+        this.partitionKey = partition;
+        this.offset = offset;
+        this.limit = limit;
+    }
+
+    public long getOutputId() {
+        return outputId;
+    }
+
+    public I getPartitionKey() {
+        return partitionKey;
+    }
+
+    public long getOffset() {
+        return offset;
+    }
+
+    public long getLimit() {
+        return limit;
+    }
+
+    public boolean hasOffset() {
+        return offset > 0;
+    }
+
+    public boolean hasLimit() {
+        return limit >= 0 && limit < Long.MAX_VALUE;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/QueryIterServiceBulk.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/QueryIterServiceBulk.java
new file mode 100644
index 0000000000..695236aeb2
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/QueryIterServiceBulk.java
@@ -0,0 +1,757 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.SortedMap;
+import java.util.concurrent.locks.Lock;
+import java.util.function.Supplier;
+
+import org.apache.jena.atlas.iterator.Iter;
+import org.apache.jena.atlas.iterator.IteratorCloseable;
+import org.apache.jena.atlas.iterator.IteratorOnClose;
+import org.apache.jena.atlas.lib.Closeable;
+import org.apache.jena.atlas.logging.Log;
+import org.apache.jena.ext.com.google.common.collect.Iterators;
+import org.apache.jena.ext.com.google.common.collect.Range;
+import org.apache.jena.ext.com.google.common.collect.RangeMap;
+import org.apache.jena.ext.com.google.common.collect.RangeSet;
+import org.apache.jena.ext.com.google.common.collect.TreeBasedTable;
+import org.apache.jena.ext.com.google.common.collect.TreeRangeMap;
+import org.apache.jena.ext.com.google.common.collect.TreeRangeSet;
+import org.apache.jena.ext.com.google.common.math.LongMath;
+import org.apache.jena.graph.Node;
+import org.apache.jena.query.Query;
+import org.apache.jena.sparql.algebra.Algebra;
+import org.apache.jena.sparql.algebra.Op;
+import org.apache.jena.sparql.algebra.op.OpService;
+import org.apache.jena.sparql.core.Var;
+import org.apache.jena.sparql.engine.ExecutionContext;
+import org.apache.jena.sparql.engine.QueryIterator;
+import org.apache.jena.sparql.engine.binding.Binding;
+import org.apache.jena.sparql.engine.binding.BindingFactory;
+import org.apache.jena.sparql.engine.iterator.QueryIter;
+import org.apache.jena.sparql.engine.iterator.QueryIterConvert;
+import org.apache.jena.sparql.engine.iterator.QueryIterNullIterator;
+import org.apache.jena.sparql.engine.iterator.QueryIterPeek;
+import org.apache.jena.sparql.engine.iterator.QueryIterPlainWrapper;
+import org.apache.jena.sparql.engine.iterator.QueryIteratorMapped;
+import org.apache.jena.sparql.expr.NodeValue;
+import org.apache.jena.sparql.service.enhancer.claimingcache.RefFuture;
+import org.apache.jena.sparql.service.enhancer.impl.IteratorFactoryWithBuffer.SubIterator;
+import org.apache.jena.sparql.service.enhancer.impl.util.BindingUtils;
+import org.apache.jena.sparql.service.enhancer.impl.util.QueryIterDefer;
+import org.apache.jena.sparql.service.enhancer.impl.util.QueryIterSlottedBase;
+import org.apache.jena.sparql.service.enhancer.slice.api.IteratorOverReadableChannel;
+import org.apache.jena.sparql.service.enhancer.slice.api.ReadableChannel;
+import org.apache.jena.sparql.service.enhancer.slice.api.ReadableChannelOverSliceAccessor;
+import org.apache.jena.sparql.service.enhancer.slice.api.ReadableChannelWithLimit;
+import org.apache.jena.sparql.service.enhancer.slice.api.Slice;
+import org.apache.jena.sparql.service.enhancer.slice.api.SliceAccessor;
+import org.apache.jena.sparql.util.NodeFactoryExtra;
+
+/**
+ * QueryIter to process service requests in bulk with support for streaming caching.
+ *
+ * The methods closeIterator and moveToNext are synchronized.
+ *
+ */
+public class QueryIterServiceBulk
+    extends QueryIterSlottedBase
+{
+    protected OpServiceInfo serviceInfo;
+    protected ServiceCacheKeyFactory cacheKeyFactory;
+
+    protected BatchQueryRewriter batchQueryRewriter;
+
+    protected OpServiceExecutor opExecutor;
+    protected ExecutionContext execCxt;
+
+    protected List<Binding> inputs;
+
+    protected ServiceResultSizeCache resultSizeCache;
+    protected ServiceResponseCache cache;
+
+    protected CacheMode cacheMode;
+
+    protected Node targetService;
+
+    protected int currentInputId = -1; // the binding currently being served from the batch
+    protected int currentRangeId = -1;
+
+    // The number of bindings served for the current inputId
+    protected long currentInputIdBindingsServed;
+
+    // Cached attribute from BatchQueryRewriter.getIdxVar()
+    protected Var idxVar;
+
+    protected int maxBufferSize = 100000;
+    protected int maxSkipCount = 10000;
+
+    // Cache items in blocks of that many bindings; avoids synchronization on every binding
+    protected int cacheBulkSize = 128;
+
+    protected TreeBasedTable<Integer, Integer, Integer> inputToRangeToOutput = TreeBasedTable.create();
+
+    // This is the reverse mapping of the table above; PartitionKey = (inputId, rangeId)
+    protected Map<Integer, SliceKey> outputToSliceKey = new HashMap<>();
+
+    // The set of outputIds that are served from the backend (absent means served from cache)
+    // protected Set<Integer> backendOutputs;
+    protected Set<SliceKey> sliceKeysForBackend = new HashSet<>(); // The partitions served from the backend
+
+    // The query iterator of the active bulk request
+    protected SubIterator<Binding, QueryIterator> backendIt;
+
+    protected Map<SliceKey, QueryIterPeek> sliceKeyToIter = new HashMap<>();
+
+    protected Estimate<Long> backendResultSetLimit;
+
+    // Close a sliceKey's iterator upon exhaustion if the slice key is in this set
+    protected Set<SliceKey> sliceKeyToClose = new HashSet<>();
+
+    public QueryIterServiceBulk(
+            OpServiceInfo serviceInfo,
+            BatchQueryRewriter batchQueryRewriter,
+            ServiceCacheKeyFactory cacheKeyFactory,
+            OpServiceExecutor opExecutor,
+            ExecutionContext execCxt,
+            List<Binding> inputs,
+            ServiceResultSizeCache resultSizeCache,
+            ServiceResponseCache cache,
+            CacheMode cacheMode
+        ) {
+        this.serviceInfo = serviceInfo;
+        this.cacheKeyFactory = cacheKeyFactory;
+        this.opExecutor = opExecutor;
+        this.execCxt = execCxt;
+        this.inputs = inputs;
+        this.resultSizeCache = resultSizeCache;
+        this.cacheMode = cacheMode;
+        this.cache = cache;
+        this.batchQueryRewriter = batchQueryRewriter;
+
+        this.idxVar = batchQueryRewriter.getIdxVar();
+        this.targetService = serviceInfo.getServiceNode();
+    }
+
+    public Var getIdxVar() {
+        return idxVar;
+    }
+
+    protected void advanceInput(boolean resetRangeId) {
+        ++currentInputId;
+        currentInputIdBindingsServed = 0;
+
+        if (resetRangeId) {
+            currentRangeId = 0;
+        }
+    }
+
    /**
     * Produce the next result binding, or null when all partitions are exhausted.
     *
     * Iterates over the per-slice iterators (keyed by (inputId, rangeId)), enforcing
     * any known backend result-set limit, skipping or re-issuing requests where
     * needed, and finally merging the matching child binding with its input binding.
     * The returned binding carries the input id in {@code idxVar}.
     * Synchronized together with {@link #closeIterator()}.
     */
    @Override
    protected synchronized Binding moveToNext() {
        Binding mergedBindingWithIdx = null;

        // One time init
        if (currentInputId < 0) {
            ++currentInputId;
            currentRangeId = 0;
            prepareNextBatchExec(false);
        }

        // Peek the next binding on the active iterator and verify that it maps to the current
        // partition key
        outer: while (true) {

            SliceKey partKey = new SliceKey(currentInputId, currentRangeId);
            QueryIterPeek activeIt = sliceKeyToIter.get(partKey);

            if (activeIt == null) {
                // Must advance to next scheduled iterator (may turn out that there is none)
                break;
            }

            boolean isBackendIt = sliceKeysForBackend.contains(partKey);

            // A backend iterator that ends without the end marker implies the remote
            // endpoint cut the result set: record the observed size as an exact limit.
            if (isBackendIt && !activeIt.hasNext()) {
                Log.debug(QueryIterServiceBulk.class, "Iterator ended without end marker - assuming remote result set limit reached");
                long seenBackendData = backendIt.getOffset();
                backendResultSetLimit = new Estimate<>(seenBackendData, true);
                if (seenBackendData <= 0) {
                    Log.warn(QueryIterServiceBulk.class, "Known result set limit of " + seenBackendData + " detected");
                }

                resultSizeCache.updateLimit(targetService, backendResultSetLimit);

                // We obtained too little data for the current id - repeat the request
                prepareNextBatchExec(false);
                continue;
            }

            // Refresh the result set limit in case there was a concurrent update
            if (backendResultSetLimit == null || currentInputIdBindingsServed >= backendResultSetLimit.getValue()) {
                backendResultSetLimit = resultSizeCache.getLimit(targetService);

                // totalNeededBackendRowCount: The number of rows the backend needs to deliver before we can serve any
                // additional rows to the client.
                @SuppressWarnings("unused") // For debugging
                long totalNeededBackendRowCount;
                long obtainedRowCount = 0;

                // The following loop decides whether another binding can be served from activeIt without
                // violating result size limits
                // The 'worst' outcome is that the request needs to be repeated because the result size
                // limit could not be determined within the thresholds

                // If the number of served bindings equals the backend result set limit we need at least one more binding
                while ((totalNeededBackendRowCount = (currentInputIdBindingsServed - backendResultSetLimit.getValue() + 1)) > 0) {

                    long remainingNeededBackendRowCount = 0;

                    // TODO We could rely on the backend iterator to update the cache with the known size

                    // If the limit is unknown we can try to buffer in the hope to make it known
                    // If the limit is then still unknown we need to reset
                    if (backendIt != null && !backendResultSetLimit.isExact()) {
                        // Log.debug(QueryIterServiceBulk.class, String.format("Analyzing result set size limit whether %d bindings can be served. Current limit %d", currentInputIdBindingsServed, backendResultSetLimit.getValue()));

                        // Subtract the rows that have already been delivered by the backend
                        try (SubIterator<Binding, ?> subIt = backendIt.subIteratorAtEndOfBuffer()) {

                            long deliveredBackendRowCount = subIt.getOffset();
                            remainingNeededBackendRowCount = backendResultSetLimit.getValue() - deliveredBackendRowCount + 1;

                            // If there is insufficient buffer available we can still try whether we see a result set limit
                            // alternatively we could just set resetRequest to true

                            boolean isResultSetLimitReached = false; // reached end without seeing the end marker
                            while (obtainedRowCount < remainingNeededBackendRowCount) { // Repeat until we can serve another binding

                                if (subIt.hasNext()) {
                                    Binding binding = subIt.next();
                                    int inputId = getPartKeyFromBinding(binding).getInputId();
                                    boolean isEndMarkerSeen = BatchQueryRewriter.isRemoteEndMarker(inputId);
                                    if (isEndMarkerSeen) {
                                        // Ensure subIt's offset points past the end marker
                                        Iterators.size(subIt);
                                        break;
                                    } else {
                                        ++obtainedRowCount;
                                    }
                                } else {
                                    isResultSetLimitReached = true;
                                    break;
                                }
                            }

                            // Record what we learned about the backend's effective result set size
                            long seenBackendData = subIt.getOffset();
                            backendResultSetLimit = new Estimate<>(seenBackendData, isResultSetLimitReached);
                            resultSizeCache.updateLimit(targetService, backendResultSetLimit);
                        }

                        if (obtainedRowCount < remainingNeededBackendRowCount) {
                            // This creates a request that bypasses the cache on the first input
                            // (i.e. may retrieve data previously served from the cache)
                            // but also cuts away already retrieved (and returned) bindings.
                            prepareNextBatchExec(true);
                            continue outer;
                        }
                    }

                    // Check if we are going to serve too many results for the current binding
                    if (backendResultSetLimit.isExact() && currentInputIdBindingsServed >= backendResultSetLimit.getValue()) {
                        // Skip until we reach the next id
                        // If we need data from cache we can just increment currentInputId
                        // If we need to serve from the backend then try to skip
                        if (isBackendIt) {
                            int skipCount = 0;

                            while (backendIt.hasNext() && skipCount++ < maxSkipCount) {
                                Binding peek = backendIt.peek();
                                int peekInputId = getPartKeyFromBinding(peek).getInputId();

                                if (peekInputId != currentInputId) {
                                    advanceInput(true);
                                    continue outer;
                                } else {
                                    backendIt.next();
                                }
                            }

                            // Cut off the iterator so we move to the next input
                            activeIt = null; // QueryIterPeek.create(new QueryIterNullIterator(execCxt), execCxt);
                            break;
                        } else {
                            // Skip over the cache entry and skip to the next input
                            if (activeIt != null) {
                                activeIt.close();
                            }
                            advanceInput(true);
                            continue outer;
                        }
                    }

                    if (backendIt == null) {
                        prepareNextBatchExec(true);
                        activeIt = sliceKeyToIter.get(partKey);
                        // continue outer;
                    }

                    // Note: we only need to skip over excessive data once we need to fetch data from the backend iterator
                    // Conversely, we skip over iterators backed by the cache

                    // If the limit is known then we need to skip over excessive backend data
                    //   if there is too much data to skip at some point we give up and reset the request

                    // We need to skip ahead on iterator over the backend (not over the cache)
                    // skipOrReset(activeIt);
                }
            }

            // Try to serve the next binding from the active iterator, provided
            // it belongs to the current (inputId, rangeId) partition.
            if (activeIt != null) {
                if (activeIt.hasNext()) {
                    Binding peek = activeIt.peek();
                    int peekOutputId = BindingUtils.getNumber(peek, idxVar).intValue();
                    if (BatchQueryRewriter.isRemoteEndMarker(peekOutputId)) {
                        // Attempt to move to the next range
                        ++currentRangeId;
                        continue;
                    }

                    SliceKey sliceKey = outputToSliceKey.get(peekOutputId);

                    if (sliceKey == null) {
                        throw new IllegalStateException(
                                String.format("An output binding referred to an input id without corresponding input binding. Referenced input id %1$d, Output binding: %2$s", peekOutputId, peek));
                    }

                    boolean matchesCurrentPartition = sliceKey.getInputId() == currentInputId &&
                            sliceKey.getRangeId() == currentRangeId;

                    if (matchesCurrentPartition) {
                        Binding parentBinding = inputs.get(currentInputId);
                        Binding childBindingWithIdx = activeIt.next();

                        // Check for compatibility
                        mergedBindingWithIdx = Algebra.merge(parentBinding, childBindingWithIdx);
                        if (mergedBindingWithIdx == null) {
                            // Incompatible bindings - skip this child and keep looking
                            continue;
                        } else {
                            break;
                        }
                    }
                } else {
                    // If we come here it means:
                    // - no end marker was present
                    // - no more data available
                    // If our request ended prematurely fetch more data
                    //prepareNextBatchExec(false);
                    // continue;
                }
            }

            // Cleanup of no longer needed resources
            SliceKey sliceKey = new SliceKey(currentInputId, currentRangeId);

            if (sliceKeyToClose.contains(sliceKey)) {
                // System.out.println("Closing part key " + pk);
                Closeable closeable = sliceKeyToIter.get(sliceKey);
                closeable.close();
                sliceKeyToClose.remove(sliceKey);
            }
            inputToRangeToOutput.remove(currentInputId, currentRangeId);
            sliceKeyToIter.remove(sliceKey);

            // Increment rangeId/inputId until we reach the end
            ++currentRangeId;
            SortedMap<Integer, Integer> row = inputToRangeToOutput.row(currentInputId);
            if (!row.containsKey(currentRangeId)) {
                advanceInput(true);
            }

            // If there is still no further batch then we assume we reached the end
            if (!inputToRangeToOutput.containsRow(currentInputId)) {
                break;
            }
        }

        // Remove the idxVar from the childBinding
        Binding result = null;
        if (mergedBindingWithIdx != null) {
            ++currentInputIdBindingsServed;

            int outputId = BindingUtils.getNumber(mergedBindingWithIdx, idxVar).intValue();
            SliceKey pk = outputToSliceKey.get(outputId);
            int inputId = pk.getInputId();
            // Project away the internal output id and re-attach idxVar with the input id
            Binding tmp = BindingUtils.project(mergedBindingWithIdx, mergedBindingWithIdx.vars(), idxVar);

            result = BindingFactory.binding(tmp, idxVar, NodeValue.makeInteger(inputId).asNode());
        }

        // End of data - eagerly release all batch resources
        if (result == null) {
            freeResources();
        }

        return result;
    }
+
+    /**
+     * Resolve the {@link SliceKey} for a binding based on the value of its index variable.
+     * The remote end marker id maps to a dedicated marker key.
+     */
+    public SliceKey getPartKeyFromBinding(Binding binding) {
+        int outputId = BindingUtils.getNumber(binding, idxVar).intValue();
+        if (BatchQueryRewriter.isRemoteEndMarker(outputId)) {
+            return new SliceKey(BatchQueryRewriter.REMOTE_END_MARKER, 0);
+        }
+        return outputToSliceKey.get(outputId);
+    }
+
+    /**
+     * Close the backend iterator (if any) and every per-slice iterator registered for closing,
+     * then reset all bookkeeping structures so a fresh batch can be scheduled.
+     */
+    protected void freeResources() {
+        if (backendIt != null) {
+            backendIt.close();
+        }
+
+        // Only keys registered in sliceKeyToClose own their iterator's close responsibility
+        sliceKeyToClose.forEach(sliceKey -> sliceKeyToIter.get(sliceKey).close());
+        sliceKeyToClose.clear();
+
+        inputToRangeToOutput.clear();
+        outputToSliceKey.clear();
+        sliceKeyToIter.clear();
+        sliceKeysForBackend.clear();
+    }
+
+    @Override
+    public synchronized void closeIterator() {
+        // Closing this iterator releases the backend iterator, all per-slice iterators
+        // and the associated bookkeeping state
+        freeResources();
+    }
+
+    /** Prepare the lazy execution of the next batch and register all iterators with {@link #sliceKeyToIter} */
+    // seqId = sequential number injected into the request
+    // inputId = id (index) of the input binding
+    // rangeId = id of the range w.r.t. to the input binding
+    // partitionKey = (inputId, rangeId)
+    public void prepareNextBatchExec(boolean bypassCacheOnFirstInput) {
+
+        // Release iterators and bookkeeping state of any prior batch before scheduling the next one
+        freeResources();
+
+        Batch<Integer, PartitionRequest<Binding>> backendRequests = BatchImpl.forInteger();
+        Estimate<Long> serviceDescription = resultSizeCache.getLimit(targetService);
+        long resultSetLimit = serviceDescription.getValue();
+        boolean isExact = serviceDescription.isExact(); // we interpret the limit as a lower bound if exact is false!
+
+        // TODO If the result set limit is known then restrict the iterators to it
+
+        int nextAllocOutputId = 0;
+        int batchSize = inputs.size();
+
+        // NOTE(review): per-batch schedule output at info level may be noisy; consider debug level
+        Log.info(QueryIterServiceBulk.class, "Schedule for current batch:");
+        int rangeId = currentRangeId;
+
+        for (int inputId = currentInputId; inputId < batchSize; ++inputId) {
+
+            boolean isFirstInput = inputId == currentInputId;
+
+            // Number of bindings already handed to the client for the first input;
+            // subsequent inputs always start from the beginning of their range
+            long displacement = isFirstInput && !bypassCacheOnFirstInput
+                    ? currentInputIdBindingsServed
+                    : 0l
+                    ;
+
+            Binding inputBinding = inputs.get(inputId);
+            // Binding joinBinding = new BindingProject(joinVarMap.keySet(), inputBinding);
+
+            Slice<Binding[]> slice = null;
+            Lock lock = null;
+            RefFuture<ServiceCacheValue> cacheValueRef = null;
+
+            if (cache != null) {
+
+                ServiceCacheKey cacheKey = cacheKeyFactory.createCacheKey(inputBinding);
+                // ServiceCacheKey cacheKey = new ServiceCacheKey(targetService, serviceInfo.getRawQueryOp(), joinBinding, useLoopJoin);
+                // System.out.println("Lookup with cache key " + cacheKey);
+
+                // Note: cacheValueRef must be closed as part of the iterators that read from the cache
+                cacheValueRef = cache.getCache().claim(cacheKey);
+                ServiceCacheValue serviceCacheValue = cacheValueRef.await();
+
+                // Lock an existing cache entry so we can read out the loaded ranges
+                slice = serviceCacheValue.getSlice();
+
+                if (CacheMode.CLEAR.equals(cacheMode)) {
+                    slice.clear();
+                }
+
+                lock = slice.getReadWriteLock().readLock();
+
+                Log.debug(QueryIterServiceBulk.class, "Created cache key: " + cacheKey);
+                // Log.debug(BatchRequestIterator.class, "Cached ranges: " + slice.getLoadedRanges().toString());
+
+                lock.lock();
+            }
+
+            RangeSet<Long> loadedRanges;
+            long knownSize;
+            try {
+                if (slice != null) {
+                    loadedRanges = slice.getLoadedRanges();
+                    knownSize = slice.getKnownSize();
+                } else {
+                    // No cache in use: treat the whole requested range as absent
+                    loadedRanges = TreeRangeSet.create();
+                    knownSize = -1;
+                }
+
+                // Iterate the present/absent ranges
+                long start = serviceInfo.getOffset();
+                if (start == Query.NOLIMIT) {
+                    start = 0;
+                }
+
+                long baseLimit = serviceInfo.getLimit();
+                if (baseLimit < 0) {
+                    baseLimit = Long.MAX_VALUE;
+                }
+
+                long limit = baseLimit;
+                if (isExact && baseLimit >= 0) {
+                    limit = Math.min(limit, resultSetLimit);
+                }
+
+                // Shift the requested window past the bindings that were already served
+                if (displacement != 0) {
+                    start += displacement;
+                    if (limit != Long.MAX_VALUE) {
+                        limit -= displacement;
+                    }
+                }
+
+                long max = knownSize < 0 ? Long.MAX_VALUE : knownSize;
+                long end = limit == Long.MAX_VALUE ? max : LongMath.saturatedAdd(start, limit);
+                end = Math.min(end, max);
+
+                Range<Long> requestedRange = end == Long.MAX_VALUE
+                    ? Range.atLeast(start)
+                    : Range.closedOpen(start, end);
+
+                // Maps each sub-range of the request to whether it is cached (true) or must be fetched (false)
+                RangeMap<Long, Boolean> allRanges = TreeRangeMap.create();
+                if (bypassCacheOnFirstInput && isFirstInput) {
+                    allRanges.put(requestedRange, false);
+                    // Note: If we bypass the cache we need to skip the bindings already served
+                    //   based on 'currentInputIdBindingsServed'
+                } else {
+                    RangeSet<Long> presentRanges = loadedRanges.subRangeSet(requestedRange);
+                    RangeSet<Long> absentRanges = loadedRanges.complement().subRangeSet(requestedRange);
+
+                    presentRanges.asRanges().forEach(r -> allRanges.put(r, true));
+                    absentRanges.asRanges().forEach(r -> allRanges.put(r, false));
+                }
+
+                // If the beginning of the request range is covered by a cache then serve from it
+                // a global limit may prevent having to fire a backend request
+                // However, as soon as we have to fire a backend request we need to ensure we don't serve
+                // more data then the seen result set limit
+                // If the size of the data can be greater than that limit then:
+                //   - We need to start the backend request from the request offset
+                //   - The issue is how to handle the next binding
+
+                // NOTE(review): consider debug level for this per-input schedule output
+                Log.info(QueryIterServiceBulk.class, "input " + inputId + ": " +
+                    allRanges.toString()
+                        .replace("false", "fetch")
+                        .replace("true", "cached"));
+
+                Map<Range<Long>, Boolean> mapOfRanges = allRanges.asMapOfRanges();
+
+                if (mapOfRanges.isEmpty()) {
+                    // Special case if it is known that there are no bindings:
+                    // Register an empty iterator
+                    SliceKey partitionKey = new SliceKey(inputId, rangeId);
+                    QueryIterPeek it = QueryIterPeek.create(new QueryIterNullIterator(execCxt), execCxt);
+                    sliceKeyToIter.put(partitionKey, it);
+                    sliceKeyToClose.add(partitionKey); // it);
+
+                    // Close the cache ref immediately
+                    if (cacheValueRef != null) {
+                        cacheValueRef.close();
+                    }
+                } else {
+                    Iterator<Entry<Range<Long>, Boolean>> rangeIt = mapOfRanges.entrySet().iterator();
+
+                    RefFuture<ServiceCacheValue> finalCacheValueRef = cacheValueRef;
+
+                    boolean usesCacheRead = false;
+                    while (rangeIt.hasNext()) {
+                        SliceKey partitionKey = new SliceKey(inputId, rangeId);
+                        Entry<Range<Long>, Boolean> f = rangeIt.next();
+
+                        Range<Long> range = f.getKey();
+                        boolean isLoaded = f.getValue();
+
+                        long lo = range.lowerEndpoint();
+                        long hi = range.hasUpperBound() ? range.upperEndpoint() : Long.MAX_VALUE;
+                        long lim = hi == Long.MAX_VALUE ? Long.MAX_VALUE : hi - lo;
+
+                        if (isLoaded) {
+                            // Serve this sub-range from the cache
+                            usesCacheRead = true;
+                            @SuppressWarnings("resource") // Accessor will be closed via channel below
+                            SliceAccessor<Binding[]> accessor = slice.newSliceAccessor();
+
+                            // Prevent eviction of the scheduled range
+                            accessor.addEvictionGuard(Range.closedOpen(lo, hi));
+
+                            // Create a channel over the accessor for sequential reading
+                            // Reading from the channel internally advances the range of data claimed by the accessor
+                            // Note: As an improvement the eviction guard could be managed by the channel so that data is immediately released after read.
+                            @SuppressWarnings("resource") // Channel will be closed via baseIt below
+                            ReadableChannel<Binding[]> channel =
+                                    new ReadableChannelWithLimit<>(
+                                            new ReadableChannelOverSliceAccessor<>(accessor, lo),
+                                            lim);
+
+                            // CloseableIterator<Binding> baseIt = ReadableChannels.newIterator(channel);
+                            IteratorCloseable<Binding> baseIt = new IteratorOverReadableChannel<>(channel.getArrayOps(), channel, 1024 * 4);
+
+                            // The last iterator's close method also unclaims the cache entry
+                            Runnable cacheEntryCloseAction = rangeIt.hasNext() || finalCacheValueRef == null
+                                    ? baseIt::close
+                                    : () -> {
+                                        baseIt.close();
+                                        finalCacheValueRef.close();
+                                    };
+
+                            // Bridge the cache iterator to jena
+                            QueryIterator qIterA = QueryIterPlainWrapper.create(Iter.onClose(baseIt, cacheEntryCloseAction), execCxt);
+
+                            Map<Var, Var> normedToScoped = serviceInfo.getVisibleSubOpVarsNormedToScoped();
+                            qIterA = new QueryIteratorMapped(qIterA, normedToScoped);
+
+                            // Add a pseudo idxVar mapping
+                            final long idxVarValue = nextAllocOutputId;
+                            QueryIterConvert qIterB = new QueryIterConvert(qIterA, b ->
+                                BindingFactory.binding(b, idxVar, NodeFactoryExtra.intToNode(idxVarValue)), execCxt);
+
+                            QueryIterPeek it = QueryIterPeek.create(qIterB, execCxt);
+
+                            sliceKeyToIter.put(partitionKey, it);
+                            sliceKeyToClose.add(partitionKey); // it);
+                        } else {
+                            // Schedule a backend request for this uncached sub-range
+                            PartitionRequest<Binding> request = new PartitionRequest<>(nextAllocOutputId, inputBinding, lo, lim);
+                            backendRequests.put(nextAllocOutputId, request);
+                            sliceKeysForBackend.add(partitionKey);
+                        }
+
+                        inputToRangeToOutput.put(inputId, rangeId, nextAllocOutputId);
+                        outputToSliceKey.put(nextAllocOutputId, partitionKey);
+
+                        ++rangeId;
+                        ++nextAllocOutputId;
+                    }
+
+                    // Close the reference to the cache entry; QueryIterCaching will manage
+                    // claim/unclaim in batches so we don't need to leave the reference open here
+                    if (!usesCacheRead && finalCacheValueRef != null) {
+                        finalCacheValueRef.close();
+                    }
+                }
+            } finally {
+                if (lock != null) {
+                    lock.unlock();
+                }
+            }
+
+            // Subsequent inputs always start at range id 0
+            rangeId = 0;
+        }
+
+        // Create *deferred* a remote execution if needed
+        // A limit on the query may cause the deferred execution to never run
+        if (!backendRequests.isEmpty()) {
+            BatchQueryRewriteResult rewrite = batchQueryRewriter.rewrite(backendRequests);
+            // System.out.println(rewrite);
+
+            Op newSubOp = rewrite.getOp();
+            OpService substitutedOp = new OpService(targetService, newSubOp, serviceInfo.getOpService().getSilent());
+
+            // Execute the batch request and wrap it such that ...
+            // (1) we can merge it with other backend and cache requests in the right order
+            // (2) responses are written to the cache
+            Supplier<QueryIterator> qIterSupplier = () -> {
+                QueryIterator r = opExecutor.exec(substitutedOp);
+                return r;
+            };
+
+            QueryIterator qIter = new QueryIterDefer(qIterSupplier);
+
+            // Wrap the iterator such that the items are cached
+            if (cache != null) {
+                qIter = new QueryIterWrapperCache(qIter, cacheBulkSize, cache, cacheKeyFactory, backendRequests, idxVar, targetService);
+            }
+
+            // Apply renaming after cache to avoid mismatch between op and bindings
+            qIter = QueryIter.map(qIter, rewrite.getRenames());
+
+            // Wrap the iterator further to detect resultset limit condition
+            // Wrap the query iter such that we can peek the next binding in order
+            // to decide from which iterator to take the next element
+            SubIterator<Binding, QueryIterator> backendItPrimary = IteratorFactoryWithBuffer.wrap(qIter);
+            IteratorOnClose<Binding> jenaIt = Iter.onClose(backendItPrimary, qIter::close);
+            QueryIterator iter = QueryIterPlainWrapper.create(jenaIt, execCxt);
+
+            QueryIterPeek frontIter = QueryIterPeek.create(iter, execCxt);
+
+            // Register the iterator for the backend request with all corresponding outputIds
+            for (Integer outputId : backendRequests.getItems().keySet()) {
+                SliceKey sliceKey = outputToSliceKey.get(outputId);
+                sliceKeyToIter.put(sliceKey, frontIter);
+            }
+
+            // Only the key of the last backend partition owns the close responsibility for frontIter
+            int lastOutputId = backendRequests.getItems().lastKey();
+            SliceKey lastSliceKey = outputToSliceKey.get(lastOutputId);
+            sliceKeyToClose.add(lastSliceKey); // frontIter);
+
+            backendIt = backendItPrimary;
+
+            if (bypassCacheOnFirstInput) {
+                // If we come here then a number of bindings was handed to the client
+                // but then we weren't sure whether we can deliver any more w.r.t. the
+                // backend result set size limit - consume as many bindings already handed to the client
+                for (int i = 0; i < currentInputIdBindingsServed; ++i) {
+                    if (backendIt.hasNext()) {
+                        backendIt.next();
+                    }
+                }
+            }
+        }
+    }
+
+    /** Extract the output id, i.e. the integer value of the index variable, from a binding. */
+    protected int getOutputId(Binding binding) {
+        return BindingUtils.getNumber(binding, idxVar).intValue();
+    }
+
+    /** Look up the slice key that was registered for the given output id during batch preparation. */
+    protected SliceKey getSliceKeyForOutputId(int outputId) {
+        return outputToSliceKey.get(outputId);
+    }
+}
+
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/QueryIterWrapperCache.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/QueryIterWrapperCache.java
new file mode 100644
index 0000000000..5c16565ebb
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/QueryIterWrapperCache.java
@@ -0,0 +1,307 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.NavigableMap;
+
+import org.apache.jena.atlas.logging.Log;
+import org.apache.jena.ext.com.google.common.collect.AbstractIterator;
+import org.apache.jena.ext.com.google.common.collect.Iterators;
+import org.apache.jena.ext.com.google.common.collect.Table.Cell;
+import org.apache.jena.ext.com.google.common.math.LongMath;
+import org.apache.jena.graph.Node;
+import org.apache.jena.sparql.core.Var;
+import org.apache.jena.sparql.engine.QueryIterator;
+import org.apache.jena.sparql.engine.binding.Binding;
+import org.apache.jena.sparql.engine.binding.BindingFactory;
+import org.apache.jena.sparql.service.enhancer.claimingcache.RefFuture;
+import org.apache.jena.sparql.service.enhancer.impl.util.BindingUtils;
+import org.apache.jena.sparql.service.enhancer.impl.util.IteratorUtils;
+import org.apache.jena.sparql.service.enhancer.impl.util.QueryIterSlottedBase;
+import org.apache.jena.sparql.service.enhancer.slice.api.Slice;
+import org.apache.jena.sparql.service.enhancer.slice.api.SliceAccessor;
+
+/**
+ * Query iterator wrapper that writes bindings obtained from a backend request to the
+ * service response cache while passing them on to the client.
+ * Input partition requests (lhs) are paired with the backend bindings (rhs) via a
+ * partial left merge join on the index variable.
+ */
+public class QueryIterWrapperCache
+    extends QueryIterSlottedBase
+{
+    // Pairs each output id of the input batch with the iterator of backend bindings belonging to it
+    protected AbstractIterator<Cell<Integer, Integer, Iterator<Binding>>> mergeLeftJoin;
+
+    protected QueryIterator inputIter;
+    protected ServiceResponseCache cache;
+
+    // Maximum number of bindings accumulated per client batch / cache write
+    protected int batchSize;
+    protected Batch<Integer, PartitionRequest<Binding>> inputBatch;
+    protected Var idxVar; // CacheKeyAccessor cacheKeyAccessor;
+    protected Node serviceNode;
+
+    protected ServiceCacheKeyFactory cacheKeyFactory;
+
+    protected PartitionRequest<Binding> inputPart; // Value stored here for debugging
+
+    // Write offset relative to the current partition's request offset
+    protected long currentOffset = 0;
+    protected long processedBindingCount = 0;
+
+    // Iterator over the most recently prepared client batch; null until first use
+    protected Iterator<Binding> currentBatchIt;
+    /** The claimed cache entry - prevents premature eviction */
+    protected RefFuture<ServiceCacheValue> claimedCacheEntry = null;
+
+    /** The accessor for writing data to the cache */
+    protected SliceAccessor<Binding[]> cacheDataAccessor = null;
+
+    // NOTE(review): this field appears unused within this class - TODO confirm and remove if so
+    protected AbstractIterator<Long> batchOutputIdIt;
+
+    /**
+     * @param qIter       backend iterator whose bindings carry the index variable
+     * @param batchSize   number of bindings per client batch / cache write
+     * @param cache       cache to which responses are written
+     * @param inputBatch  the partition requests keyed by output id
+     * @param idxVar      variable holding the output id in each backend binding
+     * @param serviceNode the service node the responses originate from
+     */
+    public QueryIterWrapperCache(
+            QueryIterator qIter,
+            int batchSize,
+            ServiceResponseCache cache,
+            // Set<Var> joinVars,
+            // boolean isLoopJoin,
+            ServiceCacheKeyFactory cacheKeyFactory,
+            Batch<Integer, PartitionRequest<Binding>> inputBatch,
+            Var idxVar,
+            Node serviceNode
+            ) {
+        this.inputIter = qIter;
+        this.batchSize = batchSize;
+        this.cache = cache;
+        this.cacheKeyFactory = cacheKeyFactory;
+        this.inputBatch = inputBatch;
+        this.idxVar = idxVar;
+        this.serviceNode = serviceNode;
+        this.currentBatchIt = null;
+        // The lhs is the sequence of output ids followed by the remote end marker;
+        // the rhs bindings are matched to them by the value of the index variable
+        mergeLeftJoin = IteratorUtils.partialLeftMergeJoin(
+                Iterators.concat(
+                        inputBatch.getItems().keySet().iterator(),
+                        Arrays.asList(BatchQueryRewriter.REMOTE_END_MARKER).iterator()),
+                qIter,
+                outputId -> outputId,
+                binding -> BindingUtils.getNumber(binding, idxVar).intValue()
+            );
+    }
+
+    /** Serve bindings from the current client batch, preparing new batches as needed; null signals the end. */
+    @Override
+    protected Binding moveToNext() {
+        if (currentBatchIt == null) {
+            // Lazy initialization on first call
+            setupForNextLhsBinding();
+            currentBatchIt = Collections.emptyIterator();
+        }
+
+        Binding result;
+        while (true) {
+            if (currentBatchIt.hasNext()) {
+                result = currentBatchIt.next();
+                break;
+            } else {
+                prepareNextBatch();
+
+                // An empty freshly prepared batch means all data has been consumed
+                if (!currentBatchIt.hasNext()) {
+                    closeCurrentCacheResources();
+                    result = null;
+                    break;
+                }
+            }
+        }
+        return result;
+    }
+
+    /**
+     * Release the resources of the prior partition and, unless the next lhs entry is the
+     * remote end marker, claim the cache entry for the next partition and open a slice accessor on it.
+     */
+    protected void setupForNextLhsBinding() {
+        closeCurrentCacheResources();
+
+        NavigableMap<Integer, PartitionRequest<Binding>> inputs = inputBatch.getItems();
+
+        if (mergeLeftJoin.hasNext()) {
+            Cell<Integer, Integer, Iterator<Binding>> peek = mergeLeftJoin.peek();
+            int outputId = peek.getColumnKey();
+
+            if (!BatchQueryRewriter.isRemoteEndMarker(outputId)) {
+
+                inputPart = inputs.get(outputId);
+                Binding inputBinding = inputPart.getPartitionKey();
+                // System.out.println("Moving to inputBinding " + inputBinding);
+
+                ServiceCacheKey cacheKey = cacheKeyFactory.createCacheKey(inputBinding);
+                // System.out.println("Writing to cache key " + cacheKey);
+
+                // Claim the entry so it cannot be evicted while we write to it
+                claimedCacheEntry = cache.getCache().claim(cacheKey);
+                ServiceCacheValue c = claimedCacheEntry.await();
+
+                Slice<Binding[]> slice = c.getSlice();
+                cacheDataAccessor = slice.newSliceAccessor();
+            }
+        }
+    }
+
+    /**
+     *
+     * The tricky part is that we first need to consume rhs and write it to the cache.
+     * When rhs is consumed a post-action that updates slice metadata has to be performed; but
+     * this action depends on the next item in lhs.
+     *
+     */
+    public void prepareNextBatch() {
+        NavigableMap<Integer, PartitionRequest<Binding>> inputs = inputBatch.getItems();
+
+        Binding[] arr = new Binding[batchSize];
+        long remainingBatchCapacity = batchSize;
+
+        // The batch of bindings under preparation - its content will also exist in the cache
+        List<Binding> clientBatch = new ArrayList<>(batchSize);
+
+        while (mergeLeftJoin.hasNext() && remainingBatchCapacity > 0) {
+            Cell<Integer, Integer, Iterator<Binding>> cell = mergeLeftJoin.peek();
+            int outputId = cell.getColumnKey();
+            Iterator<Binding> rhs = cell.getValue();
+
+            boolean isLocalEndMarker = BatchQueryRewriter.isRemoteEndMarker(outputId);
+
+            if (isLocalEndMarker) {
+                if (rhs != null) {
+                    Iterators.size(rhs); // Consume; expect 1 item
+                }
+                Iterators.size(mergeLeftJoin); // Consume; expect 1 item
+
+                // Expose the end marker
+                if (rhs != null) {
+                    clientBatch.add(BindingFactory.binding(idxVar, BatchQueryRewriter.NV_REMOTE_END_MARKER.asNode()));
+                }
+                break;
+            }
+
+            inputPart = inputs.get(outputId);
+
+            // If rhs is consumed we can only update minimum slice sizes
+            int arrLen = 0;
+            if (rhs != null) {
+                while (rhs.hasNext() && arrLen < remainingBatchCapacity) {
+                    Binding rawOutputBinding = rhs.next();
+                    clientBatch.add(rawOutputBinding);
+
+                    // Cut away the idx value for the binding in the cache
+                    Binding outputBinding = BindingUtils.project(rawOutputBinding, rawOutputBinding.vars(), idxVar);
+                    arr[arrLen++] = outputBinding;
+                }
+                remainingBatchCapacity -= arrLen;
+                processedBindingCount += arrLen;
+            }
+
+            boolean isRhsExhausted = rhs == null || !rhs.hasNext();
+
+            // Submit batch so far
+            long inputOffset = inputPart.getOffset();
+            long inputLimit = inputPart.getLimit();
+            long start = inputOffset + currentOffset;
+            long end = start + arrLen;
+
+            currentOffset += arrLen;
+            cacheDataAccessor.claimByOffsetRange(start, end);
+
+            cacheDataAccessor.lock();
+            try {
+                cacheDataAccessor.write(start, arr, 0, arrLen);
+
+                Slice<Binding[]> slice = cacheDataAccessor.getSlice();
+                // If rhs is completely empty (without any data) then only update the slice metadata
+
+                if (isRhsExhausted) {
+                    mergeLeftJoin.next();
+
+                    Cell<Integer, Integer, Iterator<Binding>> nextTuple = mergeLeftJoin.hasNext()
+                            ? mergeLeftJoin.peek()
+                            : null;
+
+                    Integer nextKey = nextTuple != null ? nextTuple.getColumnKey() : null;
+
+                    // Important: This is the server's end marker
+                    // NOTE(review): isRemoteEndMarker is invoked with a possibly-null nextKey here - confirm it is null-safe
+                    boolean peekedRemoteEndMarker = BatchQueryRewriter.isRemoteEndMarker(nextKey) && nextTuple.getValue() != null;
+
+                    // Note: A key is also completed if in total fewer tuples than
+                    // the minimum known service result set size were processed
+                    // NOTE(review): this expression only checks that the next lhs entry has matching rhs data;
+                    // confirm it matches the comment above
+                    boolean isKeyCompleted = (nextTuple != null && nextTuple.getValue() != null);
+
+                    // If not a single binding was delivered by the service then we certainly did not hit a result set limit
+                    // If the end marker was seen then we also did not hit a result set limit
+                    isKeyCompleted = isKeyCompleted || peekedRemoteEndMarker || processedBindingCount == 0;
+
+                    long requestEnd = inputPart.hasLimit() ? LongMath.saturatedAdd(inputOffset, inputLimit) : Long.MAX_VALUE;
+                    boolean isEndKnown = end < requestEnd;
+
+                    if (peekedRemoteEndMarker) {
+                        Log.info(QueryIterWrapperCache.class, "Peeked end marker - result set was not cut off");
+                    }
+
+                    if (isKeyCompleted) {
+                        if (isEndKnown) {
+                            if (currentOffset > 0) {
+                                slice.mutateMetaData(metaData -> metaData.setKnownSize(end));
+                            } else {
+                                // If we saw no binding we don't know at which point the data actually ended
+                                // but the start(=end) point is an upper limit
+                                // Note: Setting the maximum size to zero will make it a known size of 0
+                                slice.mutateMetaData(metaData -> metaData.setMaximumKnownSize(end));
+                            }
+                        } else {
+                            // Data retrieval ended at a limit (e.g. we retrieved 10/10 items)
+                            // We don't know whether there is more data - but it gives a lower bound
+                            slice.mutateMetaData(metaData -> metaData.setMinimumKnownSize(end));
+                        }
+                    } else {
+                        slice.mutateMetaData(metaData -> metaData.setMinimumKnownSize(end));
+                    }
+                    currentOffset = 0;
+                }
+            } catch (Exception e) {
+                // NOTE(review): broad catch wrapping into RuntimeException - consider narrowing or preserving context
+                throw new RuntimeException(e);
+            } finally {
+                cacheDataAccessor.unlock();
+            }
+
+            if (isRhsExhausted) {
+                // Only initialize after unlocking the current cacheDataAccessor
+                setupForNextLhsBinding();
+            }
+        }
+
+        currentBatchIt = clientBatch.iterator();
+    }
+
+    /** Close the slice accessor and release the claim on the cache entry, if any. */
+    protected void closeCurrentCacheResources() {
+        if (cacheDataAccessor != null) {
+            cacheDataAccessor.close();
+            cacheDataAccessor = null;
+        }
+
+        if (claimedCacheEntry != null) {
+            claimedCacheEntry.close();
+            claimedCacheEntry = null;
+        }
+    }
+
+    @Override
+    protected void closeIterator() {
+        // Release cache resources before closing the underlying backend iterator
+        closeCurrentCacheResources();
+        inputIter.close();
+
+        super.closeIterator();
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/RequestExecutor.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/RequestExecutor.java
new file mode 100644
index 0000000000..2958efcff5
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/RequestExecutor.java
@@ -0,0 +1,257 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Set;
+
+import org.apache.jena.atlas.iterator.IteratorCloseable;
+import org.apache.jena.atlas.lib.Closeable;
+import org.apache.jena.graph.Node;
+import org.apache.jena.sparql.core.Var;
+import org.apache.jena.sparql.engine.ExecutionContext;
+import org.apache.jena.sparql.engine.QueryIterator;
+import org.apache.jena.sparql.engine.binding.Binding;
+import org.apache.jena.sparql.engine.binding.BindingFactory;
+import org.apache.jena.sparql.engine.iterator.QueryIterConvert;
+import org.apache.jena.sparql.engine.iterator.QueryIterPeek;
+import org.apache.jena.sparql.engine.iterator.QueryIterPlainWrapper;
+import org.apache.jena.sparql.expr.NodeValue;
+import org.apache.jena.sparql.graph.NodeTransform;
+import org.apache.jena.sparql.service.enhancer.impl.util.BindingUtils;
+import org.apache.jena.sparql.service.enhancer.impl.util.QueryIterSlottedBase;
+import org.apache.jena.sparql.service.enhancer.impl.util.VarUtilsExtra;
+import org.apache.jena.sparql.service.enhancer.init.ServiceEnhancerInit;
+import org.apache.jena.sparql.service.enhancer.init.ServiceEnhancerConstants;
+
+/**
+ * Prepare and execute bulk requests.
+ *
+ * Consumes a stream of grouped batches (produced e.g. by {@code RequestScheduler}),
+ * lazily executes each batch as a bulk SERVICE request and re-associates the
+ * resulting bindings with the input binding they originated from via an
+ * internally injected index variable ({@link #globalIdxVar}).
+ */
+public class RequestExecutor
+    extends QueryIterSlottedBase
+{
+    protected OpServiceInfo serviceInfo;
+
+    /**  Ensure that at least there are active requests to serve the next n input bindings */
+    protected int fetchAhead = 5;
+    protected int maxRequestSize = 2000;
+
+    protected OpServiceExecutor opExecutor;
+    protected ExecutionContext execCxt;
+    protected ServiceResultSizeCache resultSizeCache;
+    protected ServiceResponseCache cache;
+    protected CacheMode cacheMode;
+
+    // Source of the batches to execute; closed when this iterator is closed
+    protected IteratorCloseable<GroupedBatch<Node, Long, Binding>> batchIterator;
+    // Fresh variable used to carry the (global) input id through the service results
+    protected Var globalIdxVar;
+
+    // Input iteration
+    // currentInputId starts at -1 so the first increment yields input id 0
+    protected long currentInputId = -1;
+    protected QueryIterPeek activeIter;
+
+    // Maps an input id to its original (parent) binding
+    protected Map<Long, Binding> inputToBinding = new HashMap<>();
+    // Maps an input id to the (possibly shared) iterator serving its results
+    protected Map<Long, QueryIterPeek> inputToOutputIt = new LinkedHashMap<>();
+    protected Set<Long> inputToClose = new HashSet<>(); // Whether an iterator can be closed once the input is processed
+
+    public RequestExecutor(
+            OpServiceExecutorImpl opExector,
+            // boolean useLoopJoin,
+            OpServiceInfo serviceInfo,
+            ServiceResultSizeCache resultSizeCache,
+            ServiceResponseCache cache,
+            CacheMode cacheMode,
+            IteratorCloseable<GroupedBatch<Node, Long, Binding>> batchIterator) {
+        this.opExecutor = opExector;
+        // this.useLoopJoin = useLoopJoin;
+        this.serviceInfo = serviceInfo;
+        this.resultSizeCache = resultSizeCache;
+        this.cache = cache;
+        this.cacheMode = cacheMode;
+        this.batchIterator = batchIterator;
+
+        // Allocate a fresh index var - services may be nested which results in
+        // multiple injections of an idxVar which need to be kept separate
+        Set<Var> visibleServiceSubOpVars = serviceInfo.getVisibleSubOpVarsScoped();
+        this.globalIdxVar = VarUtilsExtra.freshVar("__idx__", visibleServiceSubOpVars);
+        this.execCxt = opExector.getExecCxt();
+        // Start with an empty peek iterator; real iterators are installed by prepareNextBatchExec()
+        this.activeIter = QueryIterPeek.create(QueryIterPlainWrapper.create(Collections.<Binding>emptyList().iterator(), execCxt), execCxt);
+    }
+
+    /**
+     * Produce the next output binding, or null when the input is exhausted.
+     * Advances through input ids in order; for each id it drains the matching
+     * bindings from the active iterator, triggering lazy batch execution as
+     * needed and closing per-input resources once an id is fully consumed.
+     */
+    @Override
+    protected Binding moveToNext() {
+
+        Binding parentBinding = null;
+        Binding childBindingWithIdx = null;
+
+        // Peek the next binding on the active iterator and verify that it maps to the current
+        // partition key
+        while (true) {
+          if (activeIter.hasNext()) {
+              Binding peek = activeIter.peek();
+              long peekOutputId = BindingUtils.getNumber(peek, globalIdxVar).longValue();
+
+              boolean matchesCurrentPartition = peekOutputId == currentInputId;
+
+              if (matchesCurrentPartition) {
+                  parentBinding = inputToBinding.get(currentInputId);
+                  childBindingWithIdx = activeIter.next();
+                  break;
+              }
+          }
+
+          // Cleanup of no longer needed resources
+          boolean isClosePoint = inputToClose.contains(currentInputId);
+          if (isClosePoint) {
+              QueryIterPeek it = inputToOutputIt.get(currentInputId);
+              it.close();
+              inputToClose.remove(currentInputId);
+          }
+
+          inputToBinding.remove(currentInputId);
+
+          // Increment rangeId/inputId until we reach the end
+          ++currentInputId;
+
+          // Check if we need to load the next batch
+          // If there are missing (=non-loaded) rows within the read ahead range then load them
+          if (!inputToOutputIt.containsKey(currentInputId)) {
+              if (batchIterator.hasNext()) {
+                  prepareNextBatchExec();
+              }
+          }
+
+          // If there is still no further batch then we assume we reached the end
+          if (!inputToOutputIt.containsKey(currentInputId)) {
+              break;
+          }
+
+          activeIter = inputToOutputIt.get(currentInputId);
+      }
+
+      // Remove the idxVar from the childBinding
+      Binding result = null;
+      if (childBindingWithIdx != null) {
+          Binding childBinding = BindingUtils.project(childBindingWithIdx, childBindingWithIdx.vars(), globalIdxVar);
+          result = BindingFactory.builder(parentBinding).addAll(childBinding).build();
+      }
+
+      // End of data reached - eagerly release all remaining resources
+      if (result == null) {
+          freeResources();
+      }
+
+      return result;
+    }
+
+    /** Prepare the lazy execution of the next batch and register all iterators with {@link #inputToOutputIt} */
+    // seqId = sequential number injected into the request
+    // inputId = id (index) of the input binding
+    // rangeId = id of the range w.r.t. to the input binding
+    // partitionKey = (inputId, rangeId)
+    public void prepareNextBatchExec() {
+
+        GroupedBatch<Node, Long, Binding> batchRequest = batchIterator.next();
+
+        // TODO Support ServiceOpts from Node directly
+        ServiceOpts so = ServiceOpts.getEffectiveService(serviceInfo.getOpService());
+
+        Node targetServiceNode = so.getTargetService().getService();
+
+        // Refine the request w.r.t. the cache
+        Batch<Long, Binding> batch = batchRequest.getBatch();
+
+        // This block sets up the execution of the batch
+        // For aesthetics, bindings are re-numbered starting with 0 when creating the backend request
+        // These ids are subsequently mapped back to the offset of the input iterator
+        {
+            NavigableMap<Long, Binding> batchItems = batch.getItems();
+
+            List<Binding> inputs = new ArrayList<>(batchItems.values());
+
+            NodeTransform serviceNodeRemapper = node -> ServiceEnhancerInit.resolveServiceNode(node, execCxt);
+
+            Set<Var> inputVarsMentioned = BindingUtils.varsMentioned(inputs);
+            ServiceCacheKeyFactory cacheKeyFactory = ServiceCacheKeyFactory.createCacheKeyFactory(serviceInfo, inputVarsMentioned, serviceNodeRemapper);
+
+            Set<Var> visibleServiceSubOpVars = serviceInfo.getVisibleSubOpVarsScoped();
+            Var batchIdxVar = VarUtilsExtra.freshVar("__idx__", visibleServiceSubOpVars);
+
+            BatchQueryRewriterBuilder builder = BatchQueryRewriterBuilder.from(serviceInfo, batchIdxVar);
+
+            // Self-referencing services execute against the local dataset;
+            // request ordered, sequential union semantics for them
+            if (ServiceEnhancerConstants.SELF.equals(targetServiceNode)) {
+                builder.setOrderRetainingUnion(true)
+                    .setSequentialUnion(true);
+            }
+
+            BatchQueryRewriter rewriter = builder.build();
+
+            QueryIterServiceBulk baseIt = new QueryIterServiceBulk(
+                    serviceInfo, rewriter, cacheKeyFactory, opExecutor, execCxt, inputs,
+                    resultSizeCache, cache, cacheMode);
+
+            QueryIterator tmp = baseIt;
+
+            // Remap the local input id of the batch to the global one here
+            Var innerIdxVar = baseIt.getIdxVar();
+            List<Long> reverseMap = new ArrayList<>(batchItems.keySet());
+
+            tmp = new QueryIterConvert(baseIt, b -> {
+                int localId = BindingUtils.getNumber(b, innerIdxVar).intValue();
+                long globalId = reverseMap.get(localId);
+
+                // Strip the batch-local index var and attach the global one instead
+                Binding q = BindingUtils.project(b, b.vars(), innerIdxVar);
+                Binding r = BindingFactory.binding(q, globalIdxVar, NodeValue.makeInteger(globalId).asNode());
+
+                return r;
+            }, execCxt);
+
+
+            QueryIterPeek queryIter = QueryIterPeek.create(tmp, execCxt);
+            // Register the iterator with the input ids
+            // for (int i = 0; i < batchItems.size(); ++i) {
+            for (Long e : batchItems.keySet()) {
+                inputToOutputIt.put(e, queryIter);
+            }
+
+            // The shared iterator may be closed once the last input id of the batch is processed
+            long lastKey = batch.getItems().lastKey();
+            inputToClose.add(lastKey);
+        }
+    }
+
+    /**
+     * Close all iterators still registered for closing as well as the batch source.
+     * NOTE(review): entries are not removed from inputToClose/inputToOutputIt here;
+     * presumably acceptable because this is only called at end-of-data or from
+     * closeIterator() - confirm that double invocation tolerates re-closing.
+     */
+    protected void freeResources() {
+        for (long inputId  : inputToClose) {
+            Closeable closable = inputToOutputIt.get(inputId);
+            closable.close();
+        }
+        batchIterator.close();
+    }
+
+    @Override
+    protected void closeIterator() {
+        freeResources();
+        super.closeIterator();
+    }
+}
+
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/RequestScheduler.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/RequestScheduler.java
new file mode 100644
index 0000000000..c09a06a1f0
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/RequestScheduler.java
@@ -0,0 +1,212 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NavigableMap;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.TreeMap;
+import java.util.function.Function;
+
+import org.apache.jena.atlas.iterator.Iter;
+import org.apache.jena.atlas.iterator.IteratorCloseable;
+import org.apache.jena.ext.com.google.common.collect.AbstractIterator;
+
+/**
+ * Accumulates items from an input iterator into batches.
+ * Every returned batch will start with the first item index not covered by any prior batch.
+ *
+ * Parameters are:
+ * <ul>
+ * <li>maxBulkSize: The maximum number of items allowed in a batch returned by a call to next()</li>
+ * <li>maxReadAhead: The maximum number of items allowed to read from the input iterator in order to fill a batch</li>
+ * <li>maxInputDistance: The index of items w.r.t. to the input iterator in a batch must not be farther apart than this distance</li>
+ * </ul>
+ *
+ * A batch is guaranteed to have at least one item.
+ * If any of the thresholds is exceeded a batch will have fewer items than its maximum allowed size.
+ *
+ *
+ * @param <G> group key type (e.g. service IRI)
+ * @param <I> item type (e.g. Binding)
+ */
+public class RequestScheduler<G, I> {
+    protected int maxBulkSize;
+
+    /** Allow reading at most this number of items ahead for the input iterator to completely fill
+     *  the batch request for the next response */
+    protected int maxReadAhead = 300;
+
+    /** Do not group inputs into the same batch if their ids are this amount (or more) apart */
+    protected int maxInputDistance = 50;
+
+    // protected Iterator<I> inputIterator;
+    // Maps each input item to its group key (e.g. the service node it targets)
+    protected Function<I, G> inputToGroup;
+
+    public RequestScheduler(Function<I, G> inputToGroup, int maxBulkSize) {
+        super();
+        this.inputToGroup = inputToGroup;
+        this.maxBulkSize = maxBulkSize;
+    }
+
+    /** Wrap the given input iterator with one that yields grouped batches. */
+    public IteratorCloseable<GroupedBatch<G, Long, I>> group(IteratorCloseable<I> inputIterator) {
+        return new Grouper(inputIterator);
+    }
+
+    /**
+     * Stateful iterator that pulls items from the input, assigns them to
+     * per-group batches and emits the batch with the lowest starting offset.
+     */
+    class Grouper
+        extends AbstractIterator<GroupedBatch<G, Long, I>> implements IteratorCloseable<GroupedBatch<G, Long, I>>
+    {
+        protected IteratorCloseable<I> inputIterator;
+
+        /** The position of the inputIterator */
+        protected long inputIteratorOffset;
+
+        /** The offset of the next item being returned */
+        protected long nextResultOffset;
+
+        protected long nextInputId;
+
+        // the outer navigable map keys each group's batches by the lowest offset of the batch
+        protected Map<G, NavigableMap<Long, Batch<Long, I>>> groupToBatches = new HashMap<>();
+
+        // Offsets of the group keys
+        protected NavigableMap<Long, G> nextGroup = new TreeMap<>();
+
+        public Grouper(IteratorCloseable<I> inputIterator) {
+            this(inputIterator, 0);
+        }
+
+        public Grouper(IteratorCloseable<I> inputIterator, int inputIteratorOffset) {
+            super();
+            this.inputIterator = inputIterator;
+            this.inputIteratorOffset = inputIteratorOffset;
+            this.nextResultOffset = inputIteratorOffset;
+        }
+
+        @Override
+        protected GroupedBatch<G, Long, I> computeNext() {
+            // The group whose batch will be emitted by this call, if already known
+            G resultGroupKey = Optional.ofNullable(nextGroup.firstEntry()).map(Entry::getValue).orElse(null);
+            G lastGroupKey = null;
+
+            // Cached references
+            NavigableMap<Long, Batch<Long, I>> batches = null;
+            Batch<Long, I> batch = null;
+
+            // Fill batches until the read-ahead budget is used up or the input ends
+            while (inputIterator.hasNext() && inputIteratorOffset - nextResultOffset < maxReadAhead) {
+                I input = inputIterator.next();
+                G groupKey = inputToGroup.apply(input);
+
+                // Only re-resolve the target batch when the group changes
+                if (!Objects.equals(groupKey, lastGroupKey)) {
+                    lastGroupKey = groupKey;
+
+                    if (resultGroupKey == null) {
+                        resultGroupKey = groupKey;
+                    }
+
+                    batches = groupToBatches.computeIfAbsent(groupKey, x -> new TreeMap<>());
+                    if (batches.isEmpty()) {
+                        batch = BatchImpl.forLong();
+                        batches.put(inputIteratorOffset, batch);
+                        nextGroup.put(inputIteratorOffset, groupKey);
+                    } else {
+                        batch = batches.lastEntry().getValue();
+                    }
+                }
+
+                // Check whether we need to start a new request
+                // Either because the batch is full or the differences between the input ids is too great
+                long batchEndOffset = batch.getNextValidIndex();
+                // NOTE(review): distance is computed from nextInputId rather than
+                // inputIteratorOffset - verify this is intended; it looks like the
+                // distance of the *pending result* rather than of the current item.
+                long distance = nextInputId - batchEndOffset;
+
+                // If the item is consecutive add it to the list
+                int batchSize = batch.size();
+                if (distance > maxInputDistance || batchSize >= maxBulkSize) {
+                    batch = BatchImpl.forLong();
+                    batches.put(inputIteratorOffset, batch);
+                }
+                batch.put(inputIteratorOffset, input);
+                ++inputIteratorOffset;
+
+                // If the batch of the result group just became full then break
+                if (groupKey.equals(resultGroupKey) && batchSize + 1 >= maxBulkSize) {
+                    break;
+                }
+            }
+
+            // Return and remove the first batch from our data structures
+
+            GroupedBatch<G, Long, I> result;
+            Iterator<Entry<Long, G>> nextGroupIt = nextGroup.entrySet().iterator();
+            if (nextGroupIt.hasNext()) {
+                Entry<Long, G> e = nextGroupIt.next();
+                resultGroupKey = e.getValue();
+                nextGroupIt.remove();
+                nextInputId = e.getKey();
+
+                NavigableMap<Long, Batch<Long, I>> nextBatches = groupToBatches.get(resultGroupKey);
+                Iterator<Batch<Long, I>> nextBatchesIt = nextBatches.values().iterator();
+                Batch<Long, I> resultBatch = nextBatchesIt.next();
+                nextBatchesIt.remove();
+
+                result = new GroupedBatchImpl<>(resultGroupKey, resultBatch);
+            } else {
+                // No pending batches left: signal end of data to AbstractIterator
+                result = endOfData();
+            }
+            return result;
+        }
+
+        @Override
+        public void close() {
+            Iter.close(inputIterator);
+        }
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceCacheKey.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceCacheKey.java
new file mode 100644
index 0000000000..287092bf7b
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceCacheKey.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.util.Objects;
+
+import org.apache.jena.graph.Node;
+import org.apache.jena.sparql.algebra.Op;
+import org.apache.jena.sparql.engine.binding.Binding;
+
+/**
+ * Composite key identifying a cached SERVICE result: the concrete service
+ * node, the (unsubstituted) algebra expression of the SERVICE clause and the
+ * binding of its join variables. Value semantics via equals/hashCode.
+ */
+public class ServiceCacheKey {
+
+    // Note: The reason why serviceNode and Op are not combined into an OpService here
+    // is because for a cache key, the serviceNode has to be concrete (i.e. substitution applied), whereas the
+    // service's subOp (here 'op') has to be as given (without substitution).
+    protected Node serviceNode;
+    protected Op op;
+    protected Binding binding;
+
+    /**
+     * Key object for service cache entries.
+     *
+     * @param serviceNode The node used with the SERVICE clause (typically an IRI).
+     * @param op A SERVICE clause's algebra expression. Typically with normalized variable scopes.
+     * @param binding A binding holding substitutions of op's variables with concrete values.
+     */
+    public ServiceCacheKey(Node serviceNode, Op op, Binding binding) {
+        super();
+        this.serviceNode = serviceNode;
+        this.op = op;
+        this.binding = binding;
+    }
+
+    public Node getServiceNode() {
+        return serviceNode;
+    }
+
+    public Op getOp() {
+        return op;
+    }
+
+    public Binding getBinding() {
+        return binding;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(binding, op, serviceNode);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        ServiceCacheKey other = (ServiceCacheKey) obj;
+        return Objects.equals(binding, other.binding) && Objects.equals(op, other.op)
+                && Objects.equals(serviceNode, other.serviceNode);
+    }
+
+    @Override
+    public String toString() {
+        return "ServiceCacheKey [serviceNode=" + serviceNode + ", op=" + op + ", binding=" + binding + "]";
+    }
+ }
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceCacheKeyFactory.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceCacheKeyFactory.java
new file mode 100644
index 0000000000..a0f7f2b550
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceCacheKeyFactory.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.jena.ext.com.google.common.collect.Sets;
+import org.apache.jena.graph.Node;
+import org.apache.jena.sparql.algebra.Op;
+import org.apache.jena.sparql.core.Substitute;
+import org.apache.jena.sparql.core.Var;
+import org.apache.jena.sparql.engine.binding.Binding;
+import org.apache.jena.sparql.engine.binding.BindingBuilder;
+import org.apache.jena.sparql.engine.binding.BindingFactory;
+import org.apache.jena.sparql.graph.NodeTransform;
+
+/**
+ * Creates {@link ServiceCacheKey} instances for input bindings: the service
+ * node is substituted and remapped (to resolve 'self' references), and the
+ * binding is projected and renamed to the service sub-op's normalized join
+ * variables.
+ */
+public class ServiceCacheKeyFactory
+{
+    // Needed to resolve 'self' references
+    protected OpServiceInfo serviceInfo;
+    // Maps scoped (lhs) variables to the normalized variables of the service sub-op
+    protected Map<Var, Var> joinVarMap;
+
+    // Used to remap self-id
+    protected NodeTransform serviceNodeRemapper;
+
+    public ServiceCacheKeyFactory(
+            OpServiceInfo serviceInfo,
+            Map<Var, Var> joinVarMap,
+            NodeTransform serviceNodeRemapper) {
+        super();
+        this.serviceInfo = serviceInfo;
+        this.joinVarMap = joinVarMap;
+        this.serviceNodeRemapper = serviceNodeRemapper;
+    }
+
+    public Map<Var, Var> getJoinVarMap() {
+        return joinVarMap;
+    }
+
+    /**
+     * Build the cache key for one input binding: substitute the service node,
+     * apply the remapper (falling back to the substituted node when the
+     * remapper yields null) and restrict the binding to the join variables,
+     * renamed to their normalized counterparts.
+     */
+    public ServiceCacheKey createCacheKey(Binding binding) {
+        Node serviceNode = Substitute.substitute(serviceInfo.getServiceNode(), binding);
+
+        Node effServiceNode = serviceNodeRemapper.apply(serviceNode);
+        if (effServiceNode == null) {
+            effServiceNode = serviceNode;
+        }
+
+        Op op = serviceInfo.getNormedQueryOp();
+
+        BindingBuilder joinbb = BindingFactory.builder();
+        binding.forEach((v, n) -> {
+            Var effectiveVar = joinVarMap.get(v);
+            if (effectiveVar != null) {
+                joinbb.add(effectiveVar, n);
+            }
+        });
+        Binding joinBinding = joinbb.build();
+
+
+        ServiceCacheKey result = new ServiceCacheKey(effServiceNode, op, joinBinding);
+        return result;
+    }
+
+    // Intersection between lhs vars and mentioned(!) rhs vars with subsequent normalization against serviceInfo
+    public static Map<Var, Var> createJoinVarMapScopedToNormed(OpServiceInfo serviceInfo, Set<Var> lhsBindingVarsScoped) {
+        Map<Var, Var> rhsVarsScopedToNormed = serviceInfo.getMentionedSubOpVarsScopedToNormed();
+        Map<Var, Var> joinVarMap = Sets.intersection(lhsBindingVarsScoped, rhsVarsScopedToNormed.keySet()).stream()
+                // .map(rhsVarsScopedToNorm::get)
+                .collect(Collectors.toMap(x -> x, rhsVarsScopedToNormed::get));
+
+        return joinVarMap;
+    }
+
+    /** Convenience factory that derives the join var map from the given lhs variables. */
+    public static ServiceCacheKeyFactory createCacheKeyFactory(
+            OpServiceInfo serviceInfo,
+            // boolean isLoopJoin,
+            Set<Var> lhsBindingVarsScoped,
+            NodeTransform serviceNodeRemapper
+            ) {
+
+
+        Map<Var, Var> joinVarMap = createJoinVarMapScopedToNormed(serviceInfo, lhsBindingVarsScoped);
+
+
+        return new ServiceCacheKeyFactory(serviceInfo, joinVarMap, serviceNodeRemapper);
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceCacheValue.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceCacheValue.java
new file mode 100644
index 0000000000..d530b68acd
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceCacheValue.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import org.apache.jena.sparql.engine.binding.Binding;
+import org.apache.jena.sparql.service.enhancer.slice.api.Slice;
+
+/** Value object of the service response cache: an id plus the slice of cached bindings. */
+public class ServiceCacheValue {
+    // The id of the cache entry
+    protected long id;
+
+    // The slice holding the cached binding arrays for this entry
+    protected Slice<Binding[]> slice;
+
+    public ServiceCacheValue(long id, Slice<Binding[]> slice) {
+        super();
+        this.id = id;
+        this.slice = slice;
+    }
+
+    public long getId() {
+        return id;
+    }
+
+    public Slice<Binding[]> getSlice() {
+        return slice;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceOpts.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceOpts.java
new file mode 100644
index 0000000000..a9c18878c9
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceOpts.java
@@ -0,0 +1,260 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.util.AbstractMap.SimpleEntry;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.Map.Entry;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+
+import org.apache.jena.graph.Node;
+import org.apache.jena.graph.NodeFactory;
+import org.apache.jena.sparql.algebra.Op;
+import org.apache.jena.sparql.algebra.op.OpService;
+import org.apache.jena.sparql.service.enhancer.init.ServiceEnhancerConstants;
+
+/**
+ * Utilities to exploit url scheme pattern to represent key value pairs.
+ * <a href="https://datatracker.ietf.org/doc/html/rfc3986">RFC3986</a> only
+ * allows for a very limited set of characters:
+ * <pre>
+ * scheme      = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
+ * </pre>
+ *
+ * For this reason '+' is used where usually '=' would be used.
+ * Separator is ':'.
+ *
+ * Examples:
+ * <pre>
+ * SERVICE &lt;cache:&gt; {} Trailing colon is needed to discriminate from relative IRIs. Resulting map: {cache=null}
+ * SERVICE &lt;cache:bulk+20&gt; {cache=null, bulk=20}
+ * </pre>
+ */
+public class ServiceOpts {
+    // Use ':' as a separator unless it is preceeded by the escape char '\'
+    private static final Pattern SPLIT_PATTERN = Pattern.compile("(?<!(esc))(?:(esc){2})*(sep)"
+            .replace("(esc)", ":")
+            .replace("(sep)", ":"));
+
+    // Service options
+    public static final String SO_OPTIMIZE = "optimize";
+    public static final String SO_CACHE = "cache";
+    public static final String SO_BULK = "bulk";
+
+    // Undo scoping of variables
+    public static final String SO_LOOP = "loop";
+
+    protected OpService opService;
+    protected List<Entry<String, String>> options;
+
+    public ServiceOpts(OpService opService, List<Entry<String, String>> options) {
+        super();
+        this.opService = opService;
+        this.options = options;
+    }
+
+    public OpService getTargetService() {
+        return opService;
+    }
+
+    public ServiceOpts copy() {
+        return new ServiceOpts(opService, new ArrayList<>(options));
+    }
+
+    public void add(String key, String value) {
+        options.add(new SimpleEntry<>(key, value));
+    }
+
+    /** Removes all occurrences of a key */
+    public void removeKey(Object key) {
+        Iterator<? extends Entry<?, ?>> it = options.iterator();
+        while (it.hasNext()) {
+            Entry<?, ?> e = it.next();
+            if (Objects.equals(e.getKey(), key)) {
+                it.remove();
+            }
+        }
+    }
+
+    public List<Entry<String, String>> getOptions() {
+        return options;
+    }
+
+    public boolean containsKey(Object key) {
+        boolean result = options.stream().anyMatch(e -> Objects.equals(e.getKey(), key));
+        return result;
+    }
+
+    /**
+     * Find a key's first value in the list of options
+     *
+     * @param key The key to find in the options
+     * @param valueIfNull The value to return if the key is present with a null value
+     * @param valueIfAbsent The value to return if the key is absent
+     */
+    public String getFirstValue(Object key, String valueIfNull, String valueIfAbsent) {
+        String result = options.stream()
+                .filter(e -> Objects.equals(e.getKey(), key))
+                .map(e -> {
+                    String r = e.getValue();
+                    if (r == null) {
+                        r = valueIfNull;
+                    }
+                    return r;
+                })
+                .findFirst()
+                .orElse(valueIfAbsent);
+        return result;
+    }
+
+    /** Encode the options as a OpService */
+    public OpService toService() {
+        OpService result;
+        if (options.isEmpty()) {
+            result = opService;
+        } else {
+            Node node = opService.getService();
+            String prefixStr = ServiceOpts.unparse(options);
+            if (!node.isURI()) {
+                Node uri = NodeFactory.createURI(prefixStr);
+                result = new OpService(uri, opService, false);
+            } else {
+                Node uri = NodeFactory.createURI(prefixStr + node.getURI());
+                result = new OpService(uri, opService.getSubOp(), false);
+            }
+        }
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "ServiceOpts [options=" + options + ", opService=" + opService + "]";
+    }
+
+    public static List<Entry<String, String>> parseAsOptions(Node node) {
+        String iri = node.isURI() ? node.getURI() : null;
+        List<Entry<String, String>> result = iri == null ? null : parseAsOptions(iri);
+        return result;
+    }
+
+    /** Split an iri by ':' and attempt to parse the splits as key=value pairs. */
+    public static List<Entry<String, String>> parseAsOptions(String iri) {
+        List<Entry<String, String>> result = new ArrayList<>();
+        String[] rawSplits = SPLIT_PATTERN.split(iri);
+        for (String rawSplit : rawSplits) {
+            String split = rawSplit.replace("\\\\", "\\");
+            String[] kv = split.split("\\+", 2);
+            result.add(new SimpleEntry<>(kv[0], kv.length == 2 ? kv[1] : null));
+        }
+
+        return result;
+    }
+
+    public static String escape(String str) {
+        String result = str.replace("\\", "\\\\").replace(":", "\\:");
+        return result;
+    }
+
+    /** Convert a list of options back into an escaped string */
+    public static String unparse(List<Entry<String, String>> optionList) {
+        String result = optionList.stream()
+            .map(e -> escape(e.getKey()) + (Optional.ofNullable(e.getValue()).map(v -> "+" + escape(v)).orElse("")))
+            .collect(Collectors.joining(":"));
+
+        ListIterator<? extends Entry<String, String>> it = optionList.listIterator(optionList.size());
+        if (it.hasPrevious()) {
+            Entry<String, String> lastEntry = it.previous();
+            if (isKnownOption(lastEntry.getKey())) {
+                result += ":";
+            }
+        }
+
+            //.collect(Collectors.joining(":"));
+        return result;
+    }
+
+    public static boolean isKnownOption(String key) {
+        Set<String> knownOptions = new LinkedHashSet<>();
+        knownOptions.add(SO_CACHE);
+        knownOptions.add(SO_BULK);
+        knownOptions.add(SO_LOOP);
+        knownOptions.add(SO_OPTIMIZE);
+
+        return knownOptions.contains(key);
+    }
+
+    public static ServiceOpts getEffectiveService(OpService opService) {
+        List<Entry<String, String>> opts = new ArrayList<>();
+        OpService currentOp = opService;
+        boolean isSilent;
+        String serviceStr = null;
+
+        while (true) {
+            isSilent = currentOp.getSilent();
+            Node node = currentOp.getService();
+            List<Entry<String, String>> parts = ServiceOpts.parseAsOptions(node);
+
+            if (parts == null) { // node is not an iri
+                break;
+            }
+
+            // If there are only options then check whether to merge with a sub service op
+            // If there is none then append 'self'
+            int n = parts.size();
+            int i = 0;
+            for (; i < n; ++i) {
+                Entry<String, String> e = parts.get(i);
+                String key = e.getKey();
+
+                if (isKnownOption(key)) {
+                    opts.add(e);
+                } else {
+                    break;
+                }
+            }
+
+            List<Entry<String, String>> subList = parts.subList(i, n);
+            serviceStr = ServiceOpts.unparse(subList);
+            if (serviceStr.isEmpty()) {
+                Op subOp = opService.getSubOp();
+                if (subOp instanceof OpService) {
+                    currentOp = (OpService)subOp;
+                } else {
+                    serviceStr = ServiceEnhancerConstants.SELF.getURI();
+                    break;
+                }
+            } else {
+                break;
+            }
+        }
+
+        ServiceOpts result = opts.isEmpty()
+                ? new ServiceOpts(opService, opts)
+                : new ServiceOpts(new OpService(NodeFactory.createURI(serviceStr), currentOp.getSubOp(), isSilent), opts);
+        return result;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceResponseCache.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceResponseCache.java
new file mode 100644
index 0000000000..d80a579b9f
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceResponseCache.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.jena.atlas.logging.Log;
+import org.apache.jena.ext.com.google.common.cache.CacheBuilder;
+import org.apache.jena.query.ARQ;
+import org.apache.jena.sparql.engine.binding.Binding;
+import org.apache.jena.sparql.service.enhancer.claimingcache.AsyncClaimingCache;
+import org.apache.jena.sparql.service.enhancer.claimingcache.AsyncClaimingCacheImplGuava;
+import org.apache.jena.sparql.service.enhancer.claimingcache.RefFuture;
+import org.apache.jena.sparql.service.enhancer.init.ServiceEnhancerConstants;
+import org.apache.jena.sparql.service.enhancer.slice.api.ArrayOps;
+import org.apache.jena.sparql.service.enhancer.slice.api.Slice;
+import org.apache.jena.sparql.service.enhancer.slice.impl.SliceInMemoryCache;
+import org.apache.jena.sparql.util.Context;
+
+public class ServiceResponseCache {
+    // Default parameters (can cache up to 150K bindings for 300 queries amounting to up to 45M bindings)
+    public static final int DFT_MAX_ENTRY_COUNT = 300;
+    public static final int DFT_PAGE_SIZE = 10000;
+    public static final int DFT_MAX_PAGE_COUNT = 15;
+
+    // service / op / joinVars / binding / idx
+    protected AsyncClaimingCache<ServiceCacheKey, ServiceCacheValue> cache;
+
+    protected AtomicLong entryCounter = new AtomicLong(0l);
+
+    /** Secondary index over cache keys */
+    protected Map<Long, ServiceCacheKey> idToKey = new ConcurrentHashMap<>();
+
+    public ServiceResponseCache() {
+        this(DFT_MAX_ENTRY_COUNT, DFT_PAGE_SIZE, DFT_MAX_PAGE_COUNT);
+    }
+
+    public ServiceResponseCache(int maxCacheSize, int pageSize, int maxPageCount) {
+        //super();
+        AsyncClaimingCacheImplGuava.Builder<ServiceCacheKey, ServiceCacheValue> builder =
+                AsyncClaimingCacheImplGuava.newBuilder(CacheBuilder.newBuilder().maximumSize(maxCacheSize));
+        builder = builder
+                .setCacheLoader(key -> {
+                    long id = entryCounter.getAndIncrement();
+                    idToKey.put(id, key);
+                    Slice<Binding[]> slice = SliceInMemoryCache.create(ArrayOps.createFor(Binding.class), pageSize, maxPageCount);
+                    ServiceCacheValue r = new ServiceCacheValue(id, slice);
+                    Log.debug(ServiceResponseCache.class, "Loaded cache entry: " + id);
+                    return r;
+                })
+                .setAtomicRemovalListener(n -> {
+                    // We are not yet handling cancellation of loading a key; in that case the value may not yet be available
+                    // Handle it here here with null for v?
+                    ServiceCacheValue v = n.getValue();
+                    if (v != null) {
+                        long id = v.getId();
+                        Log.debug(ServiceResponseCache.class, "Removed cache entry: " + id);
+                        idToKey.remove(id);
+                    }
+                });
+        cache = builder.build();
+    }
+
+    public AsyncClaimingCache<ServiceCacheKey, ServiceCacheValue> getCache() {
+        return cache;
+    }
+
+    public RefFuture<ServiceCacheValue> claim(ServiceCacheKey key) {
+        return cache.claim(key);
+    }
+
+    public Map<Long, ServiceCacheKey> getIdToKey() {
+        return idToKey;
+    }
+
+    public void invalidateAll() {
+        cache.invalidateAll();
+    }
+
+    /** Return the global instance (if any) in ARQ.getContex() */
+    public static ServiceResponseCache get() {
+        return get(ARQ.getContext());
+    }
+
+    public static ServiceResponseCache get(Context cxt) {
+        return cxt.get(ServiceEnhancerConstants.serviceCache);
+    }
+
+    public static void set(Context cxt, ServiceResponseCache cache) {
+        cxt.put(ServiceEnhancerConstants.serviceCache, cache);
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceResultSizeCache.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceResultSizeCache.java
new file mode 100644
index 0000000000..b2113acd31
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/ServiceResultSizeCache.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import org.apache.jena.ext.com.google.common.cache.Cache;
+import org.apache.jena.ext.com.google.common.cache.CacheBuilder;
+import org.apache.jena.graph.Node;
+import org.apache.jena.query.ARQ;
+import org.apache.jena.sparql.service.enhancer.init.ServiceEnhancerConstants;
+import org.apache.jena.sparql.util.Context;
+
+/**
+ * A mapping of service IRIs to result set size limits.
+ * A flag indicates whether the limit is a lower bound or exact.
+ */
+public class ServiceResultSizeCache {
+    // The estimate should should never be higher than the actual limit
+    protected Cache<Node, Estimate<Long>> serviceToLimit = CacheBuilder.newBuilder()
+            .maximumSize(10000).build(); // new ConcurrentHashMap<>(); // new LinkedHashMap<>();
+
+    public Estimate<Long> getLimit(Node service) {
+        Estimate<Long> result = serviceToLimit.getIfPresent(service);
+        if (result == null) {
+            result = new Estimate<>(0l, false);
+        }
+        return result;
+    }
+
+    public void updateLimit(Node service, Estimate<Long> estimate) {
+//        Log.debug(ServiceResultSizeCache.class, "Setting backend result set limit for " + service + " to " + estimate);
+//        if (estimate.getValue() < 2) {
+//            System.err.println("Should not happen");
+//        }
+
+        serviceToLimit.put(service, estimate);
+    }
+
+    public void invalidateAll() {
+        serviceToLimit.invalidateAll();
+    }
+
+    /** Return the global instance (if any) in ARQ.getContex() */
+    public static ServiceResultSizeCache get() {
+        return get(ARQ.getContext());
+    }
+
+    public static ServiceResultSizeCache get(Context cxt) {
+        return cxt.get(ServiceEnhancerConstants.serviceResultSizeCache);
+    }
+
+    public static void set(Context cxt, ServiceResultSizeCache cache) {
+        cxt.put(ServiceEnhancerConstants.serviceResultSizeCache, cache);
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/SliceKey.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/SliceKey.java
new file mode 100644
index 0000000000..9c9e6d6dc0
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/SliceKey.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl;
+
+import java.util.Objects;
+
+/**
+ * A key object capturing the id of an input binding and an id for a
+ * corresponding consecutive range of data which needs to be accessed/retrieved.
+ */
+public class SliceKey
+    implements Comparable<SliceKey>
+{
+    protected int inputId;
+    protected int rangeId;
+
+    public SliceKey(int inputId, int rangeId) {
+        super();
+        this.inputId = inputId;
+        this.rangeId = rangeId;
+    }
+
+    public int getInputId() {
+        return inputId;
+    }
+
+    public int getRangeId() {
+        return rangeId;
+    }
+
+    @Override
+    public int compareTo(SliceKey o) {
+        int result = inputId == o.inputId
+                ? o.rangeId - rangeId
+                : o.inputId - inputId;
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "SliceKey [inputId=" + inputId + ", rangeId=" + rangeId + "]";
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(inputId, rangeId);
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+        if (obj == null)
+            return false;
+        if (getClass() != obj.getClass())
+            return false;
+        SliceKey other = (SliceKey) obj;
+        return inputId == other.inputId && rangeId == other.rangeId;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/AutoCloseableBase.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/AutoCloseableBase.java
new file mode 100644
index 0000000000..18c1fbc171
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/AutoCloseableBase.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
/**
 * Base implementation of {@link AutoCloseable} with an idempotent, thread-safe
 * {@link #close()} method. Subclasses override {@link #closeActual()} which is
 * invoked at most once.
 */
public class AutoCloseableBase
    implements AutoCloseable
{
    protected volatile boolean isClosed = false;

    /**
     * To be called within synchronized functions
     *
     * @throws IllegalStateException if this object was already closed
     */
    protected void ensureOpen() {
        if (isClosed) {
            // IllegalStateException (a RuntimeException) is the idiomatic type
            // for use-after-close; callers catching RuntimeException still work
            throw new IllegalStateException("Object already closed");
        }
    }

    /** Actual close logic; called at most once. Override if needed. */
    protected void closeActual() throws Exception {
        // Nothing to do here; override if needed
    }

    @Override
    public final void close() {
        // Double-checked locking on the volatile flag: only the first caller
        // runs closeActual(); subsequent calls are no-ops
        if (!isClosed) {
            synchronized (this) {
                if (!isClosed) {
                    isClosed = true;

                    try {
                        closeActual();
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        }
    }
}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/AutoCloseableWithLeakDetectionBase.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/AutoCloseableWithLeakDetectionBase.java
new file mode 100644
index 0000000000..95a0269e5f
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/AutoCloseableWithLeakDetectionBase.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A base implementation of AutoClosable that helps detecting resource leaks.
+ * Creation of an instance of this class captures a snapshot of the stack trace.
+ * If finalize is called (typically only by the GC) and there was no prior call to close then
+ * a warning including the stack trace is logged.
+ *
+ * Implementing classes should override {@link #closeActual()} rather than
+ * {@link #close()}.
+ */
+public class AutoCloseableWithLeakDetectionBase
+    extends AutoCloseableBase
+{
+    private static final Logger logger = LoggerFactory.getLogger(AutoCloseableWithLeakDetectionBase.class);
+
+    protected final StackTraceElement[] instantiationStackTrace = StackTraceUtils.getStackTraceIfEnabled();
+
+    public StackTraceElement[] getInstantiationStackTrace() {
+        return instantiationStackTrace;
+    }
+
+    @SuppressWarnings("deprecation")
+    @Override
+    protected void finalize() throws Throwable {
+        try {
+            if (!isClosed) {
+                String str = StackTraceUtils.toString(instantiationStackTrace);
+
+                logger.warn("Close invoked via GC rather than user logic - indicates resource leak. Object constructed at " + str);
+
+                close();
+            }
+        } finally {
+            super.finalize();
+        }
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/BindingUtils.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/BindingUtils.java
new file mode 100644
index 0000000000..5c7d32fc13
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/BindingUtils.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+
+import org.apache.jena.atlas.iterator.Iter;
+import org.apache.jena.graph.Node;
+import org.apache.jena.sparql.core.Var;
+import org.apache.jena.sparql.engine.binding.Binding;
+import org.apache.jena.sparql.engine.binding.BindingBuilder;
+import org.apache.jena.sparql.expr.ExprEvalException;
+import org.apache.jena.sparql.graph.NodeTransformLib;
+import org.apache.jena.sparql.syntax.syntaxtransform.NodeTransformSubst;
+
+public class BindingUtils {
+    /** Return a new binding that projects all variables having non-null values */
+    public static Binding project(Binding binding, Iterator<Var> vars) {
+        BindingBuilder builder = BindingBuilder.create();
+
+        while (vars.hasNext()) {
+            Var var = vars.next();
+            Node node = binding.get(var);
+            if (node != null) {
+                builder.add(var, node);
+            }
+        }
+
+        return builder.build();
+    }
+
+    /** Return a new binding with all non-null-valued and non-excluded variables projected */
+    public static Binding project(Binding binding, Iterator<Var> vars, Var exclusion) {
+        return project(binding, Iter.notFilter(vars, v -> Objects.equals(v, exclusion)));
+    }
+
+    /** Return a new binding with all non-null-valued and non-excluded variables projected */
+    public static Binding project(Binding binding, Iterator<Var> vars, Set<Var> exclusions) {
+        return project(binding, Iter.notFilter(vars, exclusions::contains));
+    }
+
+    public static <C extends Collection<Var>> C addAll(C acc, Binding binding) {
+        Iterator<Var> it = binding.vars();
+        while (it.hasNext()) {
+            Var v = it.next();
+            acc.add(v);
+        }
+        return acc;
+    }
+
+    public static <C extends Collection<Var>> C varsMentioned(C out, Iterator<Binding> it) {
+        while (it.hasNext()) {
+            Binding b = it.next();
+            CollectionUtils.addAll(out, b.vars());
+        }
+        return out;
+    }
+
+    /** Transform the keys of a binding w.r.t. the given varMap */
+    public static Binding renameKeys(Binding binding, Map<Var, Var> varMap) {
+        return NodeTransformLib.transform(binding, new NodeTransformSubst(varMap));
+    }
+
+    /** Return the set of all variables mentioned in a collection of bindings */
+    public static Set<Var> varsMentioned(Iterable<Binding> bindings) {
+        Set<Var> result = new LinkedHashSet<>();
+        return varsMentioned(result, bindings.iterator());
+    }
+
+    public static Set<Var> varsMentioned(Binding binding) {
+        Set<Var> result = new LinkedHashSet<>();
+        binding.vars().forEachRemaining(result::add);
+        return result;
+    }
+
+    /** Attempt to extract a number from a binding and a variable.
+     * If the node value is null then null is returned.
+     * If the node value is not a number literal then an {@link ExprEvalException} is raised. */
+    public static Number getNumberOrNull(Binding binding, Var var) {
+        Node node = binding.get(var);
+        Number result = NodeUtilsExtra.getNumberOrNull(node);
+        return result;
+    }
+
+    /** Get a binding's values for var as a number using {@link #getNumberOrNull(Binding, Var)}.
+     * Raises an {@link NullPointerException} if no number can be obtained */
+    public static Number getNumber(Binding binding, Var var) {
+        return Objects.requireNonNull(getNumberOrNull(binding, var), "Number must not be null");
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/CollectionUtils.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/CollectionUtils.java
new file mode 100644
index 0000000000..74f5131a96
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/CollectionUtils.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import java.util.Collection;
+import java.util.Iterator;
+
/** Utility methods for working with collections and iterators. */
public class CollectionUtils {
    private CollectionUtils() {} // utility class: prevent instantiation

    /** Add all items from an iterator to a collection and return that collection */
    public static <T, C extends Collection<T>> C addAll(C acc, Iterator<T> it) {
        while (it.hasNext()) {
            T item = it.next();
            acc.add(item);
        }
        return acc;
    }
}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/FinallyRunAll.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/FinallyRunAll.java
new file mode 100644
index 0000000000..a6484c79b5
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/FinallyRunAll.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.Callable;
+
+/**
+ * Force all actions in a list to run.
+ *
+ * Usage:
+ * <pre>{@code
+ * FinallyAll.run(
+ *   () -> action1(),
+ *   () -> action2(),
+ *   () -> actionN()
+ * );
+ * }</pre>
+ *
+ * This is more succinct than nested finally blocks such as:
+ * <pre>{@code
+ * try { action1(); } finally {
+ *   try { action2(); } finally {
+ *     try { actionM(); } finally {
+ *       actionN();
+ *     }
+ *   }
+ * }
+ * }</pre>
+ *
+ */
+public class FinallyRunAll
+    implements Runnable
+{
+    protected List<ThrowingRunnable> actions;
+
+    public static FinallyRunAll create() {
+        return new FinallyRunAll();
+    }
+
+    public FinallyRunAll() {
+        this(new ArrayList<>());
+    }
+
+    public FinallyRunAll(List<ThrowingRunnable> actions) {
+        super();
+        this.actions = actions;
+    }
+
+    public void addThrowing(ThrowingRunnable action) {
+        actions.add(action);
+    }
+
+    public void add(Callable<?> callable) {
+        addThrowing(() -> { callable.call(); });
+    }
+
+    public void add(Runnable runnable) {
+        addThrowing(runnable::run);
+    }
+
+    @Override
+    public void run() {
+        runAction(0);
+    }
+
+    protected void runAction(int index) {
+        if (index < actions.size()) {
+            ThrowingRunnable action = actions.get(index);
+            try {
+                action.run();
+            } catch (Exception e) {
+                throw new RuntimeException(e);
+            } finally {
+                runAction(index + 1);
+            }
+        }
+    }
+
+    public static void run(ThrowingRunnable ... actions) {
+        new FinallyRunAll(Arrays.asList(actions)).run();
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/GraphUtilsExtra.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/GraphUtilsExtra.java
new file mode 100644
index 0000000000..a045139a92
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/GraphUtilsExtra.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import java.util.Optional;
+
+import org.apache.jena.graph.Node;
+import org.apache.jena.rdf.model.Property;
+import org.apache.jena.rdf.model.RDFNode;
+import org.apache.jena.rdf.model.Resource;
+import org.apache.jena.sparql.util.graph.GraphUtils;
+
+public class GraphUtilsExtra {
+    public static Number getAsNumber(Resource resource, Property property) {
+        Number result = null;
+        RDFNode rdfNode = GraphUtils.getAsRDFNode(resource, property);
+        if (rdfNode != null) {
+            Node node = rdfNode.asNode();
+            result = NodeUtilsExtra.getNumberOrNull(node);
+        }
+        return result;
+    }
+
+    public static int getAsInt(Resource resource, Property property, int fallback) {
+        return Optional.ofNullable(getAsNumber(resource, property)).map(Number::intValue).orElse(fallback);
+    }
+
+    public static long getAsLong(Resource resource, Property property, long fallback) {
+        return Optional.ofNullable(getAsNumber(resource, property)).map(Number::longValue).orElse(fallback);
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/IteratorUtils.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/IteratorUtils.java
new file mode 100644
index 0000000000..80ea354ddd
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/IteratorUtils.java
@@ -0,0 +1,195 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import java.util.AbstractMap.SimpleEntry;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map.Entry;
+import java.util.Objects;
+import java.util.function.Function;
+
+import org.apache.jena.atlas.iterator.Iter;
+import org.apache.jena.ext.com.google.common.collect.AbstractIterator;
+import org.apache.jena.ext.com.google.common.collect.Table.Cell;
+import org.apache.jena.ext.com.google.common.collect.Tables;
+
public class IteratorUtils {

    /**
     * For every item in lhs return an iterator over the corresponding sub-range of items in rhs.
     * As soon as rhs is consumed, then for every remaining key in lhs the iterator will be null.
     *
     * Keys in both iterators must appear in the same order - although rhs may omit some or all keys.
     * There MUST NOT exist an item in rhs which has a key that does not correspond to an item in lhs!
     *
     * @param lhs the driving (left) iterator; exactly one output cell is produced per lhs item
     * @param rhs the right iterator whose consecutive items are grouped under the matching lhs key
     * @param xToK key extractor for lhs items
     * @param yToK key extractor for rhs items
     * @return iterator of cells (key, lhs item, sub-iterator over matching rhs items;
     *         the sub-iterator is null once rhs is exhausted)
     */
    public static <K, X, Y> AbstractIterator<Cell<K, X, Iterator<Y>>> partialLeftMergeJoin(
            Iterator<X> lhs,
            Iterator<Y> rhs,
            Function<X, K> xToK,
            Function<Y, K> yToK) {

        // View rhs as an iterator over (key, value) pairs w.r.t. vToK
        Iterator<Entry<K, X>> lhsKvIt = Iter.map(lhs, x -> new SimpleEntry<>(xToK.apply(x), x));

        Iterator<Entry<K, Y>> rawRhsKvIt = Iter.map(rhs, v -> new SimpleEntry<>(yToK.apply(v), v));
        // Lazy peeking lets a sub-iterator stop at the first rhs item whose key differs
        // without consuming that item from the underlying iterator
        PeekIteratorLazy<Entry<K, Y>> rhsKvIt = PeekIteratorLazy.create(rawRhsKvIt);

        // TODO We should add sanity checks if rhs contain keys not in lhs
        AbstractIterator<Cell<K, X, Iterator<Y>>> result = new AbstractIterator<>() {
            @Override
            protected Cell<K, X, Iterator<Y>> computeNext() {
                // TODO If there is a prior rhs iterator then consume it
                // NOTE(review): the caller is currently expected to fully consume each
                // sub-iterator before advancing; otherwise leftover rhs items get
                // attributed to a later lhs key - confirm against call sites.

                Cell<K, X, Iterator<Y>> r;

                if (lhsKvIt.hasNext()) {
                    Entry<K, X> lhsE = lhsKvIt.next();
                    K lhsK = lhsE.getKey();
                    X x = lhsE.getValue();

                    // For every lhs key create a sub iterator over consecutive items in rhs having key lhsK
                    Iterator<Y> rhsSubIt = null;
                    if (rhsKvIt.hasNext()) {
                        Entry<K, Y> e = rhsKvIt.peek();
                        K rhsK = e.getKey();

                        if (Objects.equals(lhsK, rhsK)) {
                            // Lazily yield rhs items while their key matches lhsK;
                            // items are only consumed (next()) after a successful key check
                            rhsSubIt = new AbstractIterator<>() {
                                @Override
                                protected Y computeNext() {
                                    Y rhsR;
                                    if (rhsKvIt.hasNext()) {
                                        Entry<K, Y> subE = rhsKvIt.peek();
                                        K subK = subE.getKey();
                                        if (Objects.equals(lhsK, subK)) {
                                            rhsR = subE.getValue();
                                            rhsKvIt.next();
                                        } else {
                                            // First non-matching key ends this group;
                                            // the item stays available for a later lhs key
                                            rhsR = endOfData();
                                        }
                                    } else {
                                        rhsR = endOfData();
                                    }
                                    return rhsR;
                                }
                            };
                        } else {
                            rhsSubIt = Collections.emptyIterator();
                        }
                    } else {
                        // Return null to indicate that there will be no more value for rhs
                    }

                    r = Tables.immutableCell(lhsK, x, rhsSubIt);
                } else {
                    r = endOfData();
                }

                return r;
            }
        };
        return result;
    }


    // Ad-hoc demo of the basic join behavior.
    // NOTE(review): these main methods look like manual test scaffolding -
    // consider moving them into proper unit tests.
    public static void mainBasic() {
        List<Integer> lhs = Arrays.asList(1, 4, 5, 8, 9, 11, 12);
        List<Entry<Integer, String>> rhs = new ArrayList<>();
        rhs.add(new SimpleEntry<>(4, "fourA"));
        rhs.add(new SimpleEntry<>(4, "fourB"));
        rhs.add(new SimpleEntry<>(8, "eightA"));
        rhs.add(new SimpleEntry<>(8, "eightB"));
        rhs.add(new SimpleEntry<>(9, "nineA"));

        Iterator<Cell<Object, Integer, Iterator<Entry<Integer, String>>>> it = partialLeftMergeJoin(lhs.iterator(), rhs.iterator(), x -> x, Entry::getKey);

        while (it.hasNext()) {
            Cell<Object, Integer, Iterator<Entry<Integer, String>>> cell = it.next();
            System.out.println(cell.getColumnKey() + ":");

            Iterator<Entry<Integer, String>> subIt = cell.getValue();
            if (subIt == null) {
                System.out.println("  No more items");
            } else {
                subIt.forEachRemaining(x -> System.out.println("  " + x));
            }
        }
/*
 * expected:
1:
4:
  4=fourA
  4=fourB
5:
8:
  8=eightA
  8=eightB
9:
  9=nineA
11:
  No more items
12:
  No more items
*/
    }


    // mainWithEndMarker
    // Ad-hoc demo where rhs terminates with a sentinel key (666) matching the last lhs key.
    public static void main(String[] args) {
        List<Integer> lhs = Arrays.asList(1, 4, 5, 8, 9, 11, 12, 666);
        List<Entry<Integer, String>> rhs = new ArrayList<>();
        rhs.add(new SimpleEntry<>(4, "fourA"));
        rhs.add(new SimpleEntry<>(4, "fourB"));
        rhs.add(new SimpleEntry<>(8, "eightA"));
        rhs.add(new SimpleEntry<>(8, "eightB"));
        rhs.add(new SimpleEntry<>(9, "nineA"));
        rhs.add(new SimpleEntry<>(666, "endMarker"));

        AbstractIterator<Cell<Object, Integer, Iterator<Entry<Integer, String>>>> it = partialLeftMergeJoin(lhs.iterator(), rhs.iterator(), x -> x, Entry::getKey);

        while (it.hasNext()) {
            Cell<Object, Integer, Iterator<Entry<Integer, String>>> cell = it.next();
            Integer key = cell.getColumnKey();

            System.out.println(key + ":");

            Iterator<Entry<Integer, String>> subIt = cell.getValue();
            if (subIt == null) {
                System.out.println("  No more items");
            } else {
                subIt.forEachRemaining(x -> System.out.println("  " + x));

                if (Objects.equals(key, 666)) {
                    System.out.println("End marker reached");
                }
            }
        }
/*
 * expected:
...
666:
  666=endMarker
End marker reached
*/
    }
}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/LockUtils.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/LockUtils.java
new file mode 100644
index 0000000000..9d154048aa
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/LockUtils.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import java.time.Duration;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.locks.Lock;
+import java.util.function.Consumer;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class LockUtils {
+    private static final Logger logger = LoggerFactory.getLogger(LockUtils.class);
+
+    /**
+     * Perform an action which requires acquisition of a lock first.
+     * An attempt is made to acquire the lock. If this fails then the action is not run.
+     * Upon completion of the action (successful or exceptional) the lock is released again.
+     */
+    public static <T> T runWithLock(Lock lock, Callable<T> action) {
+        T result = null;
+        try {
+            lock.lock();
+            result = action.call();
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        } finally {
+            lock.unlock();
+        }
+        return result;
+    }
+
+    /** Run an action after locking; eventually the lock is unlocked in a finally block */
+    public static void runWithLock(Lock lock, ThrowingRunnable action) {
+        runWithLock(lock, () -> { action.run(); return null; });
+    }
+
+    /**
+     * Run this action with a short-lived locked. If the lock cannot be acquired
+     * within the given time it is considered stale and forcibly unlocked.
+     * Subsequently another attempt is made to acquire the lock.
+     */
+    public static <T, L extends Lock> T runWithMgmtLock(
+            L lock,
+            Consumer<? super L> forceUnlock,
+            Duration duration,
+            Callable<T> action) {
+        T result = null;
+        try {
+            long timeout = duration.toMillis();
+            boolean isLocked;
+            if (!(isLocked = lock.tryLock(timeout, TimeUnit.MILLISECONDS))) {
+
+                logger.warn(String.format("Forcibly unlocking stale lock %s", lock));
+                forceUnlock.accept(lock);
+
+                isLocked = lock.tryLock(timeout, TimeUnit.MILLISECONDS);
+                if (!isLocked) {
+                    throw new RuntimeException("Failed to acquire lock despite forced unlocking");
+                }
+            }
+
+            result = action.call();
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        } finally {
+            lock.unlock();
+        }
+        return result;
+    }
+
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/NodeUtilsExtra.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/NodeUtilsExtra.java
new file mode 100644
index 0000000000..72fa73eb63
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/NodeUtilsExtra.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import org.apache.jena.graph.Node;
+import org.apache.jena.sparql.expr.ExprEvalTypeException;
+
+public class NodeUtilsExtra {
+    public static Number getNumberOrNull(Node node) {
+        Number result = null;
+        if (node != null) {
+            Object obj = node.getLiteralValue();
+            if (!(obj instanceof Number)) {
+                throw new ExprEvalTypeException("Not a number");
+            }
+            result = ((Number)obj);
+        }
+
+        return result;
+    }
+
+    public static Number getNumberOrDefault(Node node, Number fallback) {
+        Number result = getNumberOrNull(node);
+        if (result == null) {
+            result = fallback;
+        }
+        return result;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/PageUtils.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/PageUtils.java
new file mode 100644
index 0000000000..314caf0b9c
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/PageUtils.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import java.util.Collection;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+import java.util.stream.Collectors;
+import java.util.stream.LongStream;
+
+import org.apache.jena.ext.com.google.common.collect.ContiguousSet;
+import org.apache.jena.ext.com.google.common.collect.DiscreteDomain;
+import org.apache.jena.ext.com.google.common.collect.Range;
+
+/**
+ * Utility methods for working with (fixed-size) pages.
+ */
+public class PageUtils {
+    public static long getPageIndexForOffset(long offset, long pageSize) {
+        return offset / pageSize;
+    }
+
+    public static long getIndexInPage(long offset, long pageSize) {
+        return offset % pageSize;
+    }
+
+    public static int getIndexInPage(long offset, int pageSize) {
+        return (int)(offset % pageSize);
+    }
+
+    public static long getPageOffsetForId(long pageId, long pageSize) {
+        return pageId * pageSize;
+    }
+
+    /** Return a stream of the page indices touched by the range w.r.t. the page size */
+    public static LongStream touchedPageIndices(Range<Long> range, long pageSize) {
+        ContiguousSet<Long> set = ContiguousSet.create(range, DiscreteDomain.longs());
+        LongStream result = set.isEmpty()
+                ? LongStream.empty()
+                : LongStream.rangeClosed(
+                        getPageIndexForOffset(set.first(), pageSize),
+                        getPageIndexForOffset(set.last(), pageSize));
+        return result;
+    }
+
+    public static NavigableSet<Long> touchedPageIndices(Collection<Range<Long>> ranges, long pageSize) {
+        NavigableSet<Long> result = ranges.stream()
+            .flatMapToLong(range -> PageUtils.touchedPageIndices(range, pageSize))
+            .boxed()
+            .collect(Collectors.toCollection(TreeSet::new));
+
+        return result;
+    }
+
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/PeekIteratorLazy.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/PeekIteratorLazy.java
new file mode 100644
index 0000000000..2b4c5548df
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/PeekIteratorLazy.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import java.util.Iterator;
+import java.util.Objects;
+
+import org.apache.jena.ext.com.google.common.collect.AbstractIterator;
+import org.apache.jena.ext.com.google.common.collect.PeekingIterator;
+
+/** The atlas version does active read ahead; this one only fetches data when needed */
+public class PeekIteratorLazy<T>
+    extends AbstractIterator<T> // AbstractIterator already has a public peek method
+    implements PeekingIterator<T>
+{
+    protected Iterator<T> delegate;
+
+    public PeekIteratorLazy(Iterator<T> delegate) {
+        super();
+        this.delegate = Objects.requireNonNull(delegate);
+    }
+
+    public static <T> PeekIteratorLazy<T> create(Iterator<T> it) {
+        PeekIteratorLazy<T> result = it instanceof PeekIteratorLazy
+                ? (PeekIteratorLazy<T>)it
+                : new PeekIteratorLazy<>(it);
+        return result;
+    }
+
+    @Override
+    protected T computeNext() {
+        T result = delegate.hasNext()
+                ? delegate.next()
+                : endOfData();
+        return result;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/PropFuncArgUtils.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/PropFuncArgUtils.java
new file mode 100644
index 0000000000..1c6cd05fac
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/PropFuncArgUtils.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.jena.graph.Node;
+import org.apache.jena.sparql.pfunction.PropFuncArg;
+import org.apache.jena.vocabulary.RDF;
+
+public class PropFuncArgUtils {
+
+    /** If the argument is neither null nor rdf:nil then the result is a singleton list containing it.
+     *  Otherwise an empty list is returned. */
+    public static List<Node> nodeToList(Node node) {
+        List<Node> result = node == null || RDF.Nodes.nil.equals(node)
+                ? Collections.emptyList()
+                : Collections.singletonList(node);
+        return result;
+    }
+
+    /** Return a list also if the given argument holds a single Node */
+    public static List<Node> getAsList(PropFuncArg arg) {
+        List<Node> result = arg.isNode()
+                ? nodeToList(arg.getArg())
+                : arg.getArgList();
+        return result;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/QueryIterDefer.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/QueryIterDefer.java
new file mode 100644
index 0000000000..6773834491
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/QueryIterDefer.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import java.util.Objects;
+import java.util.function.Supplier;
+
+import org.apache.jena.atlas.io.IndentedWriter;
+import org.apache.jena.sparql.engine.QueryIterator;
+import org.apache.jena.sparql.engine.binding.Binding;
+import org.apache.jena.sparql.engine.iterator.QueryIteratorWrapper;
+import org.apache.jena.sparql.serializer.SerializationContext;
+
+/** Deferred (lazy) iterator which initializes a delegate from a supplier only when needed */
+public class QueryIterDefer
+    extends QueryIteratorWrapper
+{
+    protected Supplier<QueryIterator> supplier;
+
+    public QueryIterDefer(Supplier<QueryIterator> supplier) {
+        super(null);
+        this.supplier = supplier;
+    }
+
+    protected void ensureInitialized() {
+        if (iterator == null) {
+            iterator = Objects.requireNonNull(supplier.get(), "Deferred iterator supplier yeld null");
+        }
+    }
+
+    @Override
+    protected boolean hasNextBinding() {
+        ensureInitialized();
+        return super.hasNextBinding();
+    }
+
+    @Override
+    protected Binding moveToNextBinding() {
+        ensureInitialized();
+        return super.moveToNextBinding();
+    }
+
+    @Override
+    public void output(IndentedWriter out) {
+        ensureInitialized();
+        super.output(out);
+    }
+
+    @Override
+    protected void closeIterator() {
+        super.closeIterator();
+    }
+
+    @Override
+    public void output(IndentedWriter out, SerializationContext sCxt) {
+        ensureInitialized();
+        super.output(out, sCxt);
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/QueryIterSlottedBase.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/QueryIterSlottedBase.java
new file mode 100644
index 0000000000..b1da52a7c6
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/QueryIterSlottedBase.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import org.apache.jena.atlas.io.IndentedWriter;
+import org.apache.jena.atlas.io.PrintUtils;
+import org.apache.jena.atlas.iterator.IteratorSlotted;
+import org.apache.jena.atlas.lib.Lib;
+import org.apache.jena.shared.PrefixMapping;
+import org.apache.jena.sparql.engine.Plan;
+import org.apache.jena.sparql.engine.QueryIterator;
+import org.apache.jena.sparql.engine.binding.Binding;
+import org.apache.jena.sparql.serializer.SerializationContext;
+import org.apache.jena.sparql.util.QueryOutputUtils;
+
+/**
+ * QueryIterator implementation based on IteratorSlotted.
+ * Its purpose is to ease wrapping a non-QueryIterator as one based
+ * on a {@link #moveToNext()} method analogous to guava's AbstractIterator.
+ */
+public abstract class QueryIterSlottedBase
+    extends IteratorSlotted<Binding>
+    implements QueryIterator
+{
+    @Override
+    public Binding nextBinding() {
+        Binding result = isFinished()
+                ? null
+                : next();
+        return result;
+    }
+
+    @Override
+    protected boolean hasMore() {
+        return !isFinished();
+    }
+
+    @Override
+    public String toString(PrefixMapping pmap)
+    { return QueryOutputUtils.toString(this, pmap) ; }
+
+    // final stops it being overridden and missing the output() route.
+    @Override
+    public final String toString()
+    { return PrintUtils.toString(this) ; }
+
+    /** Normally overridden for better information */
+    @Override
+    public void output(IndentedWriter out)
+    {
+        out.print(Plan.startMarker) ;
+        out.print(Lib.className(this)) ;
+        out.print(Plan.finishMarker) ;
+    }
+
+    @Override
+    public void cancel() {
+        close();
+    }
+
+    @Override
+    public void output(IndentedWriter out, SerializationContext sCxt) {
+        output(out);
+//	        out.println(Lib.className(this) + "/" + Lib.className(iterator));
+//	        out.incIndent();
+//	        // iterator.output(out, sCxt);
+//	        out.decIndent();
+//	        // out.println(Utils.className(this)+"/"+Utils.className(iterator)) ;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/RangeUtils.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/RangeUtils.java
new file mode 100644
index 0000000000..825716412d
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/RangeUtils.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import java.util.function.Function;
+
+import org.apache.jena.ext.com.google.common.collect.Range;
+import org.apache.jena.ext.com.google.common.collect.RangeSet;
+import org.apache.jena.query.Query;
+
+/** Utility methods for working with guava {@link Range} instances */
+public class RangeUtils {
+    public static <C extends Comparable<C>> RangeSet<C> gaps(Range<C> requestRange, RangeSet<C> availableRanges) {
+        RangeSet<C> absentRanges = availableRanges.complement();
+        RangeSet<C> gaps = absentRanges.subRangeSet(requestRange);
+        return gaps;
+    }
+
+    public static Range<Long> toRange(Query query) {
+        Range<Long> result = toRange(query.getOffset(), query.getLimit());
+        return result;
+    }
+
+    public static Range<Long> toRange(Long offset, Long limit) {
+        Long min = offset == null || offset.equals(Query.NOLIMIT) ? 0 : offset;
+        Long delta = limit == null || limit.equals(Query.NOLIMIT) ? null : limit;
+        Long max = delta == null ? null : min + delta;
+
+        Range<Long> result = max == null
+                ? Range.atLeast(min)
+                : Range.closedOpen(min, max);
+
+        return result;
+    }
+
+    /** Shift the endpoints of the range of type 'Long' by the given distance */
+    public static Range<Long> shiftLong(Range<Long> rawRange, long distance) {
+        return map(rawRange, v -> v + distance);
+    }
+
+    /** Perform a map operation on all present endpoints */
+    public static <I extends Comparable<I>, O extends Comparable<O>> Range<O> map(
+            Range<I> range,
+            Function<? super I, ? extends O> mapper)
+    {
+        Range<O> result;
+
+        if (range.hasLowerBound()) {
+            if (range.hasUpperBound()) {
+                result = Range.range(mapper.apply(range.lowerEndpoint()), range.lowerBoundType(), mapper.apply(range.upperEndpoint()), range.upperBoundType());
+            } else {
+                result = Range.downTo(mapper.apply(range.lowerEndpoint()), range.lowerBoundType());
+            }
+        } else {
+            if (range.hasUpperBound()) {
+                result = Range.upTo(mapper.apply(range.upperEndpoint()), range.upperBoundType());
+            } else {
+                result = Range.all();
+            }
+        }
+
+        return result;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/SinglePrefetchIterator.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/SinglePrefetchIterator.java
new file mode 100644
index 0000000000..4b0f7f7a15
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/SinglePrefetchIterator.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import java.io.Closeable;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+/**
+ * An abstract base class for iterating over containers of unknown size. This
+ * works by prefetching chunks of the container: whenever the iterator reaches
+ * the end of a chunk, the method {@link #prefetch()} is called.
+ *
+ *
+ * Note that once the iterator is finished ({@code prefetch} returned null),
+ * {@code prefetch} will never be called again. This means that if {@code prefetch}
+ * is called, the iterator hasn't reached its end yet.
+ *
+ */
public abstract class SinglePrefetchIterator<T>
    implements Iterator<T>, Closeable
{
    // Most recently prefetched item; null once the iteration finished.
    private T	    current		= null;
    // Set by finish(); once true, prefetch() is never invoked again.
    private boolean finished	= false;

    // True when the next hasNext()/next() call must load a new element.
    private boolean advance     = true;

    // True iff next() was the most recent of next()/hasNext(); guards remove().
    private boolean wasNextCalled = false;

    /**
     * Produce the next element, or invoke {@link #finish()} and return its
     * (null) result to signal the end of the iteration.
     */
    protected abstract T prefetch()
        throws Exception;

    protected SinglePrefetchIterator()
    {
    }

    /** Mark the iteration as finished and release resources. Always returns null. */
    protected T finish()
    {
        this.finished = true;

        close();
        return null;
    }

    /** Invoke prefetch(), wrapping any checked exception as a RuntimeException. */
    private void _prefetch()
    {
        try {
            current = prefetch();
        }
        catch(Exception e) {
            current = null;
            throw new RuntimeException("Prefetching data failed. Reason: " + e.getMessage(), e);
        }
    }

    @Override
    public boolean hasNext()
    {
        wasNextCalled = false;
        if (advance) {
            _prefetch();
            advance = false;
        }

        return finished == false;
    }

    @Override
    public T next()
    {
        wasNextCalled = true;

        if (finished) {
            throw new NoSuchElementException();
        }

        if (advance) {
            _prefetch();
            // Fix: prefetch() may have ended the iteration via finish().
            // Previously this path silently returned null instead of honoring
            // the Iterator contract of raising NoSuchElementException.
            if (finished) {
                throw new NoSuchElementException();
            }
        }

        advance = true;
        return current;
    }

    /** Return the current element without consuming it; throws if exhausted. */
    public T current() {
        T result;
        if (hasNext()) {
            result = current;
        } else {
            throw new NoSuchElementException();
        }
        return result;
    }

    /** Whether the next call to next() or hasNext() will trigger loading the next element */
    protected boolean willAdvance() {
        return advance;
    }

    protected boolean wasNextCalled() {
        return wasNextCalled;
    }

    protected boolean wasHasNextCalled() {
        return !finished && !wasNextCalled && !advance;
    }

    /**
     * An iterator must always free all resources once done with iteration.
     * However, if iteration is aborted, this method should be called.
     */
    @Override
    public void close() {
        // Nothing to do
    }

    @Override
    public final void remove() {
        if (!wasNextCalled) {
            throw new RuntimeException("remove must not be called after .hasNext() - invoke .next() first");
        }

        doRemove(current);
    }

    /**
     * Hook for subclasses that support removal.
     *
     * @param item The item being removed
     */
    protected void doRemove(T item) {
        throw new UnsupportedOperationException("Not supported.");
    }
}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/StackTraceUtils.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/StackTraceUtils.java
new file mode 100644
index 0000000000..eb643af5d7
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/StackTraceUtils.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import java.util.Arrays;
+import java.util.Objects;
+import java.util.stream.Collectors;
+
/** Utilities for conditionally capturing and formatting stack traces based on whether JVM assertions are enabled. */
public class StackTraceUtils {

    /** Cached result of {@link #isAssertEnabled()}, evaluated once at class load time. */
    public static final boolean IS_ASSERT_ENABLED = isAssertEnabled();

    /**
     * Detect whether JVM assertions ({@code -ea}) are enabled for this class
     * by deliberately triggering (and catching) an AssertionError.
     */
    public static boolean isAssertEnabled() {
        boolean result;
        try {
           assert false;
           result = false;
        } catch (@SuppressWarnings("unused") AssertionError e) {
           result = true;
        }
        return result;
    }

    /**
     * Capture the current thread's stack trace, but only when assertions are
     * enabled; returns null otherwise to avoid the capture cost in production.
     */
    public static StackTraceElement[] getStackTraceIfEnabled() {
        StackTraceElement[] result = IS_ASSERT_ENABLED
                ? Thread.currentThread().getStackTrace()
                : null;

        return result;
    }

    /**
     * Format a stack trace with one indented frame per line.
     * A null argument yields a hint about how to enable capture.
     */
    public static String toString(StackTraceElement[] stackTrace) {
        // Arrays.stream instead of the Arrays.asList(..).stream() round trip
        String result = stackTrace == null
                ? "(stack traces not enabled - enable assertions using the -ea jvm option)"
                : Arrays.stream(stackTrace).map(s -> "  " + Objects.toString(s))
                    .collect(Collectors.joining("\n"));

        return result;
    }

}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/ThrowingRunnable.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/ThrowingRunnable.java
new file mode 100644
index 0000000000..455d85dbe2
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/ThrowingRunnable.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
/**
 * A variant of {@link Runnable} whose {@link #run()} method may raise a
 * checked exception. Useful as a lambda target for actions that perform I/O.
 */
@FunctionalInterface
public interface ThrowingRunnable {
    /** Execute the action; may throw any exception. */
    void run() throws Exception;
}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/VarScopeUtils.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/VarScopeUtils.java
new file mode 100644
index 0000000000..f8292c6eed
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/VarScopeUtils.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import java.util.Collection;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.jena.ext.com.google.common.collect.BiMap;
+import org.apache.jena.ext.com.google.common.collect.HashBiMap;
+import org.apache.jena.sparql.ARQConstants;
+import org.apache.jena.sparql.core.Var;
+import org.apache.jena.sparql.engine.Rename;
+
+/**
+ * Methods for working with scope levels of SPARQL variables.
+ * Includes methods for getting, setting and normalizing scope levels.
+ */
+public class VarScopeUtils {
+
+    public static Map<Var, Var> reverseVarRenameMap(Collection<Var> vars) {
+        Map<Var, Var> result = vars.stream()
+                .collect(Collectors.toMap(
+                        v -> v,
+                        v -> (Var)Rename.reverseVarRename(v),
+                        (v, w) -> v,
+                        LinkedHashMap::new));
+        return result;
+    }
+
+    public static Set<Var> reverseVarRename(Collection<Var> vars) {
+        return reverseVarRename(vars, new LinkedHashSet<>());
+    }
+
+    /** Reverse-rename all variables in the given collection */
+    public static <C extends Collection<? super Var>> C reverseVarRename(Collection<Var> vars, C acc) {
+        for (Var v : vars) {
+            Var w = (Var)Rename.reverseVarRename(v);
+            acc.add(w);
+        }
+        return acc;
+    }
+
+    public static String getPlainName(String varName) {
+        int delta = ARQConstants.allocVarScopeHiding.length();
+        int pos = 0;
+        while (varName.startsWith(ARQConstants.allocVarScopeHiding, pos)) {
+            pos += delta;
+        }
+        String result = varName.substring(pos);
+        return result;
+    }
+
+    public static int getScopeLevel(Var var) {
+        return getScopeLevel(var.getName());
+    }
+
+    public static int getScopeLevel(String varName) {
+        int result = 0;
+
+        int delta = ARQConstants.allocVarScopeHiding.length();
+        int pos = 0;
+        while (varName.startsWith(ARQConstants.allocVarScopeHiding, pos)) {
+            pos += delta;
+            ++result;
+        }
+
+        return result;
+    }
+
+    public static Var allocScoped(String baseName, int level) {
+        StringBuilder sb = new StringBuilder();
+        for (int i = 0; i < level; ++i) {
+            sb.append(ARQConstants.allocVarScopeHiding);
+        }
+        sb.append(baseName);
+        String varName = sb.toString();
+        Var result = Var.alloc(varName);
+        return result;
+    }
+
+    /**
+     * Returns a mapping of every variable's base name to the minimum seen scope level.
+     * Example:
+     * <pre>
+     * The input { ?/s, ?//s ?///p }
+     * yields { "s": 1, "p": 3 }
+     * </pre>
+     */
+    public static Map<String, Integer> getMinimumScopeLevels(Collection<Var> vars) {
+        Map<String, Integer> result = new LinkedHashMap<>();
+        for (Var var : vars) {
+            String scopedName = var.getName();
+            String plainName = getPlainName(scopedName);
+
+            Integer priorLevel = result.get(plainName);
+            int thisLevel = getScopeLevel(scopedName);
+
+            if (priorLevel == null || thisLevel < priorLevel) {
+                result.put(plainName, thisLevel);
+            }
+        }
+        return result;
+    }
+
+    public static BiMap<Var, Var> normalizeVarScopes(Collection<Var> vars) {
+        Map<String, Integer> varToMinLevel = getMinimumScopeLevels(vars);
+
+        BiMap<Var, Var> result = HashBiMap.create();
+
+        for (Var from : vars) {
+            String fromName = from.getName();
+            int fromLevel = getScopeLevel(fromName);
+
+            String plainName = getPlainName(fromName);
+            int minLevel = varToMinLevel.get(plainName);
+            int normalizedLevel = fromLevel - minLevel;
+            Var to = allocScoped(plainName, normalizedLevel);
+            result.put(from, to);
+        }
+
+        return result;
+    }
+
+    public static BiMap<Var, Var> normalizeVarScopesGlobal(Collection<Var> vars) {
+        int globalMinScopeLevel = vars.stream().mapToInt(VarScopeUtils::getScopeLevel).min().orElse(0);
+
+        // Reduce all scopes by the global min level
+        BiMap<Var, Var> result = HashBiMap.create();
+        for (Var from : vars) {
+            String fromName = from.getName();
+            int fromLevel = getScopeLevel(fromName);
+
+            String plainName = getPlainName(fromName);
+            int normalizedLevel = fromLevel - globalMinScopeLevel;
+            Var to = allocScoped(plainName, normalizedLevel);
+            result.put(from, to);
+        }
+
+        return result;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/VarUtilsExtra.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/VarUtilsExtra.java
new file mode 100644
index 0000000000..262c39ad63
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/impl/util/VarUtilsExtra.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.impl.util;
+
+import java.util.Collection;
+
+import org.apache.jena.sparql.core.Var;
+
+public class VarUtilsExtra {
+    /**
+     * Allocate a variable whose name is not in black list
+     *
+     * @param baseName The desired name. If it is contained in the set of excluded vars
+     * then repeated attempts
+     * with the name pattern "baseName_counter" are made until successful.
+     * @param excludedVars The set of excluded Var instances
+     * @return The fresh variable
+     */
+    public static Var freshVar(String baseName, Collection<Var> excludedVars) {
+        Var result = Var.alloc(baseName);
+        int i = 0;
+        while (excludedVars.contains(result)) {
+            result = Var.alloc(baseName + "_" + ++i);
+        }
+
+        return result;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/init/ServiceEnhancerConstants.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/init/ServiceEnhancerConstants.java
new file mode 100644
index 0000000000..76d3f5a69d
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/init/ServiceEnhancerConstants.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.init;
+
+import org.apache.jena.graph.Node;
+import org.apache.jena.graph.NodeFactory;
+import org.apache.jena.sparql.SystemARQ;
+import org.apache.jena.sparql.service.enhancer.assembler.ServiceEnhancerVocab;
+import org.apache.jena.sparql.util.Symbol;
+
+public class ServiceEnhancerConstants {
+    /** An IRI constant for referencing the active dataset within a SERVICE clause */
+    public static final Node SELF = NodeFactory.createURI("urn:x-arq:self");
+
+    /** Namespace for context symbols. Same as the assembler vocabulary. */
+    public static final String NS = ServiceEnhancerVocab.NS;
+
+    public static String getURI() { return NS; }
+
+    /** Maximum number of bindings to group into a single bulk request; restricts serviceBulkRequestItemCount */
+    public static final Symbol serviceBulkMaxBindingCount = SystemARQ.allocSymbol(NS, "serviceBulkMaxBindingCount") ;
+
+    /** Number of bindings to group into a single bulk request */
+    public static final Symbol serviceBulkBindingCount = SystemARQ.allocSymbol(NS, "serviceBulkMaxBindingCount") ;
+
+    /** Symbol for the cache of services' result sets */
+    public static final Symbol serviceCache = SystemARQ.allocSymbol(NS, "serviceCache") ;
+
+    /** Symbol for the cache of services' result set sizes */
+    public static final Symbol serviceResultSizeCache = SystemARQ.allocSymbol(NS, "serviceResultSizeCache") ;
+
+    /** Symbol with IRI (String) value. References to {@link #SELF} will be resolved to the given IRI when writing cache entries. */
+    public static final Symbol datasetId = SystemARQ.allocSymbol(NS, "datasetId") ;
+
+    /** This symbol must be set to true in the context in order to allow calling certain "privileged" SPARQL functions. */
+    public static final Symbol enableMgmt = SystemARQ.allocSymbol(NS, "enableMgmt") ;
+
+    /*
+     * A guide number to limit bulk SERVICE requests to roughly this byte size.
+     * Implementations may use a heuristic to estimate the number of bytes in order to avoid
+     * excessive string serializations of query/algebra objects.
+     * For example, an approach may just sum up Binding.toString().
+     * The limit is ignored for the first binding added to such a request
+     */
+    // public static final Symbol serviceBulkRequestMaxByteSize = SystemARQ.allocSymbol("serviceBulkRequestMaxByteSize") ;
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/init/ServiceEnhancerInit.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/init/ServiceEnhancerInit.java
new file mode 100644
index 0000000000..19f0953037
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/init/ServiceEnhancerInit.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.init;
+
+import org.apache.jena.assembler.Assembler;
+import org.apache.jena.assembler.assemblers.AssemblerGroup;
+import org.apache.jena.graph.Node;
+import org.apache.jena.graph.NodeFactory;
+import org.apache.jena.query.ARQ;
+import org.apache.jena.sparql.ARQConstants;
+import org.apache.jena.sparql.algebra.Op;
+import org.apache.jena.sparql.algebra.Transformer;
+import org.apache.jena.sparql.algebra.op.OpService;
+import org.apache.jena.sparql.algebra.optimize.Optimize;
+import org.apache.jena.sparql.algebra.optimize.Rewrite;
+import org.apache.jena.sparql.algebra.optimize.RewriteFactory;
+import org.apache.jena.sparql.core.DatasetGraph;
+import org.apache.jena.sparql.core.assembler.AssemblerUtils;
+import org.apache.jena.sparql.core.assembler.DatasetAssembler;
+import org.apache.jena.sparql.engine.ExecutionContext;
+import org.apache.jena.sparql.engine.QueryIterator;
+import org.apache.jena.sparql.engine.main.QC;
+import org.apache.jena.sparql.function.FunctionRegistry;
+import org.apache.jena.sparql.pfunction.PropertyFunctionRegistry;
+import org.apache.jena.sparql.service.ServiceExecutorRegistry;
+import org.apache.jena.sparql.service.enhancer.algebra.TransformSE_EffectiveOptions;
+import org.apache.jena.sparql.service.enhancer.algebra.TransformSE_JoinStrategy;
+import org.apache.jena.sparql.service.enhancer.assembler.DatasetAssemblerServiceEnhancer;
+import org.apache.jena.sparql.service.enhancer.assembler.ServiceEnhancerVocab;
+import org.apache.jena.sparql.service.enhancer.function.cacheRm;
+import org.apache.jena.sparql.service.enhancer.impl.ChainingServiceExecutorBulkServiceEnhancer;
+import org.apache.jena.sparql.service.enhancer.impl.ServiceOpts;
+import org.apache.jena.sparql.service.enhancer.impl.ServiceResponseCache;
+import org.apache.jena.sparql.service.enhancer.impl.ServiceResultSizeCache;
+import org.apache.jena.sparql.service.enhancer.pfunction.cacheLs;
+import org.apache.jena.sparql.service.single.ChainingServiceExecutor;
+import org.apache.jena.sparql.util.Context;
+import org.apache.jena.sys.JenaSubsystemLifecycle;
+
+public class ServiceEnhancerInit
+    implements JenaSubsystemLifecycle
+{
+    @Override
+    public void start() {
+        init();
+    }
+
+    @Override
+    public void stop() {
+        // Nothing to do
+    }
+
+    public static void init() {
+        ServiceResponseCache cache = new ServiceResponseCache();
+        ARQ.getContext().put(ServiceEnhancerConstants.serviceCache, cache);
+
+        ServiceResultSizeCache resultSizeCache = new ServiceResultSizeCache();
+        ServiceResultSizeCache.set(ARQ.getContext(), resultSizeCache);
+
+        ServiceExecutorRegistry.get().addBulkLink(new ChainingServiceExecutorBulkServiceEnhancer());
+
+        // Register SELF extension
+        registerServiceExecutorSelf(ServiceExecutorRegistry.get());
+
+        registerWith(Assembler.general);
+
+        // Important: This registers the (property) functions but
+        // without setting enableMgmt to true in the context some of them
+        // will refuse to work
+        registerFunctions(FunctionRegistry.get());
+        registerPFunctions(PropertyFunctionRegistry.get());
+    }
+
+    public static void registerFunctions(FunctionRegistry reg) {
+        reg.put(cacheRm.DEFAULT_IRI, cacheRm.class);
+    }
+
+    public static void registerPFunctions(PropertyFunctionRegistry reg) {
+        reg.put(cacheLs.DEFAULT_IRI, cacheLs.class);
+    }
+
+    public static void registerServiceExecutorSelf(ServiceExecutorRegistry registry) {
+        ChainingServiceExecutor selfExec = (opExec, opOrig, binding, execCxt, chain) -> {
+            QueryIterator r;
+            ServiceOpts so = ServiceOpts.getEffectiveService(opExec);
+            OpService target = so.getTargetService();
+
+            // It seems that we always need to run the optimizer here
+            // in order to have property functions recognized properly
+            if (ServiceEnhancerConstants.SELF.equals(target.getService())) {
+                String optimizerMode = so.getFirstValue(ServiceOpts.SO_OPTIMIZE, "on", "on");
+                Op op = opExec.getSubOp();
+                // Run the optimizer unless disabled
+                if (!"off".equals(optimizerMode)) {
+                    Context cxt = execCxt.getContext();
+                    RewriteFactory rf = decideOptimizer(cxt);
+                    Rewrite rw = rf.create(cxt);
+                    op = rw.rewrite(op);
+                }
+                r = QC.execute(op, binding, execCxt);
+            } else {
+                r = chain.createExecution(opExec, opOrig, binding, execCxt);
+            }
+            return r;
+        };
+        registry.addSingleLink(selfExec);
+    }
+
+    static void registerWith(AssemblerGroup g)
+    {
+        AssemblerUtils.register(g, ServiceEnhancerVocab.DatasetServiceEnhancer, new DatasetAssemblerServiceEnhancer(), DatasetAssembler.getType());
+
+        // Note: We can't install the plugin on graphs because they don't have a context
+    }
+
+    /** If there is an optimizer in tgt then wrap it. Otherwise put a fresh optimizer into tgt
+     * that lazily wraps the optimizer from src */
+    public static void wrapOptimizer(Context tgt, Context src) {
+        if (tgt == src) {
+            throw new IllegalArgumentException("Target and source contexts for optimizer must differ to avoid infinite loop during lookup");
+        }
+
+        RewriteFactory baseFactory = tgt.get(ARQConstants.sysOptimizerFactory);
+        if (baseFactory == null) {
+            // Wrap the already present optimizer
+            wrapOptimizer(tgt);
+        } else {
+            // Lazily delegate to the optimizer in src
+            RewriteFactory factory = cxt -> op -> {
+                RewriteFactory f = decideOptimizer(src);
+                f = enhance(f);
+                Context mergedCxt = Context.mergeCopy(src, cxt);
+                Rewrite r = f.create(mergedCxt);
+                return r.rewrite(op);
+            };
+            tgt.set(ARQConstants.sysOptimizerFactory, factory);
+        }
+    }
+
+    public static RewriteFactory decideOptimizer(Context context) {
+        RewriteFactory result = context.get(ARQConstants.sysOptimizerFactory);
+        if (result == null) {
+            result = Optimize.getFactory();
+
+            if (result == null) {
+                result = Optimize.stdOptimizationFactory;
+            }
+        }
+        return result;
+    }
+
+    /** Register the algebra transformer that enables forcing linear joins via {@code SERVICE <loop:>}*/
+    public static void wrapOptimizer(Context cxt) {
+        RewriteFactory baseFactory = decideOptimizer(cxt);
+        RewriteFactory enhancedFactory = enhance(baseFactory);
+        cxt.set(ARQConstants.sysOptimizerFactory, enhancedFactory);
+    }
+
+    public static RewriteFactory enhance(RewriteFactory baseFactory) {
+        RewriteFactory enhancedFactory = cxt -> {
+            Rewrite baseRewrite = baseFactory.create(cxt);
+            Rewrite[] rw = { null };
+            rw[0] = op -> {
+                Op a = Transformer.transform(new TransformSE_EffectiveOptions(), op);
+                Op b = Transformer.transform(new TransformSE_JoinStrategy(), a);
+                Op r = baseRewrite.rewrite(b);
+                Op q = Transformer.transform(new TransformSE_JoinStrategy(), r);
+                return q;
+            };
+            return rw[0];
+        };
+        return enhancedFactory;
+    }
+
+    public static Node resolveServiceNode(Node node, ExecutionContext execCxt) {
+        Node result = ServiceEnhancerConstants.SELF.equals(node)
+                ? resolveSelfId(execCxt)
+                : node;
+
+        return result;
+    }
+
+    public static Node resolveSelfId(ExecutionContext execCxt) {
+        Context context = execCxt.getContext();
+
+        Node id = context.get(ServiceEnhancerConstants.datasetId);
+        if (id == null) {
+            DatasetGraph dg = execCxt.getDataset();
+            int hashCode = System.identityHashCode(dg);
+            id = NodeFactory.createLiteral(ServiceEnhancerConstants.SELF.getURI() + "@dataset" + hashCode);
+        }
+
+        return id;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/pfunction/cacheLs.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/pfunction/cacheLs.java
new file mode 100644
index 0000000000..5a6ca393ed
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/pfunction/cacheLs.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.pfunction;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.locks.Lock;
+import java.util.function.Supplier;
+import java.util.stream.Stream;
+
+import org.apache.jena.ext.com.google.common.collect.Range;
+import org.apache.jena.ext.com.google.common.collect.Sets;
+import org.apache.jena.graph.Node;
+import org.apache.jena.graph.NodeFactory;
+import org.apache.jena.query.Query;
+import org.apache.jena.sparql.algebra.Op;
+import org.apache.jena.sparql.algebra.OpAsQuery;
+import org.apache.jena.sparql.core.Var;
+import org.apache.jena.sparql.engine.ExecutionContext;
+import org.apache.jena.sparql.engine.QueryIterator;
+import org.apache.jena.sparql.engine.Rename;
+import org.apache.jena.sparql.engine.binding.Binding;
+import org.apache.jena.sparql.engine.binding.BindingBuilder;
+import org.apache.jena.sparql.engine.binding.BindingFactory;
+import org.apache.jena.sparql.engine.iterator.QueryIterPlainWrapper;
+import org.apache.jena.sparql.expr.NodeValue;
+import org.apache.jena.sparql.pfunction.PropFuncArg;
+import org.apache.jena.sparql.pfunction.PropFuncArgType;
+import org.apache.jena.sparql.pfunction.PropertyFunctionEval;
+import org.apache.jena.sparql.service.enhancer.assembler.ServiceEnhancerVocab;
+import org.apache.jena.sparql.service.enhancer.claimingcache.RefFuture;
+import org.apache.jena.sparql.service.enhancer.impl.ServiceCacheKey;
+import org.apache.jena.sparql.service.enhancer.impl.ServiceCacheValue;
+import org.apache.jena.sparql.service.enhancer.impl.ServiceResponseCache;
+import org.apache.jena.sparql.service.enhancer.impl.util.PropFuncArgUtils;
+import org.apache.jena.sparql.service.enhancer.init.ServiceEnhancerConstants;
+import org.apache.jena.sparql.service.enhancer.slice.api.Slice;
+import org.apache.jena.sparql.util.Context;
+import org.apache.jena.sparql.util.NodeFactoryExtra;
+
+
+/**
+ * A property function for listing the cache's content.
+ * Accessible via IRI {@value org.apache.jena.sparql.service.enhancer.pfunction.cacheLs#DEFAULT_IRI}.
+ * <br />
+ * Alternatively via:
+ * {@code ?id <java:org.apache.jena.sparql.service.enhancer.pfunction.cacheLs> (?serviceIri ?queryStr ?joinBindingStr ?start ?end)}
+ */
+public class cacheLs
+    extends PropertyFunctionEval
+{
+    public static final String DEFAULT_IRI = ServiceEnhancerVocab.NS + "cacheLs";
+
+    public cacheLs() {
+        super(PropFuncArgType.PF_ARG_SINGLE, PropFuncArgType.PF_ARG_EITHER);
+    }
+
+    private static Optional<BindingBuilder> processArg(Optional<BindingBuilder> builderOpt, List<Node> nodes, int i, Supplier<Node> valueSupplier) {
+        Optional<BindingBuilder> result = builderOpt;
+        if (builderOpt.isPresent()) {
+            BindingBuilder builder = builderOpt.get();
+            int n = nodes.size();
+            if (i < n) {
+                Node key = nodes.get(i);
+
+                Node value = valueSupplier.get();
+                if (key.isVariable()) {
+                    builder.add((Var)key, value);
+                } else if (!Objects.equals(key, value)) {
+                    result = Optional.empty();
+                }
+            }
+        }
+
+        return result;
+    }
+
+    @Override
+    public QueryIterator execEvaluated(Binding inputBinding, PropFuncArg subject, Node predicate, PropFuncArg object,
+            ExecutionContext execCxt) {
+
+        Context context = execCxt.getContext();
+        ServiceResponseCache cache = context.get(ServiceEnhancerConstants.serviceCache);
+
+        Node s = subject.getArg();
+        Var sv = s instanceof Var ? (Var)s : null;
+
+        Set<Long> subset = null;
+        if (sv == null) {
+            NodeValue snv = NodeValue.makeNode(s);
+            if (snv.isInteger()) {
+                long v = snv.getInteger().longValue();
+                subset = Collections.singleton(v);
+            }
+        }
+
+        List<Node> objectArgs = PropFuncArgUtils.getAsList(object);
+
+        Map<Long, ServiceCacheKey> idToKey = cache.getIdToKey();
+        Set<Long> baseIds = idToKey.keySet();
+
+        Collection<Long> ids = subset == null
+                ? baseIds
+                : Sets.intersection(subset, baseIds);
+
+        Iterator<Binding> it = ids.stream()
+            .flatMap(id -> {
+                Node idNode = NodeValue.makeInteger(id).asNode();
+
+                Optional<BindingBuilder> parentBuilder = Optional.of(BindingFactory.builder(inputBinding));
+                if (sv != null) {
+                    parentBuilder.get().add(sv, idNode);
+                }
+
+                ServiceCacheKey key = idToKey.get(id);
+
+                parentBuilder = processArg(parentBuilder, objectArgs, 0, () -> key.getServiceNode());
+                parentBuilder = processArg(parentBuilder, objectArgs, 1, () -> {
+                    Op normOp = key.getOp();
+                    Op op = Rename.reverseVarRename(normOp, true);
+                    Query query = OpAsQuery.asQuery(op);
+                    return NodeFactory.createLiteral(query.toString());
+                });
+
+                parentBuilder = processArg(parentBuilder, objectArgs, 2, () -> NodeFactory.createLiteral(key.getBinding().toString()));
+
+                Optional<Binding> parentBindingOpt = parentBuilder.map(BindingBuilder::build);
+
+                Stream<Binding> r = parentBindingOpt.stream();
+
+                // Join in the range information if more than 3 arguments were supplied
+                if (objectArgs.size() > 3) {
+                    r = r.flatMap(parentBinding -> {
+
+                        Collection<Range<Long>> ranges;
+                        try (RefFuture<ServiceCacheValue> refFuture = cache.getCache().claimIfPresent(key)) {
+                            if (refFuture != null) {
+                                ServiceCacheValue entry = refFuture.await();
+                                Slice<Binding[]> slice = entry.getSlice();
+                                Lock lock = slice.getReadWriteLock().readLock();
+                                lock.lock();
+                                try {
+                                    ranges = new ArrayList<>(entry.getSlice().getLoadedRanges().asRanges());
+                                } finally {
+                                    lock.unlock();
+                                }
+
+                                if (ranges.isEmpty()) {
+                                    ranges = Collections.singletonList(Range.closedOpen(0L, 0L));
+                                }
+                            } else {
+                                // Flat-mapping an empty collection prevents the cache key from showing up
+                                // This should be ok when the future is not ready yet
+                                ranges = Collections.emptyList();
+                            }
+                        }
+
+                        return ranges.stream().flatMap(range -> {
+                            Optional<BindingBuilder> bb = Optional.of(BindingBuilder.create(parentBinding));
+
+                            if (range.hasLowerBound()) {
+                                bb = processArg(bb, objectArgs, 3, () -> NodeFactoryExtra.intToNode(range.lowerEndpoint()));
+                            }
+
+                            if (range.hasUpperBound()) {
+                                bb = processArg(bb, objectArgs, 4, () -> NodeFactoryExtra.intToNode(range.upperEndpoint()));
+                            }
+
+                            return bb.map(BindingBuilder::build).stream();
+                        });
+                    });
+                }
+                return r;
+            })
+            .iterator();
+
+        return QueryIterPlainWrapper.create(it, execCxt);
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ArrayOps.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ArrayOps.java
new file mode 100644
index 0000000000..385e86f356
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ArrayOps.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.api;
+
+import java.lang.reflect.Array;
+import java.util.function.IntFunction;
+
+import org.apache.jena.sparql.service.enhancer.slice.impl.ArrayOpsObject;
+
+/**
+ * Abstraction for arrays of objects and primitive types (most prominently byte).
+ */
+public interface ArrayOps<A> {
+    A create(int size);
+
+    Object get(A array, int index);
+    void set(A array, int index, Object value);
+
+    int length(A array);
+
+    void fill(A array, int offset, int length, Object value);
+    void copy(A src, int srcPos, A dest, int destPos, int length);
+    Object getDefaultValue();
+
+    @SuppressWarnings("unchecked")
+    default void fillRaw(Object array, int offset, int length, Object value) {
+        fill((A)array, offset, length, value);
+    }
+
+    @SuppressWarnings("unchecked")
+    default void copyRaw(Object src, int srcPos, Object dest, int destPos, int length) {
+        copy((A)src, srcPos, (A)dest, destPos, length);
+    }
+
+    @SuppressWarnings("unchecked")
+    default Object getRaw(Object array, int index) {
+        return get((A)array, index);
+    }
+
+    @SuppressWarnings("unchecked")
+    default void setRaw(Object array, int index, Object value) {
+        set((A)array, index, value);
+    }
+
+    @SuppressWarnings("unchecked")
+    default int lengthRaw(Object array) {
+        return length((A)array);
+    }
+
+    // TODO Cache with a ClassInstanceMap?
+    @SuppressWarnings("unchecked")
+    public static <T> ArrayOpsObject<T> createFor(Class<T> componentType) {
+        return new ArrayOpsObject<>(size -> (T[])Array.newInstance(componentType, size));
+    }
+
+    public static <T> ArrayOpsObject<T> createFor(IntFunction<T[]> arrayConstructor) {
+        return new ArrayOpsObject<>(arrayConstructor);
+    }
+
+    public static final ArrayOpsObject<Object> OBJECT = createFor(Object.class);
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ChannelBase.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ChannelBase.java
new file mode 100644
index 0000000000..58a64552c1
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ChannelBase.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.api;
+
+import java.nio.channels.Channel;
+
+import org.apache.jena.sparql.service.enhancer.impl.util.AutoCloseableWithLeakDetectionBase;
+
+public abstract class ChannelBase
+    extends AutoCloseableWithLeakDetectionBase
+    implements Channel
+{
+    @Override
+    public boolean isOpen() {
+        return !isClosed;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/Disposable.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/Disposable.java
new file mode 100644
index 0000000000..c1161cbdfe
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/Disposable.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.api;
+
+/** Interface typically used for removing listener registrations */
+public interface Disposable
+    extends AutoCloseable
+{
+    @Override
+    void close();
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/HasArrayOps.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/HasArrayOps.java
new file mode 100644
index 0000000000..68dc0a753a
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/HasArrayOps.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.api;
+
+/** Interface for retrieving {@link ArrayOps} */
+public interface HasArrayOps<A> {
+    ArrayOps<A> getArrayOps();
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/IteratorOverReadableChannel.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/IteratorOverReadableChannel.java
new file mode 100644
index 0000000000..af91fed23e
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/IteratorOverReadableChannel.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.api;
+
+import java.io.IOException;
+
+import org.apache.jena.atlas.iterator.IteratorCloseable;
+import org.apache.jena.ext.com.google.common.base.Preconditions;
+import org.apache.jena.ext.com.google.common.collect.AbstractIterator;
+
+public class IteratorOverReadableChannel<T>
+    extends AbstractIterator<T>
+    implements IteratorCloseable<T>
+{
+    protected ReadableChannel<T[]> dataStream;
+
+    protected ArrayOps<T[]> arrayOps;
+
+    // We need to use Object because assigning arrays of primitive types to T[]
+    // raises a class cast exception
+    protected Object array;
+    protected int arrayLength;
+
+    protected int currentOffset;
+    protected int currentDataLength;
+
+    /**
+     *
+     * @param arrayOps Array operations used to allocate and access the internal buffer
+     * @param dataStream The channel from which items are read
+     * @param internalBufferSize The number of items to read from the dataStream at once.
+     */
+    public IteratorOverReadableChannel(ArrayOps<T[]> arrayOps, ReadableChannel<T[]> dataStream, int internalBufferSize) {
+        super();
+        Preconditions.checkArgument(internalBufferSize > 0, "Internal buffer size must be greater than 0");
+
+        this.arrayOps = arrayOps;
+        this.dataStream = dataStream;
+        this.arrayLength = internalBufferSize;
+        this.array = arrayOps.create(internalBufferSize);
+
+        this.currentDataLength = 0;
+
+        // Offset starts equal to currentDataLength (both 0) so the first computeNext() call triggers a read.
+        this.currentOffset = 0;
+    }
+
+    @Override
+    protected T computeNext() {
+        if (currentOffset >= currentDataLength) {
+            try {
+                currentDataLength = dataStream.readRaw(array, 0, arrayLength);
+                currentOffset = 0;
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+        }
+
+        Object tmp;
+        if (currentDataLength == -1) {
+            tmp = endOfData();
+        } else {
+            tmp = arrayOps.getRaw(array, currentOffset);
+            if (tmp == null) {
+                throw new NullPointerException("Unexpected null value");
+            }
+        }
+
+        ++currentOffset;
+
+        @SuppressWarnings("unchecked")
+        T result = (T)tmp;
+        return result;
+    }
+
+    @Override
+    public void close() {
+        try {
+            dataStream.close();
+        } catch (IOException e) {
+            throw new RuntimeException(e);
+        }
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/PageHelper.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/PageHelper.java
new file mode 100644
index 0000000000..e32f0c3666
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/PageHelper.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.api;
+
+/**
+ * Interface to ease working with fixed size pages.
+ */
+public interface PageHelper {
+    long getPageSize();
+
+    default long getPageOffsetForPageId(long pageId) {
+        long pageSize = getPageSize();
+        return getPageOffsetForPageId(pageId, pageSize);
+    }
+
+    default long getPageIdForOffset(long offset) {
+        long pageSize = getPageSize();
+        return getPageIdForOffset(offset, pageSize);
+    }
+
+    default long getIndexInPageForOffset(long offset) {
+        long pageSize = getPageSize();
+        return getIndexInPageForOffset(offset, pageSize);
+    }
+
+    public static long getPageIdForOffset(long offset, long pageSize) {
+        long result = offset / pageSize;
+        return result;
+    }
+
+    public static long getIndexInPageForOffset(long offset, long pageSize) {
+        return offset % pageSize;
+    }
+
+    public static long getPageOffsetForPageId(long pageId, long pageSize) {
+        return pageId * pageSize;
+    }
+
+    public static long getLastPageId(long size, long pageSize) {
+        return size / pageSize;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ReadableChannel.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ReadableChannel.java
new file mode 100644
index 0000000000..941f720182
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ReadableChannel.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.api;
+
+import java.io.IOException;
+import java.nio.channels.Channel;
+
/**
 * A data stream allows for repeated retrieval of arrays of consecutive items.
 * Data streams can be seen as a low level generalization / unification of Iterators and InputStreams.
 *
 * Akin to an InputStream, the {@link ReadableChannel} interface does not provide a seek() method.
 * Usually there should be another factory that creates data streams
 * for given offsets. The reason is, that a sequential reader is typically backed by a stream of items
 * (such as a http response, or a sql/sparql result set) and that stream needs to be re-created when
 * jumping to arbitrary offsets.
 *
 * @param <A> The array type for transferring data in blocks
 */
public interface ReadableChannel<A>
    extends HasArrayOps<A>, Channel
{
    /**
     * Read method following the usual InputStream protocol.
     *
     * @param array The array into which to put the read data
     * @param position Offset into array where to start writing
     * @param length Maximum number of items to read.
     * @return The number of items read. Return -1 if end of data was reached, and 0 iff length was 0.
     *
     * @throws IOException
     */
    int read(A array, int position, int length) throws IOException;

    /**
     * Untyped variant of {@link #read(Object, int, int)}.
     * The cast is only safe if {@code array} is actually of the channel's array type {@code A}.
     */
    @SuppressWarnings("unchecked")
    default int readRaw(Object array, int position, int length) throws IOException {
        return read((A)array, position, length);
    }
}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ReadableChannelBase.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ReadableChannelBase.java
new file mode 100644
index 0000000000..de93a526bd
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ReadableChannelBase.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.api;
+
/**
 * Convenience base implementation of {@link ReadableChannel} that inherits the
 * open/close bookkeeping from {@code ChannelBase}; subclasses only implement read.
 */
public abstract class ReadableChannelBase<A>
    extends ChannelBase
    implements ReadableChannel<A>
{
    // So far no members needed here
}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ReadableChannelOverSliceAccessor.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ReadableChannelOverSliceAccessor.java
new file mode 100644
index 0000000000..68dcebdd83
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ReadableChannelOverSliceAccessor.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.api;
+
+import java.io.IOException;
+
+public class ReadableChannelOverSliceAccessor<A>
+    extends ReadableChannelBase<A>
+{
+    protected SliceAccessor<A> accessor;
+    protected long posInSlice;
+
+    public ReadableChannelOverSliceAccessor(SliceAccessor<A> accessor, long posInSlice) {
+        super();
+        this.accessor = accessor;
+        this.posInSlice = posInSlice;
+    }
+
+    @Override
+    public ArrayOps<A> getArrayOps() {
+        return accessor.getSlice().getArrayOps();
+    }
+
+    @Override
+    public void closeActual() throws IOException {
+        accessor.close();
+    }
+
+    @Override
+    public int read(A array, int position, int length) throws IOException {
+        accessor.claimByOffsetRange(posInSlice, posInSlice + length);
+        int result = accessor.unsafeRead(array, position, posInSlice, length);
+        if (result > 0) {
+            posInSlice += result;
+        }
+        return result;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ReadableChannelWithLimit.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ReadableChannelWithLimit.java
new file mode 100644
index 0000000000..59d277b7a8
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/ReadableChannelWithLimit.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.api;
+
+import java.io.IOException;
+
+import org.apache.jena.ext.com.google.common.primitives.Ints;
+
+public class ReadableChannelWithLimit<A>
+    implements ReadableChannel<A>
+{
+    protected ReadableChannel<A> delegate;
+    protected long limit;
+    protected long remaining;
+
+    public ReadableChannelWithLimit(ReadableChannel<A> backend, long limit) {
+        super();
+        this.delegate = backend;
+        this.limit = limit;
+        this.remaining = limit;
+    }
+
+    public ReadableChannel<A> getDelegate() {
+        return delegate;
+    }
+
+    @Override
+    public ArrayOps<A> getArrayOps() {
+        return getDelegate().getArrayOps();
+    }
+
+    @Override
+    public void close() throws IOException {
+        getDelegate().close();
+    }
+
+    @Override
+    public boolean isOpen() {
+        return getDelegate().isOpen();
+    }
+
+    @Override
+    public int read(A array, int position, int length) throws IOException {
+        int result;
+        if (remaining <= 0) {
+            result = -1;
+        } else {
+            int n = Math.min(Ints.saturatedCast(remaining), length);
+            result = getDelegate().read(array, position, n);
+
+            if (result > 0) {
+                remaining -= result;
+            }
+        }
+
+        return result;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/Slice.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/Slice.java
new file mode 100644
index 0000000000..3c6f998b81
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/Slice.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.api;
+
+import java.util.Set;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.function.Consumer;
+import java.util.function.Function;
+
+import org.apache.jena.atlas.lib.Sync;
+import org.apache.jena.ext.com.google.common.collect.Range;
+import org.apache.jena.ext.com.google.common.collect.RangeSet;
+
/**
 * A concurrently accessible sequence of data of possibly unknown size.
 *
 * @param <T> The array type for transferring data in blocks
 */
public interface Slice<T>
    extends SliceMetaDataBasic, HasArrayOps<T>, Sync
{
    /** Lock guarding this slice's metadata; used by {@link #computeFromMetaData(boolean, Function)}. */
    ReadWriteLock getReadWriteLock();

    /** Condition signalled (under the write lock) whenever metadata was mutated. */
    Condition getHasDataCondition();

    /**
     * Protect a set of ranges from eviction.
     * If the slice does not make use of eviction then this method may return null.
     * Otherwise, a disposable must be returned. As long as it is not disposed,
     * no data in the range may get lost due to eviction.
     *
     * This method should not be used directly but via {@link SliceAccessor#addEvictionGuard(Range)}.
     */
    Disposable addEvictionGuard(RangeSet<Long> range);

    /**
     * Read the metadata and check whether the slice has a known size and
     * there is only a single range of loaded data starting from offset 0 to that size.
     *
     * NOTE(review): only the upper endpoint of the single range is compared against
     * the known size; a lone range that does not start at offset 0 would also count
     * as complete — confirm that loaded data always begins at offset 0.
     */
    default boolean isComplete() {
        boolean result = computeFromMetaData(false, metaData -> {
            long knownSize = metaData.getKnownSize();
            Set<Range<Long>> ranges = metaData.getLoadedRanges().asRanges();

            // Exactly one contiguous loaded range is required for completeness
            Range<Long> range = ranges.size() == 1 ? ranges.iterator().next() : null;

            long endpoint = range != null ? range.upperEndpoint() : -1;

            boolean r = endpoint >= 0 && knownSize == endpoint; // implied: knownSize >= 0
            return r;
        });

        return result;
    }

    /** Mutate the metadata under the write lock; waiting threads are signalled afterwards. */
    default void mutateMetaData(Consumer<? super SliceMetaDataBasic> fn) {
        computeFromMetaData(true, metaData -> { fn.accept(metaData); return null; });
    }

    /** Read the metadata under the read lock. */
    default void readMetaData(Consumer<? super SliceMetaDataBasic> fn) {
        computeFromMetaData(false, metaData -> { fn.accept(metaData); return null; });
    }

    /**
     * Lock the metadata and then invoke a value returning function on it.
     * Afterwards release the lock. Returns the obtained value.
     *
     * Note: the function receives this slice itself as its metadata view.
     *
     * @param <X> The type of the value being computed
     * @param isWrite If true then lock for writing, otherwise for reading
     * @param fn The custom computing function
     * @return The computed value
     */
    default <X> X computeFromMetaData(boolean isWrite, Function<? super SliceMetaDataBasic, X> fn) {
        X result;
        ReadWriteLock rwl = this.getReadWriteLock();
        Lock lock = isWrite ? rwl.writeLock() : rwl.readLock();
        lock.lock();
        try {
            result = fn.apply(this);

            // Wake up consumers that wait for new data after any mutation
            if (isWrite) {
                this.getHasDataCondition().signalAll();
            }
        } finally {
            lock.unlock();
        }

        return result;
    }

    /**
     * An accessor which allows for 'claiming' a sub-range of this slice. The claimed range can be incrementally
     * modified which may re-use already allocated resources (e.g. claimed pages) and thus improve performance.
     *
     * Sub-ranges of a slice can be loaded and iterated or inserted into.
     * The sub-ranges can be modified dynamically.
     */
    SliceAccessor<T> newSliceAccessor();

    /** Reset this slice - removes all data and sets the size to unknown */
    void clear();
}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/SliceAccessor.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/SliceAccessor.java
new file mode 100644
index 0000000000..d3c6bf926a
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/SliceAccessor.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.api;
+
+import java.io.IOException;
+
+import org.apache.jena.ext.com.google.common.collect.ImmutableRangeSet;
+import org.apache.jena.ext.com.google.common.collect.Range;
+import org.apache.jena.ext.com.google.common.collect.RangeSet;
+
/**
 * Abstraction over a sequence of pages to view their content as
 * consecutive items. The underlying pages may be claimed by multiple page ranges held by different clients.
 * Clients must eventually close pages ranges in order to allow for resources to be freed.
 *
 * Note: The page range abstraction enables consumers and producers to claim the same pages independently.
 * A consumer does not have to wait for the producers to advertise pages they are working on, instead the (low-level/internal) consumer can
 * simply claim the pages it wants to read in advance and then schedule any needed executors.
 *
 * The claim range can be mutated which performs only the necessary
 * (un)claim operations.
 *
 * @param <A> The array type for transferring data in blocks
 */
public interface SliceAccessor<A>
    extends AutoCloseable
{
    /** The slice this accessor operates on. */
    Slice<A> getSlice();

    /**
     * Adds an eviction guard (if the slice supports it) and binds its life cycle
     * to this accessor.
     * Closing an accessor thus also removes all eviction guards created by it.
     *
     * This method must be called after acquiring a read lock on the slice's metadata.
     *
     * @param ranges The set of ranges which to protect from eviction
     */
     void addEvictionGuard(RangeSet<Long> ranges);

     /** Convenience variant of {@link #addEvictionGuard(RangeSet)} for a single range. */
     default void addEvictionGuard(Range<Long> range) {
         addEvictionGuard(ImmutableRangeSet.of(range));
     }

    /**
     * Set or update the claimed range - this will immediately request references to any pages providing the data for that range.
     * Pages outside of that range are considered as no longer needed and will immediately be released.
     *
     * This method prepares the pages which can be subsequently locked.
     * Calling this method while the page range is locked ({@link #lock()}) raises an {@link IllegalStateException}.
     *
     * @param startOffset
     * @param endOffset
     */
    void claimByOffsetRange(long startOffset, long endOffset);

    /**
     * Lock the range for writing
     */
    void lock();

    /**
     * Put a sequence of items into the claimed range
     * Attempts to put items outside of the claimed range raises an {@link IndexOutOfBoundsException}
     *
     * The page range should be locked when calling this method.
     */
    void write(long offset, A arrayWithItemsOfTypeT, int arrOffset, int arrLength) throws IOException;

    /**
     * Read operation that assumes a prior check for available ranges has been performed.
     * Only use this method after locking.
     */
    int unsafeRead(A tgt, int tgtOffset, long srcOffset, int length) throws IOException;

    /**
     * Unlock the range
     */
    void unlock();

    /**
     * Releases all currently held pages.
     * Future requests via {@link #claimByOffsetRange(long, long)} are allowed.
     *
     */
    void releaseAll();

    /**
     * Closes the page range. Implementations of this method should call
     * {@link #releaseAll()} and in addition prevent any further claims.
     */
    @Override
    void close();
}
+
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/SliceMetaDataBasic.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/SliceMetaDataBasic.java
new file mode 100644
index 0000000000..e75ff75838
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/SliceMetaDataBasic.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.api;
+
+import java.util.List;
+
+import org.apache.jena.ext.com.google.common.base.Preconditions;
+import org.apache.jena.ext.com.google.common.collect.Range;
+import org.apache.jena.ext.com.google.common.collect.RangeMap;
+import org.apache.jena.ext.com.google.common.collect.RangeSet;
+import org.apache.jena.ext.com.google.common.collect.TreeRangeSet;
+import org.apache.jena.sparql.service.enhancer.impl.util.RangeUtils;
+
+public interface SliceMetaDataBasic {
+    RangeSet<Long> getLoadedRanges();
+    RangeMap<Long, List<Throwable>> getFailedRanges();
+
+    long getMinimumKnownSize();
+    void setMinimumKnownSize(long size);
+
+    long getMaximumKnownSize();
+    void setMaximumKnownSize(long size);
+
+    /** Updates the maximum known size iff the argument is less than the current known maximum */
+    default SliceMetaDataBasic updateMaximumKnownSize(long size) {
+        long current = getMaximumKnownSize();
+
+        if (size < current) {
+            setMaximumKnownSize(size);
+        }
+
+        return this;
+    }
+
+    /** Updates the minimum known size iff the argument is greater than the current known minimum */
+    default SliceMetaDataBasic updateMinimumKnownSize(long size) {
+        long current = getMinimumKnownSize();
+
+        if (size > current) {
+            setMinimumKnownSize(size);
+        }
+
+        return this;
+    }
+
+    default long getKnownSize() {
+        long minSize = getMinimumKnownSize();
+        long maxSize = getMaximumKnownSize();
+
+        return minSize == maxSize ? minSize : -1;
+    }
+
+    default SliceMetaDataBasic setKnownSize(long size) {
+        Preconditions.checkArgument(size >= 0, "Negative known size");
+
+        setMinimumKnownSize(size);
+        setMaximumKnownSize(size);
+
+        return this;
+    }
+
+    default RangeSet<Long> getGaps(Range<Long> requestRange) {
+        long maxKnownSize = getMaximumKnownSize();
+        Range<Long> maxKnownRange = Range.closedOpen(0l, maxKnownSize);
+
+        boolean isConnected = requestRange.isConnected(maxKnownRange);
+
+        RangeSet<Long> result;
+        if (isConnected) {
+            Range<Long> effectiveRequestRange = requestRange.intersection(maxKnownRange);
+            RangeSet<Long> loadedRanges = getLoadedRanges();
+            result = RangeUtils.gaps(effectiveRequestRange, loadedRanges);
+        } else {
+            result = TreeRangeSet.create();
+        }
+
+        return result;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/SliceWithPages.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/SliceWithPages.java
new file mode 100644
index 0000000000..c1c7d13b7f
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/api/SliceWithPages.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.api;
+
+import org.apache.jena.sparql.service.enhancer.claimingcache.RefFuture;
+import org.apache.jena.sparql.service.enhancer.slice.impl.BufferView;
+import org.apache.jena.sparql.service.enhancer.slice.impl.SliceAccessorImpl;
+
/**
 * A slice whose data is organized in fixed-size pages addressed by page id.
 *
 * @param <T> The array type for transferring data in blocks
 */
public interface SliceWithPages<T>
    extends Slice<T>, PageHelper
{
    @Override
    long getPageSize();

    /** Accessors over this slice operate on its pages. */
    @Override
    default SliceAccessor<T> newSliceAccessor() {
        return new SliceAccessorImpl<>(this);
    }

    /**
     * Obtain a reference to the buffer backing the given page.
     * NOTE(review): presumably the returned ref must be released/closed by the
     * caller to allow cache eviction — confirm against {@code RefFuture}.
     */
    RefFuture<BufferView<T>> getPageForPageId(long pageId);
}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/ArrayOpsObject.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/ArrayOpsObject.java
new file mode 100644
index 0000000000..47b8ae2cd3
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/ArrayOpsObject.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.impl;
+
+import java.util.Arrays;
+import java.util.function.IntFunction;
+
+import org.apache.jena.sparql.service.enhancer.slice.api.ArrayOps;
+
+/**
+ * Class for abstracting (bulk) operations on arrays.
+ * This indirection allows for uniform handling of arrays of primitive and non-primitive types.
+ */
+public class ArrayOpsObject<T>
+    implements ArrayOps<T[]>
+{
+    // When operations operate on that many items then use the system functions
+    public static final int SYSTEM_THRESHOLD = 16;
+    protected IntFunction<T[]> arrayConstructor;
+
+    public ArrayOpsObject(IntFunction<T[]> arrayConstructor) {
+        super();
+        this.arrayConstructor = arrayConstructor;
+    }
+
+    @Override
+    public T[] create(int size) {
+        return arrayConstructor.apply(size);
+    }
+
+    @Override
+    public Object getDefaultValue() {
+        return null;
+    }
+
+    @Override
+    public Object get(Object[] array, int index) {
+        return array[index];
+    }
+
+    @Override
+    public void set(Object[] array, int index, Object value) {
+        array[index] = value;
+    }
+
+    @Override
+    public void fill(Object[] array, int offset, int length, Object value) {
+        if (length < SYSTEM_THRESHOLD) {
+            for (int i = 0; i < length; ++i) {
+                array[offset + i] = value;
+            }
+        } else {
+            Arrays.fill(array, offset, length, value);
+        }
+    }
+
+    @Override
+    public void copy(Object[] src, int srcPos, Object[] dest, int destPos, int length) {
+        if (length < SYSTEM_THRESHOLD) {
+            for (int i = 0; i < length; ++i) {
+                dest[destPos + i] = src[srcPos + i];
+            }
+        } else {
+            System.arraycopy(src, srcPos, dest, destPos, length);
+        }
+    }
+
+    @Override
+    public int length(Object[] array) {
+        return array.length;
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/ArrayReadable.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/ArrayReadable.java
new file mode 100644
index 0000000000..c48d9fe6b0
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/ArrayReadable.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.impl;
+
+import java.io.IOException;
+
+import org.apache.jena.sparql.service.enhancer.slice.api.ArrayOps;
+import org.apache.jena.sparql.service.enhancer.slice.api.HasArrayOps;
+
/** Interface for random-access reads of items from a sequence into an array. */
public interface ArrayReadable<A>
    extends HasArrayOps<A>
{
    /**
     * Read up to {@code length} items starting at {@code srcOffset} into
     * {@code tgt} beginning at {@code tgtOffset}.
     * NOTE(review): presumably returns the number of items read (InputStream-style) —
     * confirm against implementations.
     */
    int readInto(A tgt, int tgtOffset, long srcOffset, int length) throws IOException;

    /**
     * Untyped variant of {@link #readInto(Object, int, long, int)}; the cast is safe
     * only if {@code tgt} is actually of the array type {@code A}.
     */
    @SuppressWarnings("unchecked")
    default int readIntoRaw(Object tgt, int tgtOffset, long srcOffset, int length) throws IOException {
        return readInto((A)tgt, tgtOffset, srcOffset, length);
    }

    /** Read the single item at the given index via a one-element scratch array. */
    default Object get(long index) throws IOException {
        ArrayOps<A> arrayOps = getArrayOps();
        A singleton = arrayOps.create(1);
        readInto(singleton, 0, index, 1);
        Object result = arrayOps.get(singleton, 0);
        return result;
    }
}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/ArrayWritable.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/ArrayWritable.java
new file mode 100644
index 0000000000..d71c392028
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/ArrayWritable.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.impl;
+
+import java.io.IOException;
+import java.lang.reflect.Array;
+
+import org.apache.jena.sparql.service.enhancer.slice.api.ArrayOps;
+import org.apache.jena.sparql.service.enhancer.slice.api.HasArrayOps;
+
+/** Interface for putting an array of items into a sequence at a certain offset */
+public interface ArrayWritable<A>
+    extends HasArrayOps<A>
+{
+    /** The method that needs to be implemented; all other methods default-delegate to this one. */
+    void write(long offsetInBuffer, A arrayWithItemsOfTypeT, int arrOffset, int arrLength) throws IOException;
+
+    default void put(long offset, Object item) throws IOException {
+        ArrayOps<A> arrayOps = getArrayOps();
+        A singleton = arrayOps.create(1);
+        arrayOps.set(singleton, 0, item);
+        write(offset, singleton);
+    }
+
+    default void write(long offset, A arrayWithItemsOfTypeT, int arrOffset) throws IOException {
+        write(offset, arrayWithItemsOfTypeT, 0, Array.getLength(arrayWithItemsOfTypeT) - arrOffset);
+    }
+
+    default void write(long offset, A arrayWithItemsOfTypeT) throws IOException {
+        write(offset, arrayWithItemsOfTypeT, 0, Array.getLength(arrayWithItemsOfTypeT));
+    }
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/Buffer.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/Buffer.java
new file mode 100644
index 0000000000..e736309a9e
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/Buffer.java
@@ -0,0 +1,26 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.impl;
+
+/**
+ * Marker interface for buffers.
+ * In contrast to a RangeBuffer, a Buffer must support reading any slice of data within
+ * its capacity (see the contract notes on {@link BufferLike}).
+ */
+public interface Buffer<A>
+    extends BufferLike<A>
+{
+    // The following methods have not been ported to Jena because so far they were not needed:
+    // slice(), asList()
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/BufferLike.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/BufferLike.java
new file mode 100644
index 0000000000..5db672ae85
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/BufferLike.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.impl;
+
+/**
+ * BufferLike is a common interface for Buffer and RangeBuffer.
+ * Even though both specializations have most methods in common, the semantics differ in subtle ways:
+ * A buffer must support reading any slice of data within its capacity.
+ * A range buffer only allows for reading within valid ranges and raises an exception upon violation.
+ *
+ * @param <A> the array type holding the items (e.g. {@code Object[]} or {@code byte[]})
+ */
+public interface BufferLike<A>
+    extends ArrayWritable<A>, ArrayReadable<A>
+{
+    /** Buffers with 'unlimited' capacity should return Long.MAX_VALUE */
+    long getCapacity();
+
+    // The original API also had some additional operations; they may be needed for disk-based storage:
+    // BufferLike<A> slice(long offset, long length);
+}
diff --git a/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/BufferOverArray.java b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/BufferOverArray.java
new file mode 100644
index 0000000000..89ead9e4f2
--- /dev/null
+++ b/jena-extras/jena-serviceenhancer/src/main/java/org/apache/jena/sparql/service/enhancer/slice/impl/BufferOverArray.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.jena.sparql.service.enhancer.slice.impl;
+
+import org.apache.jena.ext.com.google.common.primitives.Ints;
+import org.apache.jena.sparql.service.enhancer.slice.api.ArrayOps;
+
+/** A {@link Buffer} implementation backed by a plain in-memory array of type {@code A}. */
+public class BufferOverArray<A>
+    implements Buffer<A>
+{
+    // Strategy object for creating/copying/measuring arrays of type A
+    protected ArrayOps<A> arrayOps;
+    // The backing array; its length determines the buffer's capacity
+    protected A array;
+
+    /** Create a buffer backed by a freshly allocated array of the given size. */
+    public BufferOverArray(ArrayOps<A> arrayOps, int size) {
+        this(arrayOps, arrayOps.create(size));
+    }
+
+    /** Create a buffer backed by the given array (the array is used directly, not copied). */
+    public BufferOverArray(ArrayOps<A> arrayOps, A array) {
+        this.arrayOps = arrayOps;
+        this.array = array;
+    }
+
+    /** Static factory: buffer over a freshly allocated array of the given size. */
+    public static <A> BufferOverArray<A> create(ArrayOps<A> arrayOps, int size) {
+        return new BufferOverArray<>(arrayOps, size);
+    }
+
+    /** Static factory: buffer wrapping the given existing array. */
+    public static <A> BufferOverArray<A> create(ArrayOps<A> arrayOps, A array) {
+        return new BufferOverArray<>(arrayOps, array);
+    }
+
+    @Override
+    public void write(long offsetInBuffer, A arrayWithItemsOfTypeT, int arrOffset, int arrLength) {
+        // The backing store is a plain array, so the buffer offset must fit into an int;
+        // Ints.checkedCast raises IllegalArgumentException on overflow.
+        arrayOps.copy(arrayWithItemsOfTypeT, arrOffset, array, Ints.checkedCast(offsetInBuffer), arrLength);
+    }
+
+    /** The capacity equals the length of the backing array. */
+    @Override
+    public long getCapacity() {
+        return arrayOps.length(array);
+    }
... 2529 lines suppressed ...