You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@marmotta.apache.org by ss...@apache.org on 2015/12/12 17:06:42 UTC

[1/7] marmotta git commit: move experimental C++ LevelDB backend into Apache Marmotta main, and named the new module "ostrich" as an analogy to "kiwi"

Repository: marmotta
Updated Branches:
  refs/heads/develop ed387b973 -> 0ff22a0c3


http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoDatatypeLiteral.java
----------------------------------------------------------------------
diff --git a/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoDatatypeLiteral.java b/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoDatatypeLiteral.java
new file mode 100644
index 0000000..efaca0d
--- /dev/null
+++ b/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoDatatypeLiteral.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.marmotta.ostrich.model;
+
+import org.apache.marmotta.ostrich.model.proto.Model;
+import org.openrdf.model.BNode;
+import org.openrdf.model.Literal;
+import org.openrdf.model.URI;
+
+/**
+ * Add file description here!
+ *
+ * @author Sebastian Schaffert (sschaffert@apache.org)
+ */
+public class ProtoDatatypeLiteral extends ProtoLiteralBase implements Literal {
+
+    private Model.DatatypeLiteral message;
+
+    public ProtoDatatypeLiteral(Model.DatatypeLiteral message) {
+        this.message = message;
+    }
+
+    public ProtoDatatypeLiteral(String content) {
+        this.message = Model.DatatypeLiteral.newBuilder()
+                .setContent(content)
+                .build();
+    }
+
+    public ProtoDatatypeLiteral(String content, URI datatype) {
+        this.message = Model.DatatypeLiteral.newBuilder()
+                .setContent(content)
+                .setDatatype(Model.URI.newBuilder().setUri(datatype.stringValue()).build())
+                .build();
+    }
+
+    public ProtoDatatypeLiteral(String content, String datatype) {
+        this.message = Model.DatatypeLiteral.newBuilder()
+                .setContent(content)
+                .setDatatype(Model.URI.newBuilder().setUri(datatype).build())
+                .build();
+    }
+
+    public Model.DatatypeLiteral getMessage() {
+        return message;
+    }
+
+    /**
+     * Gets the label of this literal.
+     *
+     * @return The literal's label.
+     */
+    @Override
+    public String getLabel() {
+        return message.getContent();
+    }
+
+    /**
+     * Gets the language tag for this literal, normalized to lower case.
+     *
+     * @return The language tag for this literal, or <tt>null</tt> if it
+     * doesn't have one.
+     */
+    @Override
+    public String getLanguage() {
+        return null;
+    }
+
+    /**
+     * Gets the datatype for this literal.
+     *
+     * @return The datatype for this literal, or <tt>null</tt> if it doesn't
+     * have one.
+     */
+    @Override
+    public URI getDatatype() {
+        if (!message.hasDatatype()) {
+            return null;
+        }
+        return new ProtoURI(message.getDatatype());
+    }
+
+
+    /**
+     * Returns the String-value of a <tt>Value</tt> object. This returns either
+     * a {@link Literal}'s label, a {@link URI}'s URI or a {@link BNode}'s ID.
+     */
+    @Override
+    public String stringValue() {
+        return message.getContent();
+    }
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoLiteralBase.java
----------------------------------------------------------------------
diff --git a/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoLiteralBase.java b/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoLiteralBase.java
new file mode 100644
index 0000000..559258f
--- /dev/null
+++ b/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoLiteralBase.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.marmotta.ostrich.model;
+
+import org.apache.marmotta.commons.util.DateUtils;
+import org.openrdf.model.Literal;
+import org.openrdf.model.datatypes.XMLDatatypeUtil;
+
+import javax.xml.datatype.XMLGregorianCalendar;
+import java.math.BigDecimal;
+import java.math.BigInteger;
+import java.util.Date;
+
+/**
+ * Base functionality for both types of literals (type conversions, equals, etc).
+ *
+ * @author Sebastian Schaffert (sschaffert@apache.org)
+ */
+public abstract class ProtoLiteralBase implements Literal {
+
+
+    /**
+     * Returns the <tt>boolean</tt> value of this literal.
+     *
+     * @return The <tt>long</tt> value of the literal.
+     * @throws IllegalArgumentException If the literal's label cannot be represented by a <tt>boolean</tt>.
+     */
+    @Override
+    public boolean booleanValue() {
+        return Boolean.parseBoolean(getLabel());
+    }
+
+
+    /**
+     * Returns the <tt>byte</tt> value of this literal.
+     *
+     * @return The <tt>byte value of the literal.
+     * @throws NumberFormatException If the literal cannot be represented by a <tt>byte</tt>.
+     */
+    @Override
+    public byte byteValue() {
+        return Byte.parseByte(getLabel());
+    }
+
+    /**
+     * Returns the <tt>short</tt> value of this literal.
+     *
+     * @return The <tt>short</tt> value of the literal.
+     * @throws NumberFormatException If the literal's label cannot be represented by a <tt>short</tt>.
+     */
+    @Override
+    public short shortValue() {
+        return Short.parseShort(getLabel());
+    }
+
+    /**
+     * Returns the <tt>int</tt> value of this literal.
+     *
+     * @return The <tt>int</tt> value of the literal.
+     * @throws NumberFormatException If the literal's label cannot be represented by a <tt>int</tt>.
+     */
+    @Override
+    public int intValue() {
+        return Integer.parseInt(getLabel());
+    }
+
+    /**
+     * Returns the <tt>long</tt> value of this literal.
+     *
+     * @return The <tt>long</tt> value of the literal.
+     * @throws NumberFormatException If the literal's label cannot be represented by to a <tt>long</tt>.
+     */
+    @Override
+    public long longValue() {
+        return Long.parseLong(getLabel());
+    }
+
+    /**
+     * Returns the integer value of this literal.
+     *
+     * @return The integer value of the literal.
+     * @throws NumberFormatException If the literal's label is not a valid integer.
+     */
+    @Override
+    public BigInteger integerValue() {
+        return new BigInteger(getLabel());
+    }
+
+    /**
+     * Returns the decimal value of this literal.
+     *
+     * @return The decimal value of the literal.
+     * @throws NumberFormatException If the literal's label is not a valid decimal.
+     */
+    @Override
+    public BigDecimal decimalValue() {
+        return new BigDecimal(getLabel());
+    }
+
+    /**
+     * Returns the <tt>float</tt> value of this literal.
+     *
+     * @return The <tt>float</tt> value of the literal.
+     * @throws NumberFormatException If the literal's label cannot be represented by a <tt>float</tt>.
+     */
+    @Override
+    public float floatValue() {
+        return Float.parseFloat(getLabel());
+    }
+
+    /**
+     * Returns the <tt>double</tt> value of this literal.
+     *
+     * @return The <tt>double</tt> value of the literal.
+     * @throws NumberFormatException If the literal's label cannot be represented by a <tt>double</tt>.
+     */
+    @Override
+    public double doubleValue() {
+        return Double.parseDouble(getLabel());
+    }
+
+    /**
+     * Returns the {@link XMLGregorianCalendar} value of this literal. A calendar
+     * representation can be given for literals whose label conforms to the
+     * syntax of the following <a href="http://www.w3.org/TR/xmlschema-2/">XML
+     * Schema datatypes</a>: <tt>dateTime</tt>, <tt>time</tt>,
+     * <tt>date</tt>, <tt>gYearMonth</tt>, <tt>gMonthDay</tt>,
+     * <tt>gYear</tt>, <tt>gMonth</tt> or <tt>gDay</tt>.
+     *
+     * @return The calendar value of the literal.
+     * @throws IllegalArgumentException If the literal cannot be represented by a
+     *                                  {@link XMLGregorianCalendar}.
+     */
+    @Override
+    public XMLGregorianCalendar calendarValue() {
+        try {
+            return XMLDatatypeUtil.parseCalendar(getLabel());
+        } catch(IllegalArgumentException ex) {
+            // try harder to parse the label, sometimes they have stupid formats ...
+            Date cv = DateUtils.parseDate(getLabel());
+            return DateUtils.getXMLCalendar(cv);
+        }
+    }
+
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+
+        if(o instanceof Literal) {
+            Literal that = (Literal)o;
+
+            if(!this.getLabel().equals(that.getLabel())) return false;
+
+            if(this.getLanguage() != null && !(this.getLanguage().equals(that.getLanguage()))) return false;
+
+            if(this.getDatatype()==null && that.getDatatype()!=null) return false;
+
+            if(this.getDatatype() != null && !this.getDatatype().equals(that.getDatatype())) return false;
+
+            return true;
+        }
+
+        return false;
+    }
+
+    @Override
+    public int hashCode() {
+        return getLabel().hashCode();
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoNamespace.java
----------------------------------------------------------------------
diff --git a/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoNamespace.java b/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoNamespace.java
new file mode 100644
index 0000000..34bf219
--- /dev/null
+++ b/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoNamespace.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.marmotta.ostrich.model;
+
+import org.apache.marmotta.ostrich.model.proto.Model;
+import org.openrdf.model.Namespace;
+
+/**
+ * An implementation of a Sesame Namespace backed by a protocol buffer.
+ *
+ * @author Sebastian Schaffert (sschaffert@apache.org)
+ */
+public class ProtoNamespace implements Namespace {
+    private Model.Namespace message;
+
+    public ProtoNamespace(Model.Namespace message) {
+        this.message = message;
+    }
+
+    public ProtoNamespace(String prefix, String uri) {
+        message = Model.Namespace.newBuilder()
+                .setUri(uri)
+                .setPrefix(prefix).build();
+    }
+
+    public Model.Namespace getMessage() {
+        return message;
+    }
+
+    /**
+     * Gets the name of the current namespace (i.e. it's URI).
+     *
+     * @return name of namespace
+     */
+    @Override
+    public String getName() {
+        return message.getUri();
+    }
+
+    /**
+     * Gets the prefix of the current namespace. The default namespace is
+     * represented by an empty prefix string.
+     *
+     * @return prefix of namespace, or an empty string in case of the default
+     * namespace.
+     */
+    @Override
+    public String getPrefix() {
+        return message.getPrefix();
+    }
+
+    @Override
+    public int compareTo(Namespace namespace) {
+        return getPrefix().compareTo(namespace.getPrefix());
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || !(o instanceof Namespace)) return false;
+
+        Namespace that = (Namespace) o;
+
+        return getPrefix().equals(that.getPrefix());
+    }
+
+    @Override
+    public int hashCode() {
+        return getPrefix().hashCode();
+    }
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoStatement.java
----------------------------------------------------------------------
diff --git a/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoStatement.java b/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoStatement.java
new file mode 100644
index 0000000..d6ad805
--- /dev/null
+++ b/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoStatement.java
@@ -0,0 +1,221 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.marmotta.ostrich.model;
+
+import org.apache.marmotta.ostrich.model.proto.Model;
+import org.openrdf.model.*;
+
+/**
+ * Add file description here!
+ *
+ * @author Sebastian Schaffert (sschaffert@apache.org)
+ */
+public class ProtoStatement implements Statement {
+
+    private Model.Statement message;
+
+    public ProtoStatement(Model.Statement message) {
+        this.message = message;
+    }
+
+    /**
+     * Build a statement backed by a proto message. The constructor can be used with any Sesame values, but
+     * using ProtoValues provides slighty better performance.
+     *
+     * @param subject
+     * @param predicate
+     * @param object
+     * @param context
+     */
+    public ProtoStatement(Resource subject, URI predicate, Value object, Resource context) {
+        // Build statement, mapping the Java inheritance structure to the Proto oneof structure.
+        Model.Statement.Builder builder = Model.Statement.newBuilder();
+        if (subject instanceof ProtoURI) {
+            builder.getSubjectBuilder().setUri(((ProtoURI) subject).getMessage()).build();
+        } else if (subject instanceof ProtoBNode) {
+            builder.getSubjectBuilder().setBnode(((ProtoBNode) subject).getMessage()).build();
+        } else if (subject instanceof URI) {
+            builder.getSubjectBuilder().getUriBuilder().setUri(subject.stringValue()).build();
+        } else if (subject instanceof BNode) {
+            builder.getSubjectBuilder().getBnodeBuilder().setId(subject.stringValue()).build();
+        }
+
+        if (predicate instanceof ProtoURI) {
+            builder.setPredicate(((ProtoURI) predicate).getMessage()).build();
+        } else if (predicate instanceof URI){
+            builder.getPredicateBuilder().setUri(predicate.stringValue()).build();
+        }
+
+        if (object instanceof ProtoStringLiteral) {
+            builder.getObjectBuilder().getLiteralBuilder().setStringliteral(
+                    ((ProtoStringLiteral) object).getMessage()).build();
+        } else if (object instanceof ProtoDatatypeLiteral) {
+            builder.getObjectBuilder().getLiteralBuilder().setDataliteral(
+                    ((ProtoDatatypeLiteral) object).getMessage()).build();
+        } else if (object instanceof Literal) {
+            Literal l = (Literal)object;
+            if (l.getDatatype() != null) {
+                builder.getObjectBuilder().getLiteralBuilder().getDataliteralBuilder()
+                        .setContent(l.stringValue())
+                        .getDatatypeBuilder().setUri(l.getDatatype().stringValue())
+                        .build();
+            } else if(l.getLanguage() != null) {
+                builder.getObjectBuilder().getLiteralBuilder().getStringliteralBuilder()
+                        .setContent(l.stringValue())
+                        .setLanguage(l.getLanguage())
+                        .build();
+            } else {
+                builder.getObjectBuilder().getLiteralBuilder().getStringliteralBuilder()
+                        .setContent(l.stringValue())
+                        .build();
+            }
+        } else if (object instanceof ProtoURI) {
+            builder.getObjectBuilder().getResourceBuilder().setUri(
+                    ((ProtoURI) object).getMessage()).build();
+        } else if (object instanceof ProtoBNode) {
+            builder.getObjectBuilder().getResourceBuilder().setBnode(
+                    ((ProtoBNode) object).getMessage()).build();
+        } else if (object instanceof URI) {
+            builder.getObjectBuilder().getResourceBuilder().getUriBuilder().setUri(
+                    object.stringValue()).build();
+        } else if (object instanceof BNode) {
+            builder.getObjectBuilder().getResourceBuilder().getBnodeBuilder().setId(
+                    object.stringValue()).build();
+        }
+
+        if (context instanceof ProtoURI) {
+            builder.getContextBuilder().setUri(((ProtoURI) context).getMessage()).build();
+        } else if (context instanceof ProtoBNode) {
+            builder.getContextBuilder().setBnode(((ProtoBNode) context).getMessage()).build();
+        } else if (context instanceof URI) {
+            builder.getContextBuilder().getUriBuilder().setUri(context.stringValue()).build();
+        } else if (context instanceof BNode) {
+            builder.getContextBuilder().getBnodeBuilder().setId(context.stringValue()).build();
+        }
+
+        message = builder.build();
+    }
+
+    public Model.Statement getMessage() {
+        return message;
+    }
+
+    /**
+     * Gets the context of this statement.
+     *
+     * @return The statement's context, or <tt>null</tt> in case of the null
+     * context or if not applicable.
+     */
+    @Override
+    public Resource getContext() {
+        if (!message.hasContext()) {
+            return null;
+        }
+        switch(message.getContext().getResourcesCase()) {
+            case URI:
+                return new ProtoURI(message.getContext().getUri());
+            case BNODE:
+                return new ProtoBNode(message.getContext().getBnode());
+        }
+        return null;
+    }
+
+    /**
+     * Gets the subject of this statement.
+     *
+     * @return The statement's subject.
+     */
+    @Override
+    public Resource getSubject() {
+        if (!message.hasSubject()) {
+            return null;
+        }
+        switch(message.getSubject().getResourcesCase()) {
+            case URI:
+                return new ProtoURI(message.getSubject().getUri());
+            case BNODE:
+                return new ProtoBNode(message.getSubject().getBnode());
+        }
+        return null;
+    }
+
+    /**
+     * Gets the predicate of this statement.
+     *
+     * @return The statement's predicate.
+     */
+    @Override
+    public URI getPredicate() {
+        if (!message.hasPredicate()) {
+            return null;
+        }
+
+        return new ProtoURI(message.getPredicate());
+    }
+
+    /**
+     * Gets the object of this statement.
+     *
+     * @return The statement's object.
+     */
+    @Override
+    public Value getObject() {
+        if (!message.hasObject()) {
+            return null;
+        }
+
+        // Convert back from proto oneof to Java inheritance. It's ugly.
+        switch (message.getObject().getValuesCase()) {
+            case RESOURCE:
+                switch(message.getObject().getResource().getResourcesCase()) {
+                    case URI:
+                        return new ProtoURI(message.getObject().getResource().getUri());
+                    case BNODE:
+                        return new ProtoBNode(message.getObject().getResource().getBnode());
+                }
+            case LITERAL:
+                switch(message.getObject().getLiteral().getLiteralsCase()) {
+                    case STRINGLITERAL:
+                        return new ProtoStringLiteral(message.getObject().getLiteral().getStringliteral());
+                    case DATALITERAL:
+                        return new ProtoDatatypeLiteral(message.getObject().getLiteral().getDataliteral());
+                }
+        }
+
+        return null;
+    }
+
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+
+        Statement triple = (Statement) o;
+//        changed according to https://openrdf.atlassian.net/browse/SES-1924
+//        if (!getContext().equals(triple.getContext())) return false;
+        if (!getObject().equals(triple.getObject())) return false;
+        if (!getPredicate().equals(triple.getPredicate())) return false;
+        return getSubject().equals(triple.getSubject());
+
+    }
+
+    @Override
+    public int hashCode() {
+        return 961 * getSubject().hashCode() + 31 * getPredicate().hashCode() + getObject().hashCode();
+    }
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoStringLiteral.java
----------------------------------------------------------------------
diff --git a/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoStringLiteral.java b/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoStringLiteral.java
new file mode 100644
index 0000000..2000a76
--- /dev/null
+++ b/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoStringLiteral.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.marmotta.ostrich.model;
+
+import org.apache.marmotta.ostrich.model.proto.Model;
+import org.openrdf.model.BNode;
+import org.openrdf.model.Literal;
+import org.openrdf.model.URI;
+
+/**
+ * An implementation of a Sesame Literal backed by a StringLiteral protocol buffer.
+ *
+ * @author Sebastian Schaffert (sschaffert@apache.org)
+ */
+public class ProtoStringLiteral extends ProtoLiteralBase implements Literal {
+
+    private Model.StringLiteral message;
+
+    public ProtoStringLiteral(Model.StringLiteral message) {
+        this.message = message;
+    }
+
+    public ProtoStringLiteral(String content) {
+        this.message = Model.StringLiteral.newBuilder()
+                .setContent(content)
+                .build();
+    }
+
+    public ProtoStringLiteral(String content, String language) {
+        this.message = Model.StringLiteral.newBuilder()
+                .setContent(content)
+                .setLanguage(language)
+                .build();
+    }
+
+    public Model.StringLiteral getMessage() {
+        return message;
+    }
+
+    /**
+     * Gets the language tag for this literal, normalized to lower case.
+     *
+     * @return The language tag for this literal, or <tt>null</tt> if it
+     * doesn't have one.
+     */
+    @Override
+    public String getLanguage() {
+        if ("".equals(message.getLanguage()) || message.getLanguage() == null) {
+            return null;
+        }
+        return message.getLanguage();
+    }
+
+    /**
+     * Gets the datatype for this literal.
+     *
+     * @return The datatype for this literal, or <tt>null</tt> if it doesn't
+     * have one.
+     */
+    @Override
+    public URI getDatatype() {
+        return null;
+    }
+
+
+    /**
+     * Gets the label of this literal.
+     *
+     * @return The literal's label.
+     */
+    @Override
+    public String getLabel() {
+        return message.getContent();
+    }
+
+
+    /**
+     * Returns the String-value of a <tt>Value</tt> object. This returns either
+     * a {@link Literal}'s label, a {@link URI}'s URI or a {@link BNode}'s ID.
+     */
+    @Override
+    public String stringValue() {
+        return message.getContent();
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoURI.java
----------------------------------------------------------------------
diff --git a/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoURI.java b/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoURI.java
new file mode 100644
index 0000000..af92a75
--- /dev/null
+++ b/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoURI.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.marmotta.ostrich.model;
+
+
+import org.apache.marmotta.commons.sesame.model.URICommons;
+import org.apache.marmotta.ostrich.model.proto.Model;
+import org.openrdf.model.BNode;
+import org.openrdf.model.Literal;
+import org.openrdf.model.URI;
+
+/**
+ * An implementation of a Sesame URI backed by a protocol buffer.
+ *
+ * @author Sebastian Schaffert (sschaffert@apache.org)
+ */
+public class ProtoURI implements URI {
+
+    private Model.URI message;
+
+    private String namespace, localName;
+
+
+    public ProtoURI(String uri) {
+        message = Model.URI.newBuilder().setUri(uri).build();
+    }
+
+    public ProtoURI(Model.URI message) {
+        this.message = message;
+    }
+
+    public Model.URI getMessage() {
+        return message;
+    }
+
+    /**
+     * Gets the local name of this URI. The local name is defined as per the
+     * algorithm described in the class documentation.
+     *
+     * @return The URI's local name.
+     */
+    @Override
+    public String getLocalName() {
+        initNamespace();
+
+        return localName;
+    }
+
+    /**
+     * Gets the namespace of this URI. The namespace is defined as per the
+     * algorithm described in the class documentation.
+     *
+     * @return The URI's namespace.
+     */
+    @Override
+    public String getNamespace() {
+        initNamespace();
+
+        return namespace;
+    }
+
+    /**
+     * Returns the String-value of a <tt>Value</tt> object. This returns either
+     * a {@link Literal}'s label, a {@link URI}'s URI or a {@link BNode}'s ID.
+     */
+    @Override
+    public String stringValue() {
+        return message.getUri();
+    }
+
+    @Override
+    public String toString() {
+        return message.getUri();
+    }
+
+    private void initNamespace() {
+        if(namespace == null || localName == null) {
+            String[] components = URICommons.splitNamespace(message.getUri());
+            namespace = components[0];
+            localName = components[1];
+        }
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+
+        if(o instanceof URI) {
+            return this.stringValue().equals(((URI)o).stringValue());
+        }
+        return false;
+    }
+
+    @Override
+    public int hashCode() {
+        return stringValue().hashCode();
+    }
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/model/src/test/java/org/apache/marmotta/ostrich/model/test/StatementTest.java
----------------------------------------------------------------------
diff --git a/libraries/ostrich/model/src/test/java/org/apache/marmotta/ostrich/model/test/StatementTest.java b/libraries/ostrich/model/src/test/java/org/apache/marmotta/ostrich/model/test/StatementTest.java
new file mode 100644
index 0000000..c8deede
--- /dev/null
+++ b/libraries/ostrich/model/src/test/java/org/apache/marmotta/ostrich/model/test/StatementTest.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.marmotta.ostrich.model.test;
+
+import org.apache.marmotta.ostrich.model.ProtoBNode;
+import org.apache.marmotta.ostrich.model.ProtoStatement;
+import org.apache.marmotta.ostrich.model.ProtoStringLiteral;
+import org.apache.marmotta.ostrich.model.ProtoURI;
+import org.junit.Assert;
+import org.junit.Test;
+import org.openrdf.model.BNode;
+import org.openrdf.model.Literal;
+import org.openrdf.model.URI;
+import org.openrdf.model.impl.BNodeImpl;
+import org.openrdf.model.impl.LiteralImpl;
+import org.openrdf.model.impl.URIImpl;
+
+/**
+ * Add file description here!
+ *
+ * @author Sebastian Schaffert (sschaffert@apache.org)
+ */
+public class StatementTest {
+
+    @Test
+    public void testCreateFromProtoValues() {
+        ProtoBNode s = new ProtoBNode("1234");
+        ProtoURI p = new ProtoURI("http://apache.org/example/P1");
+        ProtoStringLiteral o = new ProtoStringLiteral("Hello, World", "en");
+        ProtoURI c = new ProtoURI("http://apache.org/example/C1");
+        ProtoStatement stmt = new ProtoStatement(s, p, o, c);
+
+        Assert.assertEquals(stmt.getSubject(), s);
+        Assert.assertEquals(stmt.getPredicate(), p);
+        Assert.assertEquals(stmt.getObject(), o);
+        Assert.assertEquals(stmt.getContext(), c);
+    }
+
+    @Test
+    public void testCreateFromSesameValues() {
+        BNode s = new BNodeImpl("1234");
+        URI p = new URIImpl("http://apache.org/example/P1");
+        Literal o = new LiteralImpl("Hello, World", "en");
+        URI c = new URIImpl("http://apache.org/example/C1");
+        ProtoStatement stmt = new ProtoStatement(s, p, o, c);
+
+        Assert.assertEquals(stmt.getSubject(), s);
+        Assert.assertEquals(stmt.getPredicate(), p);
+        Assert.assertEquals(stmt.getObject(), o);
+        Assert.assertEquals(stmt.getContext(), c);
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/model/src/test/java/org/apache/marmotta/ostrich/model/test/URITest.java
----------------------------------------------------------------------
diff --git a/libraries/ostrich/model/src/test/java/org/apache/marmotta/ostrich/model/test/URITest.java b/libraries/ostrich/model/src/test/java/org/apache/marmotta/ostrich/model/test/URITest.java
new file mode 100644
index 0000000..4635d02
--- /dev/null
+++ b/libraries/ostrich/model/src/test/java/org/apache/marmotta/ostrich/model/test/URITest.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.marmotta.ostrich.model.test;
+
+import org.apache.marmotta.ostrich.model.ProtoURI;
+import org.apache.marmotta.ostrich.model.proto.Model;
+import org.junit.Assert;
+import org.junit.Test;
+import org.openrdf.model.URI;
+
+/**
+ * Test constructing URIs.
+ *
+ * @author Sebastian Schaffert (sschaffert@apache.org)
+ */
+public class URITest {
+
+    @Test
+    public void testCreateFromString() {
+        URI uri = new ProtoURI("http://apache.org/example");
+
+        Assert.assertEquals(uri.stringValue(), "http://apache.org/example");
+    }
+
+    @Test
+    public void testCreateFromMessage() {
+        Model.URI msg = Model.URI.newBuilder().setUri("http://apache.org/example").build();
+        URI uri = new ProtoURI(msg);
+
+        Assert.assertEquals(uri.stringValue(), "http://apache.org/example");
+    }
+
+    @Test
+    public void testEquals() {
+        URI uri1 = new ProtoURI("http://apache.org/example");
+        URI uri2 = new ProtoURI("http://apache.org/example");
+
+        Assert.assertEquals(uri1, uri2);
+
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/model/src/test/resources/logback.xml
----------------------------------------------------------------------
diff --git a/libraries/ostrich/model/src/test/resources/logback.xml b/libraries/ostrich/model/src/test/resources/logback.xml
new file mode 100644
index 0000000..9152e16
--- /dev/null
+++ b/libraries/ostrich/model/src/test/resources/logback.xml
@@ -0,0 +1,28 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one or more
+  ~ contributor license agreements.  See the NOTICE file distributed with
+  ~ this work for additional information regarding copyright ownership.
+  ~ The ASF licenses this file to You under the Apache License, Version 2.0
+  ~ (the "License"); you may not use this file except in compliance with
+  ~ the License.  You may obtain a copy of the License at
+  ~
+  ~      http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<configuration>
+    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%d{HH:mm:ss.SSS} %level %logger{15} - %m%n</pattern>
+        </encoder>
+    </appender>
+
+    <root level="${root-level:-INFO}">
+        <appender-ref ref="CONSOLE"/>
+    </root>
+</configuration>

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/pom.xml
----------------------------------------------------------------------
diff --git a/libraries/ostrich/pom.xml b/libraries/ostrich/pom.xml
new file mode 100644
index 0000000..321a07b
--- /dev/null
+++ b/libraries/ostrich/pom.xml
@@ -0,0 +1,50 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.marmotta</groupId>
+        <artifactId>marmotta-parent</artifactId>
+        <version>3.4.0-SNAPSHOT</version>
+        <relativePath>../../parent</relativePath>
+    </parent>
+
+    <artifactId>ostrich-parent</artifactId>
+    <packaging>pom</packaging>
+
+    <name>Ostrich Triplestore: Parent</name>
+    <description>A Sesame Triple Store based on a fast C++ LevelDB database.</description>
+
+    <inceptionYear>2015</inceptionYear>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-surefire-plugin</artifactId>
+            </plugin>
+        </plugins>
+    </build>
+
+    <modules>
+        <module>model</module>
+        <module>client</module>
+    </modules>
+
+</project>

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/pom.xml
----------------------------------------------------------------------
diff --git a/libraries/pom.xml b/libraries/pom.xml
index 78e560f..b21c862 100644
--- a/libraries/pom.xml
+++ b/libraries/pom.xml
@@ -69,4 +69,13 @@
         <module>ldclient</module>
         <module>ldpath</module>
     </modules>
+
+    <profiles>
+        <profile>
+            <id>ostrich</id>
+            <modules>
+                <module>ostrich</module>
+            </modules>
+        </profile>
+    </profiles>
 </project>

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/platform/backends/marmotta-backend-ostrich/pom.xml
----------------------------------------------------------------------
diff --git a/platform/backends/marmotta-backend-ostrich/pom.xml b/platform/backends/marmotta-backend-ostrich/pom.xml
new file mode 100644
index 0000000..76b52f2
--- /dev/null
+++ b/platform/backends/marmotta-backend-ostrich/pom.xml
@@ -0,0 +1,175 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one or more
+  ~ contributor license agreements.  See the NOTICE file distributed with
+  ~ this work for additional information regarding copyright ownership.
+  ~ The ASF licenses this file to You under the Apache License, Version 2.0
+  ~ (the "License"); you may not use this file except in compliance with
+  ~ the License.  You may obtain a copy of the License at
+  ~
+  ~      http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.marmotta</groupId>
+        <artifactId>marmotta-parent</artifactId>
+        <version>3.4.0-SNAPSHOT</version>
+    </parent>
+
+    <groupId>org.apache.marmotta</groupId>
+    <artifactId>marmotta-backend-ostrich</artifactId>
+
+    <name>Apache Marmotta Platform: C++ LevelDB Backend</name>
+    <description>
+        This module provides an Apache Marmotta backend using a LevelDB database. This is a suitable backend
+        for high performance environments.
+    </description>
+
+    <build>
+        <pluginManagement>
+            <plugins>
+                <plugin> <!-- generate JRebel Configuration -->
+                    <groupId>org.zeroturnaround</groupId>
+                    <artifactId>jrebel-maven-plugin</artifactId>
+                    <version>1.1.3</version>
+                    <executions>
+                        <execution>
+                            <id>generate-rebel-xml</id>
+                            <phase>process-resources</phase>
+                            <goals>
+                                <goal>generate</goal>
+                            </goals>
+                        </execution>
+                    </executions>
+                    <configuration>
+                        <relativePath>../../../</relativePath>
+                        <rootPath>$${rebel.root}</rootPath>
+                        <classpath>
+                            <resources>
+                                <resource><!-- default resource --></resource>
+                                <resource><directory>src/main/resources</directory></resource>
+                            </resources>
+                        </classpath>
+                    </configuration>
+                </plugin>
+            </plugins>
+        </pluginManagement>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.marmotta</groupId>
+                <artifactId>buildinfo-maven-plugin</artifactId>
+                <configuration>
+                    <systemProperties>
+                        <systemProperty>user.name</systemProperty>
+                        <systemProperty>user.timezone</systemProperty>
+                        <systemProperty>java.vm.vendor</systemProperty>
+                        <systemProperty>java.vm.version</systemProperty>
+                        <systemProperty>java.vm.name</systemProperty>
+                        <systemProperty>java.runtime.version</systemProperty>
+                        <systemProperty>os.name</systemProperty>
+                        <systemProperty>os.version</systemProperty>
+                        <systemProperty>os.arch</systemProperty>
+                    </systemProperties>
+                </configuration>
+                <executions>
+                    <execution>
+                        <phase>process-resources</phase>
+                        <goals>
+                            <goal>extract</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-javadoc-plugin</artifactId>
+                <executions>
+                    <!--
+                    <execution>
+                        <id>aggregate</id>
+                        <goals>
+                            <goal>aggregate</goal>
+                        </goals>
+                        <phase>site</phase>
+                    </execution>
+                    -->
+                    <execution>
+                        <!-- configure how the REST API documentation will be produced -->
+                        <id>restapi</id>
+                        <configuration>
+                            <doclet>com.lunatech.doclets.jax.jaxrs.JAXRSDoclet</doclet>
+
+                            <name>REST API</name>
+                            <description>REST API for Marmotta Webservices</description>
+
+                            <outputDirectory>${project.build.outputDirectory}/doc</outputDirectory>
+                            <reportOutputDirectory>${project.build.outputDirectory}/web/doc</reportOutputDirectory>
+                            <destDir>rest</destDir>
+
+                            <docletArtifact>
+                                <groupId>com.lunatech.jax-doclets</groupId>
+                                <artifactId>doclets</artifactId>
+                                <version>${jax.doclets.version}</version>
+                            </docletArtifact>
+                            <additionalparam>
+                                -jaxrscontext {BASE}
+                                -charset UTF-8
+                            </additionalparam>
+
+                            <!--
+                                                        <stylesheetfile>${project.parent.basedir}/config/doc/doclet.css</stylesheetfile>
+                            -->
+
+                            <header><![CDATA[<!--###BEGIN_CONTENT###--><div class="javadoc">]]></header>
+                            <footer><![CDATA[</div><!--###END_CONTENT###-->]]></footer>
+                            <encoding>UTF-8</encoding>
+                            <detectOfflineLinks>false</detectOfflineLinks>
+
+                            <!-- For the project-reports page -->
+                        </configuration>
+                        <goals>
+                            <goal>javadoc</goal>
+                        </goals>
+                        <phase>generate-resources</phase>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.marmotta</groupId>
+            <artifactId>marmotta-core</artifactId>
+            <version>${project.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.marmotta</groupId>
+            <artifactId>marmotta-sail-transactions</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.marmotta</groupId>
+            <artifactId>ostrich-model</artifactId>
+            <version>3.4.0-SNAPSHOT</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.marmotta</groupId>
+            <artifactId>ostrich-client</artifactId>
+            <version>3.4.0-SNAPSHOT</version>
+        </dependency>
+
+    </dependencies>
+
+
+</project>

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/platform/backends/marmotta-backend-ostrich/src/main/java/org/apache/marmotta/platform/backend/ostrich/OstrichProvider.java
----------------------------------------------------------------------
diff --git a/platform/backends/marmotta-backend-ostrich/src/main/java/org/apache/marmotta/platform/backend/ostrich/OstrichProvider.java b/platform/backends/marmotta-backend-ostrich/src/main/java/org/apache/marmotta/platform/backend/ostrich/OstrichProvider.java
new file mode 100644
index 0000000..e62d639
--- /dev/null
+++ b/platform/backends/marmotta-backend-ostrich/src/main/java/org/apache/marmotta/platform/backend/ostrich/OstrichProvider.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.marmotta.platform.backend.ostrich;
+
+import org.apache.marmotta.ostrich.sail.OstrichSail;
+import org.apache.marmotta.platform.core.api.config.ConfigurationService;
+import org.apache.marmotta.platform.core.api.triplestore.StoreProvider;
+import org.openrdf.repository.sail.SailRepository;
+import org.openrdf.sail.NotifyingSail;
+import org.openrdf.sail.Sail;
+import org.slf4j.Logger;
+
+import javax.enterprise.context.ApplicationScoped;
+import javax.inject.Inject;
+
+/**
+ * A store implementation using the Ostrich C++ LevelDB triple store as backend for Marmotta. The
+ * provider connects to a remote Ostrich server using the "ostrich.host" and "ostrich.port" configuration properties.
+ *
+ * @author Sebastian Schaffert (sschaffert@apache.org)
+ */
+@ApplicationScoped
+public class OstrichProvider implements StoreProvider {
+
+    @Inject
+    private Logger log;
+
+    @Inject
+    private ConfigurationService configurationService;
+
+
+
+    /**
+     * Create the store provided by this SailProvider
+     *
+     * @return a new instance of the store
+     */
+    @Override
+    public NotifyingSail createStore() {
+        log.info("Initializing Backend: LevelDB Store");
+
+        return new OstrichSail(
+                configurationService.getStringConfiguration("ostrich.host", "localhost"),
+                configurationService.getIntConfiguration("ostrich.port", 10000));
+    }
+
+    /**
+     * Create the repository using the sail given as argument. This method is needed because some backends
+     * use custom implementations of SailRepository.
+     *
+     * @param sail
+     * @return
+     */
+    @Override
+    public SailRepository createRepository(Sail sail) {
+        return new SailRepository(sail);
+    }
+
+
+    /**
+     * Return the name of the provider. Used e.g. for displaying status information or logging.
+     *
+     * @return
+     */
+    @Override
+    public String getName() {
+        return "LevelDB Store";
+    }
+
+    /**
+     * Return true if this sail provider is enabled in the configuration.
+     *
+     * @return
+     */
+    @Override
+    public boolean isEnabled() {
+        return true;
+    }
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/platform/backends/marmotta-backend-ostrich/src/main/resources/META-INF/beans.xml
----------------------------------------------------------------------
diff --git a/platform/backends/marmotta-backend-ostrich/src/main/resources/META-INF/beans.xml b/platform/backends/marmotta-backend-ostrich/src/main/resources/META-INF/beans.xml
new file mode 100644
index 0000000..461858e
--- /dev/null
+++ b/platform/backends/marmotta-backend-ostrich/src/main/resources/META-INF/beans.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+
+    Licensed to the Apache Software Foundation (ASF) under one
+    or more contributor license agreements. See the NOTICE file
+    distributed with this work for additional information
+    regarding copyright ownership. The ASF licenses this file
+    to you under the Apache License, Version 2.0 (the
+    "License"); you may not use this file except in compliance
+    with the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+-->
+<beans
+   xmlns="http://java.sun.com/xml/ns/javaee"
+   xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+   xsi:schemaLocation="
+      http://java.sun.com/xml/ns/javaee
+      http://java.sun.com/xml/ns/javaee/beans_1_0.xsd">
+
+</beans>

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/platform/backends/marmotta-backend-ostrich/src/main/resources/config-defaults.properties
----------------------------------------------------------------------
diff --git a/platform/backends/marmotta-backend-ostrich/src/main/resources/config-defaults.properties b/platform/backends/marmotta-backend-ostrich/src/main/resources/config-defaults.properties
new file mode 100644
index 0000000..0bad4d9
--- /dev/null
+++ b/platform/backends/marmotta-backend-ostrich/src/main/resources/config-defaults.properties
@@ -0,0 +1,24 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+###############################################################################
+# LevelDB storage configuration
+###############################################################################
+
+ostrich.host = localhost
+ostrich.port = 10000
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/platform/backends/marmotta-backend-ostrich/src/main/resources/config-descriptions.properties
----------------------------------------------------------------------
diff --git a/platform/backends/marmotta-backend-ostrich/src/main/resources/config-descriptions.properties b/platform/backends/marmotta-backend-ostrich/src/main/resources/config-descriptions.properties
new file mode 100644
index 0000000..378d664
--- /dev/null
+++ b/platform/backends/marmotta-backend-ostrich/src/main/resources/config-descriptions.properties
@@ -0,0 +1,26 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+###############################################################################
+# LevelDB storage configuration
+###############################################################################
+
+ostrich.host.description = Host name of the server running the LevelDB/Ostrich backend server.
+ostrich.host.type = java.lang.String
+
+ostrich.port.description = Port of the server running the LevelDB/Ostrich backend server.
+ostrich.port.type = java.lang.Integer

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/platform/backends/marmotta-backend-ostrich/src/main/resources/kiwi-module.properties
----------------------------------------------------------------------
diff --git a/platform/backends/marmotta-backend-ostrich/src/main/resources/kiwi-module.properties b/platform/backends/marmotta-backend-ostrich/src/main/resources/kiwi-module.properties
new file mode 100644
index 0000000..3e99ed0
--- /dev/null
+++ b/platform/backends/marmotta-backend-ostrich/src/main/resources/kiwi-module.properties
@@ -0,0 +1,38 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+name=Storage Backend: LevelDB
+
+container=Generic
+container.weight = 10
+
+subtitle = Configure LevelDB Backend
+weight = 10
+
+icon_small = /admin/img/config_small.png
+
+#do not change!!!
+baseurl=/storage-leveldb
+
+adminpage.0.title=About
+adminpage.0.link=/admin/about.html
+
+adminpage.1.title=Configuration
+adminpage.1.link=/admin/configuration.html
+
+webservices=
+  
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/platform/backends/marmotta-backend-ostrich/src/main/resources/web/admin/about.html
----------------------------------------------------------------------
diff --git a/platform/backends/marmotta-backend-ostrich/src/main/resources/web/admin/about.html b/platform/backends/marmotta-backend-ostrich/src/main/resources/web/admin/about.html
new file mode 100644
index 0000000..8a08d74
--- /dev/null
+++ b/platform/backends/marmotta-backend-ostrich/src/main/resources/web/admin/about.html
@@ -0,0 +1,36 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one or more
+  ~ contributor license agreements.  See the NOTICE file distributed with
+  ~ this work for additional information regarding copyright ownership.
+  ~ The ASF licenses this file to You under the Apache License, Version 2.0
+  ~ (the "License"); you may not use this file except in compliance with
+  ~ the License.  You may obtain a copy of the License at
+  ~
+  ~      http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<html>
+<head>
+    <!--###BEGIN_HEAD###-->
+    <title>Storage Backend: LevelDB</title>
+    <!--###END_HEAD###-->
+</head>
+<body>
+<!--###BEGIN_CONTENT###-->
+<h1>Storage Backend: LevelDB</h1>
+
+<p>
+    This module provides an Apache Marmotta storage backend based on Ostrich, a C++ triple store built on LevelDB. Ostrich is a
+    scalable, high-performance, highly concurrent triple store implementation suited for cases where your volume of data is very
+    high and/or you do not want or need to store your data in a relational database as with the KiWi backend.
+</p>
+<!--###END_CONTENT###-->
+</body>
+</html>
+

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/platform/backends/marmotta-backend-ostrich/src/main/resources/web/admin/configuration.html
----------------------------------------------------------------------
diff --git a/platform/backends/marmotta-backend-ostrich/src/main/resources/web/admin/configuration.html b/platform/backends/marmotta-backend-ostrich/src/main/resources/web/admin/configuration.html
new file mode 100644
index 0000000..e5d690e
--- /dev/null
+++ b/platform/backends/marmotta-backend-ostrich/src/main/resources/web/admin/configuration.html
@@ -0,0 +1,52 @@
+<!--
+  ~ Licensed to the Apache Software Foundation (ASF) under one or more
+  ~ contributor license agreements.  See the NOTICE file distributed with
+  ~ this work for additional information regarding copyright ownership.
+  ~ The ASF licenses this file to You under the Apache License, Version 2.0
+  ~ (the "License"); you may not use this file except in compliance with
+  ~ the License.  You may obtain a copy of the License at
+  ~
+  ~      http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+
+<html>
+<head>
+<title>Storage Backend: LevelDB</title>
+<script type="text/javascript">
+	var _SERVER_URL = "http://localhost:8080/LMF/";
+</script>
+<!--###BEGIN_HEAD###-->
+    <script type="text/javascript" src="../../webjars/jquery/1.8.2/jquery.min.js"></script>
+    <script type="text/javascript" src="../../core/public/js/widgets/configurator/configurator.js"></script>
+    <link rel="stylesheet" href="style.css" />
+    <link type="text/css" rel="stylesheet" href="../../core/public/js/widgets/configurator/style.css">
+    <script type="text/javascript">
+        jQuery(document).ready(function(){
+            var options = {
+                url : _SERVER_URL,
+                container : "ostrich_configurator",
+                prefix : 'ostrich'
+            }
+            var configurator = new Configurator(options);
+        });
+    </script>
+<!--###END_HEAD###-->
+</head>
+<body>
+  <!--###BEGIN_CONTENT###-->
+  <h1>LevelDB Backend Configuration</h1>
+  <p>
+    Here you can configure options offered by the Ostrich triple store. The configuration currently contains:
+  </p>
+  <div id="ostrich_configurator">
+    <h4>Loading configurator</h4>
+  </div>
+  <!--###END_CONTENT###-->
+</body>
+</html>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/platform/backends/pom.xml
----------------------------------------------------------------------
diff --git a/platform/backends/pom.xml b/platform/backends/pom.xml
index fe41526..766d1b3 100644
--- a/platform/backends/pom.xml
+++ b/platform/backends/pom.xml
@@ -80,6 +80,12 @@
             </modules>
         </profile>
         <profile>
+            <id>ostrich</id>
+            <modules>
+                <module>marmotta-backend-ostrich</module>
+            </modules>
+        </profile>
+        <profile>
             <id>experimental</id>
             <modules>
                 <module>marmotta-backend-http</module>


[6/7] marmotta git commit: move experimental C++ LevelDB backend into Apache Marmotta main, and named the new module "ostrich" as an analogy to "kiwi"

Posted by ss...@apache.org.
http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/parser/rdf_parser.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/parser/rdf_parser.h b/libraries/ostrich/backend/parser/rdf_parser.h
new file mode 100644
index 0000000..ae03bbf
--- /dev/null
+++ b/libraries/ostrich/backend/parser/rdf_parser.h
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MARMOTTA_RDF_PARSER_H
+#define MARMOTTA_RDF_PARSER_H
+
+#include <string>
+#include <functional>
+
+#include <raptor2/raptor2.h>
+#include <model/rdf_model.h>
+
+namespace marmotta {
+namespace parser {
+
+enum Format {
+    RDFXML, TURTLE, NTRIPLES, NQUADS, RDFJSON, RDFA, TRIG, GUESS
+};
+
+/**
+ * Return the format matching the string name passed as argument.
+ */
+Format FormatFromString(const std::string& name);
+
+class Parser {
+ public:
+
+    Parser(const rdf::URI& baseUri) : Parser(baseUri, Format::GUESS) {};
+    Parser(const rdf::URI& baseUri, Format format);
+
+    // TODO: copy and move constructors
+
+    ~Parser();
+
+    void setStatementHandler(std::function<void(const rdf::Statement&)> const &handler) {
+        Parser::stmt_handler = handler;
+    }
+
+    void setNamespaceHandler(std::function<void(const rdf::Namespace&)> const &handler) {
+        Parser::ns_handler = handler;
+    }
+
+
+    void parse(std::istream& in);
+
+ private:
+    raptor_parser* parser;
+    raptor_world*  world;
+    raptor_uri*    base;
+
+    std::function<void(const rdf::Statement&)> stmt_handler;
+    std::function<void(const rdf::Namespace&)> ns_handler;
+
+    static void raptor_stmt_handler(void* user_data, raptor_statement* statement);
+    static void raptor_ns_handler(void* user_data, raptor_namespace *nspace);
+};
+
+class ParseError : std::exception {
+ public:
+    ParseError(const char* message) : message(message) { }
+    ParseError(std::string &message) : message(message) { }
+
+    const std::string &getMessage() const {
+        return message;
+    }
+
+ private:
+    std::string message;
+};
+}
+}
+
+#endif //MARMOTTA_RDF_PARSER_H

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/persistence/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/persistence/CMakeLists.txt b/libraries/ostrich/backend/persistence/CMakeLists.txt
new file mode 100644
index 0000000..3300940
--- /dev/null
+++ b/libraries/ostrich/backend/persistence/CMakeLists.txt
@@ -0,0 +1,10 @@
+include_directories(.. ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_BINARY_DIR}/../model ${RAPTOR_INCLUDE_DIR}/raptor2)
+
+add_executable(marmotta_persistence
+        leveldb_persistence.cc leveldb_persistence.h leveldb_service.cc leveldb_service.h
+        leveldb_server.cc leveldb_sparql.cc leveldb_sparql.h)
+target_link_libraries(marmotta_persistence
+        marmotta_model marmotta_service marmotta_util marmotta_sparql
+        ${LevelDB_LIBRARY} ${GFLAGS_LIBRARY} ${GLOG_LIBRARY}
+        ${CMAKE_THREAD_LIBS_INIT} ${PROTOBUF_LIBRARIES} ${GRPC_LIBRARIES} ${Tcmalloc_LIBRARIES})
+

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/persistence/leveldb_persistence.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/persistence/leveldb_persistence.cc b/libraries/ostrich/backend/persistence/leveldb_persistence.cc
new file mode 100644
index 0000000..da767b5
--- /dev/null
+++ b/libraries/ostrich/backend/persistence/leveldb_persistence.cc
@@ -0,0 +1,685 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define KEY_LENGTH 16
+
+#include <chrono>
+
+#include <glog/logging.h>
+#include <leveldb/filter_policy.h>
+#include <leveldb/write_batch.h>
+#include <google/protobuf/wrappers.pb.h>
+#include <thread>
+
+#include "leveldb_persistence.h"
+#include "model/rdf_operators.h"
+#include "util/murmur3.h"
+
+#define CHECK_STATUS(s) CHECK(s.ok()) << "Writing to database failed: " << s.ToString()
+
+
+using leveldb::WriteBatch;
+using leveldb::Slice;
+using marmotta::rdf::proto::Statement;
+using marmotta::rdf::proto::Namespace;
+using marmotta::rdf::proto::Resource;
+
+namespace marmotta {
+namespace persistence {
+namespace {
+
+
+// Creates an index key based on hashing values of the 4 messages in proper order.
+inline void computeKey(const std::string* a, const std::string* b, const std::string* c, const std::string* d, char* result) {
+    // 128bit keys, use murmur
+    int offset = 0;
+    for (auto m : {a, b, c, d}) {
+        if (m != nullptr) {
+#ifdef __x86_64__
+            MurmurHash3_x64_128(m->data(), m->size(), 13, &result[offset]);
+#else
+            MurmurHash3_x86_128(m->data(), m->size(), 13, &result[offset]);
+#endif
+        } else {
+            return;
+        }
+        offset += KEY_LENGTH;
+    }
+}
+
+enum Position {
+    S = 0, P = 1, O = 2, C = 3
+};
+
+// Reorder a hash key from the generated SPOC key without having to recompute the murmur hashes.
+inline void orderKey(char* dest, const char* src, Position a, Position b, Position c, Position d) {
+    int offset = 0;
+    for (int m : {a, b, c, d}) {
+        memcpy(&dest[offset], &src[m * KEY_LENGTH], KEY_LENGTH * sizeof(char));
+        offset += KEY_LENGTH;
+    }
+}
+
+/**
+ * Helper class to define proper cache keys and identify the index to use based on
+ * fields available in the pattern.
+ */
+class PatternQuery {
+ public:
+    enum IndexType {
+        SPOC, CSPO, OPSC, PCOS
+    };
+
+    PatternQuery(const Statement& pattern) : pattern(pattern) {
+        if (pattern.has_subject()) {
+            s.reset(new std::string());
+            pattern.subject().SerializeToString(s.get());
+        }
+        if (pattern.has_predicate()) {
+            p.reset(new std::string());
+            pattern.predicate().SerializeToString(p.get());
+        }
+        if (pattern.has_object()) {
+            o.reset(new std::string());
+            pattern.object().SerializeToString(o.get());
+        }
+        if (pattern.has_context()) {
+            c.reset(new std::string());
+            pattern.context().SerializeToString(c.get());
+        }
+
+        if (pattern.has_subject()) {
+            // Subject is usually most selective, so if it is present use the
+            // subject-based databases first.
+            if (pattern.has_context()) {
+                type_ = CSPO;
+            } else {
+                type_ = SPOC;
+            }
+        } else if (pattern.has_object()) {
+            // Second-best option is object.
+            type_ = OPSC;
+        } else if (pattern.has_predicate()) {
+            // Predicate is usually least selective.
+            type_ = PCOS;
+        } else if (pattern.has_context()) {
+            type_ = CSPO;
+        } else {
+            // Fall back to SPOC.
+            type_ = SPOC;
+        }
+    }
+
+    /**
+     * Return the lower bound key for querying the index. The iteration range is
+     * inclusive on both ends ([MinKey, MaxKey]; see StatementRangeIterator::hasNext).
+     */
+    char* MinKey() const {
+        char* result = (char*)calloc(4 * KEY_LENGTH, sizeof(char));
+        compute(result);
+        return result;
+    }
+
+    /**
+     * Return the upper bound key for querying the index. Unset fields are padded
+     * with 0xFF bytes; the iteration range is inclusive ([MinKey, MaxKey]).
+     */
+    char* MaxKey() const {
+        char* result = (char*)malloc(4 * KEY_LENGTH * sizeof(char));
+        for (int i=0; i < 4 * KEY_LENGTH; i++) {
+            result[i] = (char)0xFF;
+        }
+
+        compute(result);
+        return result;
+    }
+
+    IndexType Type() const {
+        return type_;
+    }
+
+    PatternQuery& Type(IndexType t) {
+        type_ = t;
+        return *this;
+    }
+
+ private:
+    const Statement& pattern;
+    std::unique_ptr<std::string> s, p, o, c;
+
+    // Creates an index key by hashing the 4 message fields in the order given by Type().
+    void compute(char* result) const {
+        switch(Type()) {
+            case SPOC:
+                computeKey(s.get(), p.get(), o.get(), c.get(), result);
+                break;
+            case CSPO:
+                computeKey(c.get(), s.get(), p.get(), o.get(), result);
+                break;
+            case OPSC:
+                computeKey(o.get(), p.get(), s.get(), c.get(), result);
+                break;
+            case PCOS:
+                computeKey(p.get(), c.get(), o.get(), s.get(), result);
+                break;
+        }
+    }
+
+    IndexType type_;
+};
+
+
+// Base iterator for wrapping LevelDB iterators.
+template<typename T>
+class LevelDBIterator : public util::CloseableIterator<T> {
+ public:
+
+    LevelDBIterator(leveldb::Iterator *it)
+        : it(it), parsed(false) {
+        it->SeekToFirst();
+    }
+
+    virtual ~LevelDBIterator() override {
+        delete it;
+    };
+
+    util::CloseableIterator<T> &operator++() override {
+        it->Next();
+        parsed = false;
+        return *this;
+    };
+
+    T &operator*() override {
+        if (!parsed)
+            proto.ParseFromString(it->value().ToString());
+        return proto;
+    };
+
+    T *operator->() override {
+        if (!parsed)
+            proto.ParseFromString(it->value().ToString());
+        return &proto;
+    };
+
+    virtual bool hasNext() override {
+        return it->Valid();
+    }
+
+
+
+ protected:
+    leveldb::Iterator* it;
+
+    T proto;
+    bool parsed;
+};
+
+
+
+// Iterator wrapping a LevelDB Statement iterator over a given key range.
+class StatementRangeIterator : public LevelDBIterator<Statement> {
+ public:
+
+    StatementRangeIterator(leveldb::Iterator *it, char *loKey, char *hiKey)
+            : LevelDBIterator(it), loKey(loKey), hiKey(hiKey) {
+        it->Seek(leveldb::Slice(loKey, 4 * KEY_LENGTH));
+    }
+
+    ~StatementRangeIterator() override {
+        free(loKey);
+        free(hiKey);
+    };
+
+    bool hasNext() override {
+        return it->Valid() && it->key().compare(leveldb::Slice(hiKey, 4 * KEY_LENGTH)) <= 0;
+    }
+
+ private:
+    char *loKey;
+    char *hiKey;
+};
+
+
+/**
+ * Check if a statement matches with a partial pattern.
+ */
+bool matches(const Statement& stmt, const Statement& pattern) {
+    // equality operators defined in rdf_model.h
+    if (pattern.has_context() && stmt.context() != pattern.context()) {
+        return false;
+    }
+    if (pattern.has_subject() && stmt.subject() != pattern.subject()) {
+        return false;
+    }
+    if (pattern.has_predicate() && stmt.predicate() != pattern.predicate()) {
+        return false;
+    }
+    if (pattern.has_object() && stmt.object() != pattern.object()) {
+        return false;
+    }
+    return true;
+}
+
+}  // namespace
+
+
+/**
+ * Open (creating it if missing, per the options) the LevelDB database at
+ * "<path>/<suffix>.db" using the supplied options.
+ */
+leveldb::DB* buildDB(const std::string& path, const std::string& suffix, const leveldb::Options& options) {
+    leveldb::DB* db;
+    leveldb::Status status = leveldb::DB::Open(options, path + "/" + suffix + ".db", &db);
+    assert(status.ok());
+    return db;
+}
+
+leveldb::Options* buildOptions(KeyComparator* cmp, leveldb::Cache* cache) {
+    leveldb::Options *options = new leveldb::Options();
+    options->create_if_missing = true;
+
+    // Custom comparator for our keys.
+    options->comparator = cmp;
+
+    // Cache reads in memory.
+    options->block_cache = cache;
+
+    // Set a bloom filter of 10 bits.
+    options->filter_policy = leveldb::NewBloomFilterPolicy(10);
+    return options;
+}
+
+leveldb::Options buildNsOptions() {
+    leveldb::Options options;
+    options.create_if_missing = true;
+    return options;
+}
+
+LevelDBPersistence::LevelDBPersistence(const std::string &path, int64_t cacheSize)
+        : comparator(new KeyComparator())
+        , cache(leveldb::NewLRUCache(cacheSize))
+        , options(buildOptions(comparator.get(), cache.get()))
+        , db_spoc(buildDB(path, "spoc", *options)), db_cspo(buildDB(path, "cspo", *options))
+        , db_opsc(buildDB(path, "opsc", *options)), db_pcos(buildDB(path, "pcos", *options))
+        , db_ns_prefix(buildDB(path, "ns_prefix", buildNsOptions()))
+        , db_ns_url(buildDB(path, "ns_url", buildNsOptions()))
+        , db_meta(buildDB(path, "metadata", buildNsOptions())) { }
+
+
+int64_t LevelDBPersistence::AddNamespaces(NamespaceIterator& it) {
+    DLOG(INFO) << "Starting batch namespace import operation.";
+    int64_t count = 0;
+
+    leveldb::WriteBatch batch_prefix, batch_url;
+
+    for (; it.hasNext(); ++it) {
+        AddNamespace(*it, batch_prefix, batch_url);
+        count++;
+    }
+    CHECK_STATUS(db_ns_prefix->Write(leveldb::WriteOptions(), &batch_prefix));
+    CHECK_STATUS(db_ns_url->Write(leveldb::WriteOptions(), &batch_url));
+
+    DLOG(INFO) << "Imported " << count << " namespaces";
+
+    return count;
+}
+
+std::unique_ptr<LevelDBPersistence::NamespaceIterator> LevelDBPersistence::GetNamespaces(
+        const rdf::proto::Namespace &pattern) {
+    DLOG(INFO) << "Get namespaces matching pattern " << pattern.DebugString();
+
+    Namespace ns;
+
+    leveldb::DB *db = nullptr;
+    std::string key, value;
+    if (pattern.prefix() != "") {
+        key = pattern.prefix();
+        db = db_ns_prefix.get();
+    } else if(pattern.uri() != "") {
+        key = pattern.uri();
+        db = db_ns_url.get();
+    }
+    if (db != nullptr) {
+        // Either prefix or uri given, report the correct namespace value.
+        leveldb::Status s = db->Get(leveldb::ReadOptions(), key, &value);
+        if (s.ok()) {
+            ns.ParseFromString(value);
+            return std::unique_ptr<NamespaceIterator>(
+                    new util::SingletonIterator<Namespace>(ns));
+        } else {
+            return std::unique_ptr<NamespaceIterator>(
+                    new util::EmptyIterator<Namespace>());
+        }
+    } else {
+        // Pattern was empty, iterate over all namespaces and report them.
+        return std::unique_ptr<NamespaceIterator>(
+            new LevelDBIterator<Namespace>(db_ns_prefix->NewIterator(leveldb::ReadOptions())));
+    }
+}
+
+
+void LevelDBPersistence::GetNamespaces(
+        const Namespace &pattern, LevelDBPersistence::NamespaceHandler callback) {
+    int64_t count = 0;
+
+    bool cbsuccess = true;
+    for(auto it = GetNamespaces(pattern); cbsuccess && it->hasNext(); ++(*it)) {
+        cbsuccess = callback(**it);
+        count++;
+    }
+
+    DLOG(INFO) << "Get namespaces done (count=" << count <<")";
+}
+
+
+int64_t LevelDBPersistence::AddStatements(StatementIterator& it) {
+    auto start = std::chrono::steady_clock::now();
+    LOG(INFO) << "Starting batch statement import operation.";
+    int64_t count = 0;
+
+    leveldb::WriteBatch batch_spoc, batch_cspo, batch_opsc, batch_pcos;
+    for (; it.hasNext(); ++it) {
+        AddStatement(*it, batch_spoc, batch_cspo, batch_opsc, batch_pcos);
+        count++;
+    }
+
+    std::vector<std::thread> writers;
+    writers.push_back(std::thread([&]() {
+        CHECK_STATUS(db_pcos->Write(leveldb::WriteOptions(), &batch_pcos));
+    }));
+    writers.push_back(std::thread([&]() {
+        CHECK_STATUS(db_opsc->Write(leveldb::WriteOptions(), &batch_opsc));
+    }));
+    writers.push_back(std::thread([&]() {
+        CHECK_STATUS(db_cspo->Write(leveldb::WriteOptions(), &batch_cspo));
+    }));
+    writers.push_back(std::thread([&]() {
+        CHECK_STATUS(db_spoc->Write(leveldb::WriteOptions(), &batch_spoc));
+    }));
+
+    for (auto& t : writers) {
+        t.join();
+    }
+
+    LOG(INFO) << "Imported " << count << " statements (time="
+              << std::chrono::duration <double, std::milli> (
+                   std::chrono::steady_clock::now() - start).count()
+              << "ms).";
+
+    return count;
+}
+
+
+std::unique_ptr<LevelDBPersistence::StatementIterator> LevelDBPersistence::GetStatements(
+        const rdf::proto::Statement &pattern) {
+    DLOG(INFO) << "Get statements matching pattern " << pattern.DebugString();
+
+    PatternQuery query(pattern);
+
+    leveldb::DB* db;
+    switch (query.Type()) {
+        case PatternQuery::SPOC:
+            db = db_spoc.get();
+            DLOG(INFO) << "Query: Using index type SPOC";
+            break;
+        case PatternQuery::CSPO:
+            db = db_cspo.get();
+            DLOG(INFO) << "Query: Using index type CSPO";
+            break;
+        case PatternQuery::OPSC:
+            db = db_opsc.get();
+            DLOG(INFO) << "Query: Using index type OPSC";
+            break;
+        case PatternQuery::PCOS:
+            db = db_pcos.get();
+            DLOG(INFO) << "Query: Using index type PCOS";
+            break;
+    };
+
+    return std::unique_ptr<StatementIterator>(new StatementRangeIterator(
+            db->NewIterator(leveldb::ReadOptions()), query.MinKey(), query.MaxKey()));
+}
+
+
+void LevelDBPersistence::GetStatements(
+        const Statement& pattern, std::function<bool(const Statement&)> callback) {
+    auto start = std::chrono::steady_clock::now();
+    int64_t count = 0;
+
+    bool cbsuccess = true;
+    for(auto it = GetStatements(pattern); cbsuccess && it->hasNext(); ++(*it)) {
+        cbsuccess = callback(**it);
+        count++;
+    }
+
+    DLOG(INFO) << "Get statements done (count=" << count << ", time="
+               << std::chrono::duration <double, std::milli> (
+                    std::chrono::steady_clock::now() - start).count()
+               << "ms).";
+}
+
+
+int64_t LevelDBPersistence::RemoveStatements(const rdf::proto::Statement& pattern) {
+    auto start = std::chrono::steady_clock::now();
+    DLOG(INFO) << "Remove statements matching pattern " << pattern.DebugString();
+
+    int64_t count = 0;
+
+    Statement stmt;
+    leveldb::WriteBatch batch_spoc, batch_cspo, batch_opsc, batch_pcos;
+
+    count = RemoveStatements(pattern, batch_spoc, batch_cspo, batch_opsc, batch_pcos);
+
+    std::vector<std::thread> writers;
+    writers.push_back(std::thread([&]() {
+        CHECK_STATUS(db_pcos->Write(leveldb::WriteOptions(), &batch_pcos));
+    }));
+    writers.push_back(std::thread([&]() {
+        CHECK_STATUS(db_opsc->Write(leveldb::WriteOptions(), &batch_opsc));
+    }));
+    writers.push_back(std::thread([&]() {
+        CHECK_STATUS(db_cspo->Write(leveldb::WriteOptions(), &batch_cspo));
+    }));
+    writers.push_back(std::thread([&]() {
+        CHECK_STATUS(db_spoc->Write(leveldb::WriteOptions(), &batch_spoc));
+    }));
+
+    for (auto& t : writers) {
+        t.join();
+    }
+
+    DLOG(INFO) << "Removed " << count << " statements (time=" <<
+               std::chrono::duration <double, std::milli> (
+                       std::chrono::steady_clock::now() - start).count()
+               << "ms).";
+
+    return count;
+}
+
+UpdateStatistics LevelDBPersistence::Update(LevelDBPersistence::UpdateIterator &it) {
+    auto start = std::chrono::steady_clock::now();
+    DLOG(INFO) << "Starting batch update operation.";
+    UpdateStatistics stats;
+
+    WriteBatch b_spoc, b_cspo, b_opsc, b_pcos, b_prefix, b_url;
+    for (; it.hasNext(); ++it) {
+        if (it->has_stmt_added()) {
+            AddStatement(it->stmt_added(), b_spoc, b_cspo, b_opsc, b_pcos);
+            stats.added_stmts++;
+        } else if (it->has_stmt_removed()) {
+            stats.removed_stmts +=
+                    RemoveStatements(it->stmt_removed(), b_spoc, b_cspo, b_opsc, b_pcos);
+        } else if(it->has_ns_added()) {
+            AddNamespace(it->ns_added(), b_prefix, b_url);
+            stats.added_ns++;
+        } else if(it->has_ns_removed()) {
+            RemoveNamespace(it->ns_removed(), b_prefix, b_url);
+        }
+    }
+    std::vector<std::thread> writers;
+    writers.push_back(std::thread([&]() {
+        CHECK_STATUS(db_pcos->Write(leveldb::WriteOptions(), &b_pcos));
+    }));
+    writers.push_back(std::thread([&]() {
+        CHECK_STATUS(db_opsc->Write(leveldb::WriteOptions(), &b_opsc));
+    }));
+    writers.push_back(std::thread([&]() {
+        CHECK_STATUS(db_cspo->Write(leveldb::WriteOptions(), &b_cspo));
+    }));
+    writers.push_back(std::thread([&]() {
+        CHECK_STATUS(db_spoc->Write(leveldb::WriteOptions(), &b_spoc));
+    }));
+    writers.push_back(std::thread([&]() {
+        CHECK_STATUS(db_ns_prefix->Write(leveldb::WriteOptions(), &b_prefix));
+    }));
+    writers.push_back(std::thread([&]() {
+        CHECK_STATUS(db_ns_url->Write(leveldb::WriteOptions(), &b_url));
+    }));
+
+    for (auto& t : writers) {
+        t.join();
+    }
+
+    DLOG(INFO) << "Batch update complete. (statements added: " << stats.added_stmts
+            << ", statements removed: " << stats.removed_stmts
+            << ", namespaces added: " << stats.added_ns
+            << ", namespaces removed: " << stats.removed_ns
+            << ", time=" << std::chrono::duration <double, std::milli> (
+                std::chrono::steady_clock::now() - start).count() << "ms).";
+
+    return stats;
+}
+
+void LevelDBPersistence::AddNamespace(
+        const Namespace &ns, WriteBatch &ns_prefix, WriteBatch &ns_url) {
+    DLOG(INFO) << "Adding namespace " << ns.DebugString();
+
+    std::string buffer;
+    ns.SerializeToString(&buffer);
+    ns_prefix.Put(ns.prefix(), buffer);
+    ns_url.Put(ns.uri(), buffer);
+}
+
+void LevelDBPersistence::RemoveNamespace(
+        const Namespace &pattern, WriteBatch &ns_prefix, WriteBatch &ns_url) {
+    DLOG(INFO) << "Removing namespaces matching pattern " << pattern.DebugString();
+
+    GetNamespaces(pattern, [&ns_prefix, &ns_url](const rdf::proto::Namespace& ns) -> bool {
+        ns_prefix.Delete(ns.prefix());
+        ns_url.Delete(ns.uri());
+        return true;
+    });
+}
+
+
+void LevelDBPersistence::AddStatement(
+        const Statement &stmt,
+        WriteBatch &spoc, WriteBatch &cspo, WriteBatch &opsc, WriteBatch &pcos) {
+    DLOG(INFO) << "Adding statement " << stmt.DebugString();
+
+    std::string buffer, bufs, bufp, bufo, bufc;
+
+    stmt.SerializeToString(&buffer);
+
+    stmt.subject().SerializeToString(&bufs);
+    stmt.predicate().SerializeToString(&bufp);
+    stmt.object().SerializeToString(&bufo);
+    stmt.context().SerializeToString(&bufc);
+
+    char *k_spoc = (char *) calloc(4 * KEY_LENGTH, sizeof(char));
+    computeKey(&bufs, &bufp, &bufo, &bufc, k_spoc);
+    spoc.Put(leveldb::Slice(k_spoc, 4 * KEY_LENGTH), buffer);
+
+    char *k_cspo = (char *) calloc(4 * KEY_LENGTH, sizeof(char));
+    orderKey(k_cspo, k_spoc, C, S, P, O);
+    cspo.Put(leveldb::Slice(k_cspo, 4 * KEY_LENGTH), buffer);
+
+    char *k_opsc = (char *) calloc(4 * KEY_LENGTH, sizeof(char));
+    orderKey(k_opsc, k_spoc, O, P, S, C);
+    opsc.Put(leveldb::Slice(k_opsc, 4 * KEY_LENGTH), buffer);
+
+    char *k_pcos = (char *) calloc(4 * KEY_LENGTH, sizeof(char));
+    orderKey(k_pcos, k_spoc, P, C, O, S);
+    pcos.Put(leveldb::Slice(k_pcos, 4 * KEY_LENGTH), buffer);
+
+    free(k_spoc);
+    free(k_cspo);
+    free(k_opsc);
+    free(k_pcos);
+}
+
+
+int64_t LevelDBPersistence::RemoveStatements(
+        const Statement& pattern,
+        WriteBatch& spoc, WriteBatch& cspo, WriteBatch& opsc, WriteBatch&pcos) {
+    DLOG(INFO) << "Removing statements matching " << pattern.DebugString();
+
+    int64_t count = 0;
+
+    std::string bufs, bufp, bufo, bufc;
+    GetStatements(pattern, [&](const Statement stmt) -> bool {
+        stmt.subject().SerializeToString(&bufs);
+        stmt.predicate().SerializeToString(&bufp);
+        stmt.object().SerializeToString(&bufo);
+        stmt.context().SerializeToString(&bufc);
+
+        char* k_spoc = (char*)calloc(4 * KEY_LENGTH, sizeof(char));
+        computeKey(&bufs, &bufp, &bufo, &bufc, k_spoc);
+        spoc.Delete(leveldb::Slice(k_spoc, 4 * KEY_LENGTH));
+
+        char* k_cspo = (char*)calloc(4 * KEY_LENGTH, sizeof(char));
+        orderKey(k_cspo, k_spoc, C, S, P, O);
+        cspo.Delete(leveldb::Slice(k_cspo, 4 * KEY_LENGTH));
+
+        char* k_opsc = (char*)calloc(4 * KEY_LENGTH, sizeof(char));
+        orderKey(k_opsc, k_spoc, O, P, S, C);
+        opsc.Delete(leveldb::Slice(k_opsc, 4 * KEY_LENGTH));
+
+        char* k_pcos = (char*)calloc(4 * KEY_LENGTH, sizeof(char));
+        orderKey(k_pcos, k_spoc, P, C, O, S);
+        pcos.Delete(leveldb::Slice(k_pcos, 4 * KEY_LENGTH));
+
+        free(k_spoc);
+        free(k_cspo);
+        free(k_opsc);
+        free(k_pcos);
+
+        count++;
+
+        return true;
+    });
+
+    return count;
+}
+
+int KeyComparator::Compare(const leveldb::Slice& a, const leveldb::Slice& b) const {
+    return memcmp(a.data(), b.data(), 4 * KEY_LENGTH);
+}
+
+
+int64_t LevelDBPersistence::Size() {
+    int64_t count = 0;
+    leveldb::Iterator* it = db_cspo->NewIterator(leveldb::ReadOptions());
+    for (it->SeekToFirst(); it->Valid(); it->Next()) {
+        count++;
+    }
+
+    delete it;
+    return count;
+}
+
+}  // namespace persistence
+}  // namespace marmotta
+
+

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/persistence/leveldb_persistence.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/persistence/leveldb_persistence.h b/libraries/ostrich/backend/persistence/leveldb_persistence.h
new file mode 100644
index 0000000..7a3da17
--- /dev/null
+++ b/libraries/ostrich/backend/persistence/leveldb_persistence.h
@@ -0,0 +1,185 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MARMOTTA_PERSISTENCE_H
+#define MARMOTTA_PERSISTENCE_H
+
+#include <memory>
+#include <string>
+#include <functional>
+
+#include <leveldb/db.h>
+#include <leveldb/cache.h>
+#include <leveldb/comparator.h>
+
+#include "model/rdf_model.h"
+#include "service/sail.pb.h"
+#include "util/iterator.h"
+
+namespace marmotta {
+namespace persistence {
+
+/**
+ * A custom comparator treating the bytes in the key as unsigned char.
+ */
+class KeyComparator : public leveldb::Comparator {
+ public:
+    // Byte-wise comparison of two fixed-size index keys (memcmp semantics).
+    int Compare(const leveldb::Slice& a, const leveldb::Slice& b) const;
+
+    const char* Name() const { return "KeyComparator"; }
+    // Key-shortening hooks intentionally left as no-ops: all index keys
+    // have the same fixed size, so LevelDB's key compression does not apply.
+    void FindShortestSeparator(std::string*, const leveldb::Slice&) const { }
+    void FindShortSuccessor(std::string*) const { }
+};
+
+
+// Statistical data about updates.
+struct UpdateStatistics {
+    UpdateStatistics()
+            : added_stmts(0), removed_stmts(0), added_ns(0), removed_ns(0) {}
+
+    int64_t added_stmts, removed_stmts, added_ns, removed_ns;
+};
+
+/**
+ * Persistence implementation based on the LevelDB high performance database.
+ *
+ * Statements are stored redundantly in four LevelDB databases, each keyed
+ * by a different component ordering (SPOC, CSPO, OPSC, PCOS), so that
+ * lookups with different wildcard patterns can pick a suitable index.
+ * Namespaces are stored twice, keyed by prefix and by URI.
+ */
+class LevelDBPersistence {
+ public:
+    typedef util::CloseableIterator<rdf::proto::Statement> StatementIterator;
+    typedef util::CloseableIterator<rdf::proto::Namespace> NamespaceIterator;
+    typedef util::CloseableIterator<service::proto::UpdateRequest> UpdateIterator;
+
+    // Handlers receive one result at a time; the returned bool controls
+    // whether iteration continues (NOTE(review): presumed from callers that
+    // return false to stop early -- confirm in the implementation).
+    typedef std::function<bool(const rdf::proto::Statement&)> StatementHandler;
+    typedef std::function<bool(const rdf::proto::Namespace&)> NamespaceHandler;
+
+
+    /**
+     * Initialise a new LevelDB database using the given path and cache size (bytes).
+     */
+    LevelDBPersistence(const std::string& path, int64_t cacheSize);
+
+    /**
+     * Add the namespaces in the iterator to the database.
+     * Returns the number of namespaces added.
+     */
+    int64_t AddNamespaces(NamespaceIterator& it);
+
+    /**
+     * Add the statements in the iterator to the database.
+     * Returns the number of statements added.
+     */
+    int64_t AddStatements(StatementIterator& it);
+
+    /**
+     * Get all statements matching the pattern (which may have some fields
+     * unset to indicate wildcards). Call the callback function for each
+     * result.
+     */
+    void GetStatements(const rdf::proto::Statement& pattern,
+                       StatementHandler callback);
+
+    /**
+     * Get all statements matching the pattern (which may have some fields
+     * unset to indicate wildcards). Returns an iterator over the results.
+     */
+    std::unique_ptr<StatementIterator>
+            GetStatements(const rdf::proto::Statement& pattern);
+
+    /**
+     * Get all namespaces matching the pattern (which may have some of all
+     * fields unset to indicate wildcards). Call the callback function for
+     * each result.
+     */
+    void GetNamespaces(const rdf::proto::Namespace &pattern,
+                       NamespaceHandler callback);
+
+    /**
+     * Get all namespaces matching the pattern (which may have some of all
+     * fields unset to indicate wildcards). Returns an iterator over the
+     * results.
+     */
+    std::unique_ptr<NamespaceIterator>
+            GetNamespaces(const rdf::proto::Namespace &pattern);
+
+    /**
+     * Remove all statements matching the pattern (which may have some fields
+     * unset to indicate wildcards). Returns the number of statements removed.
+     */
+    int64_t RemoveStatements(const rdf::proto::Statement& pattern);
+
+    /**
+     * Apply a batch of updates (mixed statement/namespace adds and removes).
+     * The updates are collected in LevelDB batches and written atomically to
+     * the database when iteration ends.
+     */
+    UpdateStatistics Update(UpdateIterator& it);
+
+    /**
+     * Return the size of this database (total number of statements).
+     * Computed by a full scan of one index, so linear in the database size.
+     */
+    int64_t Size();
+ private:
+
+    // Comparator, block cache and options shared by all databases below.
+    std::unique_ptr<KeyComparator> comparator;
+    std::unique_ptr<leveldb::Cache> cache;
+    std::unique_ptr<leveldb::Options> options;
+
+    // We currently support efficient lookups by subject, predicate, context
+    // and object through the four index orderings below.
+    std::unique_ptr<leveldb::DB>
+            // Statement databases, indexed for query performance
+            db_spoc, db_cspo, db_opsc, db_pcos,
+            // Namespace databases
+            db_ns_prefix, db_ns_url,
+            // Triple store metadata.
+            db_meta;
+
+    /**
+     * Add the namespace to the given database batch operations.
+     */
+    void AddNamespace(const rdf::proto::Namespace& ns,
+                      leveldb::WriteBatch& ns_prefix, leveldb::WriteBatch& ns_url);
+
+    /**
+     * Remove the namespace from the given database batch operations.
+     */
+    void RemoveNamespace(const rdf::proto::Namespace& ns,
+                         leveldb::WriteBatch& ns_prefix, leveldb::WriteBatch& ns_url);
+
+    /**
+     * Add the statement to the given database batch operations (one batch
+     * per index ordering).
+     */
+    void AddStatement(const rdf::proto::Statement& stmt,
+                      leveldb::WriteBatch& spoc, leveldb::WriteBatch& cspo,
+                      leveldb::WriteBatch& opsc, leveldb::WriteBatch&pcos);
+
+
+    /**
+     * Remove all statements matching the pattern (which may have some fields
+     * unset to indicate wildcards) from the given database batch operations.
+     */
+    int64_t RemoveStatements(const rdf::proto::Statement& pattern,
+                             leveldb::WriteBatch& spoc, leveldb::WriteBatch& cspo,
+                             leveldb::WriteBatch& opsc, leveldb::WriteBatch&pcos);
+
+
+};
+
+
+
+}  // namespace persistence
+}  // namespace marmotta
+
+#endif //MARMOTTA_PERSISTENCE_H

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/persistence/leveldb_server.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/persistence/leveldb_server.cc b/libraries/ostrich/backend/persistence/leveldb_server.cc
new file mode 100644
index 0000000..a03dc5f
--- /dev/null
+++ b/libraries/ostrich/backend/persistence/leveldb_server.cc
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Binary to start a persistence server implementing the sail.proto API.
+#include <gflags/gflags.h>
+#include <glog/logging.h>
+#include <sys/stat.h>
+#include <signal.h>
+
+#include "leveldb_service.h"
+
+using grpc::Status;
+using grpc::Server;
+using grpc::ServerBuilder;
+
+
+// Command line flags configuring the server endpoint and database location.
+DEFINE_string(host, "0.0.0.0", "Address/name of server to access.");
+DEFINE_string(port, "10000", "Port of server to access.");
+DEFINE_string(db, "/tmp/testdb", "Path to database. Will be created if non-existent.");
+DEFINE_int64(cache_size, 100 * 1048576, "Cache size used by the database (in bytes).");
+
+// Global server instance so the signal handler below can trigger shutdown.
+std::unique_ptr<Server> server;
+
+// Signal handler: gracefully shut down the gRPC server if it is running.
+void stopServer(int signal) {
+    if (server != nullptr) {
+        LOG(INFO) << "Persistence Server shutting down";
+        server->Shutdown();
+    }
+}
+
+// Entry point: parse flags, open/create the LevelDB database, and serve the
+// sail and SPARQL gRPC services until interrupted by SIGINT/SIGTERM.
+int main(int argc, char** argv) {
+    // Initialize Google's logging library.
+    google::InitGoogleLogging(argv[0]);
+    google::ParseCommandLineFlags(&argc, &argv, true);
+
+    // Create the database directory if needed; a failure here (other than
+    // the directory already existing) will surface when opening the DB, but
+    // log it so the cause is visible.
+    if (mkdir(FLAGS_db.c_str(), 0700) != 0) {
+        PLOG(WARNING) << "Could not create database directory " << FLAGS_db
+                      << " (it may already exist)";
+    }
+    marmotta::persistence::LevelDBPersistence persistence(FLAGS_db, FLAGS_cache_size);
+
+    marmotta::service::LevelDBService sailService(&persistence);
+    marmotta::service::LevelDBSparqlService sparqlService(&persistence);
+
+    ServerBuilder builder;
+    builder.AddListeningPort(FLAGS_host + ":" + FLAGS_port, grpc::InsecureServerCredentials());
+    builder.RegisterService(&sailService);
+    builder.RegisterService(&sparqlService);
+
+    server = builder.BuildAndStart();
+    // BuildAndStart returns null when the server could not be started
+    // (e.g. the port is already in use); fail fast instead of crashing on
+    // the Wait() call below.
+    if (server == nullptr) {
+        LOG(ERROR) << "Could not start server on " << FLAGS_host << ":" << FLAGS_port;
+        return 1;
+    }
+    std::cout << "Persistence Server listening on " << FLAGS_host << ":" << FLAGS_port << std::endl;
+
+    LOG(INFO) << "Persistence Server listening on " << FLAGS_host << ":" << FLAGS_port;
+
+    // Install handlers for graceful shutdown.
+    signal(SIGINT, stopServer);
+    signal(SIGTERM, stopServer);
+
+    server->Wait();
+
+    return 0;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/persistence/leveldb_service.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/persistence/leveldb_service.cc b/libraries/ostrich/backend/persistence/leveldb_service.cc
new file mode 100644
index 0000000..e31af2d
--- /dev/null
+++ b/libraries/ostrich/backend/persistence/leveldb_service.cc
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "leveldb_service.h"
+#include "leveldb_sparql.h"
+
+#include <unordered_set>
+#include <model/rdf_operators.h>
+#include <util/iterator.h>
+
+using grpc::Status;
+using grpc::StatusCode;
+using grpc::Server;
+using grpc::ServerBuilder;
+using grpc::ServerContext;
+using grpc::ServerReader;
+using grpc::ServerWriter;
+using google::protobuf::Int64Value;
+using google::protobuf::Message;
+using google::protobuf::Empty;
+using marmotta::rdf::proto::Statement;
+using marmotta::rdf::proto::Namespace;
+using marmotta::rdf::proto::Resource;
+using marmotta::service::proto::ContextRequest;
+using marmotta::persistence::sparql::LevelDBTripleSource;
+using marmotta::sparql::SparqlService;
+using marmotta::sparql::TripleSource;
+
+namespace marmotta {
+namespace service {
+
+// A STL iterator wrapper around a gRPC client stream reader, exposing the
+// incoming messages through the CloseableIterator interface. Does not take
+// ownership of the reader.
+template <class Proto>
+class ReaderIterator : public util::CloseableIterator<Proto> {
+ public:
+
+    // Wraps the reader and immediately fetches the first element so that
+    // hasNext()/operator* are valid right after construction.
+    ReaderIterator(grpc::ServerReader<Proto>* r) : reader(r), finished(false) {
+        // Immediately move to first element.
+        operator++();
+    }
+
+    // Advance to the next message; marks the iterator finished once the
+    // stream is exhausted. Advancing a finished iterator is a no-op.
+    util::CloseableIterator<Proto>& operator++() override {
+        if (!finished) {
+            finished = !reader->Read(&buffer);
+        }
+        return *this;
+    }
+
+    // Access the current message (undefined once hasNext() is false).
+    Proto& operator*() override {
+        return buffer;
+    }
+
+    Proto* operator->() override {
+        return &buffer;
+    }
+
+    bool hasNext() override {
+        return !finished;
+    }
+
+ private:
+    grpc::ServerReader<Proto>* reader;  // borrowed, owned by gRPC
+    Proto buffer;                       // holds the current message
+    bool finished;                      // true once the stream has ended
+};
+
+typedef ReaderIterator<rdf::proto::Statement> StatementIterator;
+typedef ReaderIterator<rdf::proto::Namespace> NamespaceIterator;
+typedef ReaderIterator<service::proto::UpdateRequest> UpdateIterator;
+
+
+// Stream namespaces from the client into the database and report how many
+// were added.
+Status LevelDBService::AddNamespaces(
+        ServerContext* context, ServerReader<Namespace>* reader, Int64Value* result) {
+
+    NamespaceIterator it(reader);
+    result->set_value(persistence->AddNamespaces(it));
+
+    return Status::OK;
+}
+
+// Look up a single namespace matching the pattern (by prefix and/or URI).
+// Returns NOT_FOUND when no namespace matches.
+grpc::Status LevelDBService::GetNamespace(
+        ServerContext *context, const rdf::proto::Namespace *pattern, Namespace *result) {
+
+    Status status(StatusCode::NOT_FOUND, "Namespace not found");
+    persistence->GetNamespaces(*pattern, [&result, &status](const Namespace &r) -> bool {
+        *result = r;
+        status = Status::OK;
+        // Stop scanning after the first match instead of needlessly
+        // iterating over all remaining namespaces.
+        return false;
+    });
+
+    return status;
+}
+
+// Stream every namespace in the database back to the client.
+grpc::Status LevelDBService::GetNamespaces(
+        ServerContext *context, const Empty *ignored, ServerWriter<Namespace> *result) {
+
+    Namespace pattern;  // empty pattern matches all namespaces
+    persistence->GetNamespaces(pattern, [&result](const Namespace &ns) -> bool {
+        // Stop iterating if the client stream is broken (Write fails).
+        return result->Write(ns);
+    });
+
+    return Status::OK;
+}
+
+
+// Stream statements from the client into the database and report how many
+// were added.
+Status LevelDBService::AddStatements(
+        ServerContext* context, ServerReader<Statement>* reader, Int64Value* result) {
+
+    StatementIterator it(reader);
+    result->set_value(persistence->AddStatements(it));
+
+    return Status::OK;
+}
+
+
+// Stream all statements matching the (possibly wildcard) pattern back to
+// the client.
+Status LevelDBService::GetStatements(
+        ServerContext* context, const Statement* pattern, ServerWriter<Statement>* result) {
+
+    persistence->GetStatements(*pattern, [&result](const Statement& s) -> bool {
+        // Stop iterating if the client stream is broken (Write fails).
+        return result->Write(s);
+    });
+
+    return Status::OK;
+}
+
+// Remove all statements matching the pattern and report how many were
+// removed.
+Status LevelDBService::RemoveStatements(
+        ServerContext* context, const Statement* pattern, Int64Value* result) {
+
+    result->set_value(persistence->RemoveStatements(*pattern));
+
+    return Status::OK;
+}
+
+// Remove all statements in the given contexts, or every statement in the
+// store when no context is given; report how many were removed.
+Status LevelDBService::Clear(
+        ServerContext* context, const ContextRequest* contexts, Int64Value* result) {
+
+    int64_t count = 0;
+    Statement pattern;
+
+    if (contexts->context_size() == 0) {
+        // Empty pattern: wipe the whole store.
+        count = persistence->RemoveStatements(pattern);
+    } else {
+        // One removal pass per requested context.
+        for (const Resource &r : contexts->context()) {
+            pattern.mutable_context()->CopyFrom(r);
+            count += persistence->RemoveStatements(pattern);
+        }
+    }
+    result->set_value(count);
+
+    return Status::OK;
+}
+
+// Report the number of statements, either over the whole store or
+// restricted to the contexts listed in the request.
+Status LevelDBService::Size(
+        ServerContext* context, const ContextRequest* contexts, Int64Value* result) {
+
+    int64_t count = 0;
+
+    if (contexts->context_size() > 0) {
+        // Count per requested context by iterating all matching statements.
+        // NOTE(review): a context listed twice in the request is counted
+        // twice -- confirm callers never send duplicate contexts.
+        Statement pattern;
+        for (const Resource &r : contexts->context()) {
+            pattern.mutable_context()->CopyFrom(r);
+
+            persistence->GetStatements(pattern, [&count](const Statement& stmt) -> bool {
+                count++;
+                return true;
+            });
+        }
+    } else {
+        // No contexts given: total size of the store.
+        count = persistence->Size();
+    }
+    result->set_value(count);
+
+    return Status::OK;
+
+}
+
+
+// Stream the distinct set of context resources found in the store. There is
+// no dedicated context index, so this scans all statements and collects the
+// contexts into a set before writing them out.
+grpc::Status LevelDBService::GetContexts(
+        ServerContext *context, const Empty *ignored, ServerWriter<Resource> *result) {
+    // Currently we need to iterate over all statements and collect the results.
+    Statement pattern;
+    std::unordered_set<Resource> contexts;
+
+    persistence->GetStatements(pattern, [&contexts](const Statement& stmt) -> bool {
+        if (stmt.has_context()) {
+            contexts.insert(stmt.context());
+        }
+        return true;
+    });
+
+    // Iterate by const reference to avoid copying each Resource message.
+    for (const auto& c : contexts) {
+        result->Write(c);
+    }
+    return Status::OK;
+}
+
+// Apply a mixed stream of statement/namespace add and remove requests and
+// report statistics about the applied changes.
+grpc::Status LevelDBService::Update(grpc::ServerContext *context,
+                                    grpc::ServerReader<service::proto::UpdateRequest> *reader,
+                                    service::proto::UpdateResponse *result) {
+
+    UpdateIterator it(reader);
+    persistence::UpdateStatistics stats = persistence->Update(it);
+
+    // Copy the statistics into the response message.
+    result->set_added_statements(stats.added_stmts);
+    result->set_removed_statements(stats.removed_stmts);
+    result->set_added_namespaces(stats.added_ns);
+    result->set_removed_namespaces(stats.removed_ns);
+
+    return Status::OK;
+}
+
+
+// Evaluate a SPARQL tuple query against the LevelDB store, streaming one
+// SparqlResponse per result row with one binding per bound variable.
+grpc::Status LevelDBSparqlService::TupleQuery(
+        grpc::ServerContext* context, const spq::SparqlRequest* query,
+        grpc::ServerWriter<spq::SparqlResponse>* result) {
+
+    // Build a per-request SPARQL service over a triple source wrapping our
+    // persistence; the service takes ownership of the triple source.
+    SparqlService svc(
+        std::unique_ptr<TripleSource>(
+                new LevelDBTripleSource(persistence)));
+
+    svc.TupleQuery(query->query(), [&result](const SparqlService::RowType& row) {
+        spq::SparqlResponse response;
+        // Copy each (variable, value) pair of the row into the response.
+        for (auto it = row.cbegin(); it != row.cend(); it++) {
+            auto b = response.add_binding();
+            b->set_variable(it->first);
+            *b->mutable_value() = it->second.getMessage();
+        }
+        result->Write(response);
+        return true;
+    });
+
+    return Status::OK;
+}
+
+}  // namespace service
+}  // namespace marmotta
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/persistence/leveldb_service.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/persistence/leveldb_service.h b/libraries/ostrich/backend/persistence/leveldb_service.h
new file mode 100644
index 0000000..0cf4df9
--- /dev/null
+++ b/libraries/ostrich/backend/persistence/leveldb_service.h
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MARMOTTA_SERVICE_H
+#define MARMOTTA_SERVICE_H
+
+#include "leveldb_persistence.h"
+
+#include <grpc/grpc.h>
+#include <grpc++/server.h>
+#include <grpc++/server_builder.h>
+#include <grpc++/server_context.h>
+#include <grpc++/security/server_credentials.h>
+
+#include <google/protobuf/empty.pb.h>
+#include <google/protobuf/wrappers.pb.h>
+
+#include "service/sail.pb.h"
+#include "service/sail.grpc.pb.h"
+#include "service/sparql.pb.h"
+#include "service/sparql.grpc.pb.h"
+
+namespace marmotta {
+namespace service {
+
+namespace svc = marmotta::service::proto;
+namespace spq = marmotta::sparql::proto;
+
+/**
+ * An implementation of the gRPC SailService interface backed by a LevelDB
+ * database. All methods delegate to the wrapped LevelDBPersistence.
+ */
+class LevelDBService : public svc::SailService::Service {
+ public:
+    /**
+     * Construct a new SailService wrapper around the LevelDB persistence passed
+     * as argument. The service will not take ownership of the pointer.
+     */
+    LevelDBService(persistence::LevelDBPersistence* persistence) : persistence(persistence) { };
+
+    grpc::Status AddNamespaces(grpc::ServerContext* context,
+                               grpc::ServerReader<rdf::proto::Namespace>* reader,
+                               google::protobuf::Int64Value* result) override;
+
+    grpc::Status GetNamespace(grpc::ServerContext* context,
+                               const rdf::proto::Namespace* pattern,
+                               rdf::proto::Namespace* result) override;
+
+    grpc::Status GetNamespaces(grpc::ServerContext* context,
+                               const google::protobuf::Empty* ignored,
+                               grpc::ServerWriter<rdf::proto::Namespace>* result) override;
+
+    grpc::Status AddStatements(grpc::ServerContext* context,
+                               grpc::ServerReader<rdf::proto::Statement>* reader,
+                               google::protobuf::Int64Value* result) override;
+
+    grpc::Status GetStatements(grpc::ServerContext* context,
+                               const rdf::proto::Statement* pattern,
+                               grpc::ServerWriter<rdf::proto::Statement>* result) override;
+
+    grpc::Status RemoveStatements(grpc::ServerContext* context,
+                                  const rdf::proto::Statement* pattern,
+                                  google::protobuf::Int64Value* result) override;
+
+    grpc::Status GetContexts(grpc::ServerContext* context,
+                             const google::protobuf::Empty* ignored,
+                             grpc::ServerWriter<rdf::proto::Resource>* result) override;
+
+    grpc::Status Update(grpc::ServerContext* context,
+                        grpc::ServerReader<service::proto::UpdateRequest>* reader,
+                        service::proto::UpdateResponse* result) override;
+
+    grpc::Status Clear(grpc::ServerContext* context,
+                       const svc::ContextRequest* contexts,
+                       google::protobuf::Int64Value* result) override;
+
+    grpc::Status Size(grpc::ServerContext* context,
+                      const svc::ContextRequest* contexts,
+                      google::protobuf::Int64Value* result) override;
+
+ private:
+    // Borrowed pointer to the persistence backend (not owned).
+    persistence::LevelDBPersistence* persistence;
+};
+
+
+/**
+ * An implementation of the gRPC SPARQL service interface backed by a
+ * LevelDB database, answering tuple queries over the wrapped persistence.
+ */
+class LevelDBSparqlService : public spq::SparqlService::Service {
+ public:
+    /**
+     * Construct a new SparqlService wrapper around the LevelDB persistence passed
+     * as argument. The service will not take ownership of the pointer.
+     */
+    LevelDBSparqlService(persistence::LevelDBPersistence* persistence) : persistence(persistence) { };
+
+    // Evaluate a SPARQL tuple query and stream the result rows.
+    grpc::Status TupleQuery(grpc::ServerContext* context,
+                            const spq::SparqlRequest* pattern,
+                            grpc::ServerWriter<spq::SparqlResponse>* result) override;
+ private:
+    // Borrowed pointer to the persistence backend (not owned).
+    persistence::LevelDBPersistence* persistence;
+};
+
+}
+}
+
+#endif //MARMOTTA_SERVICE_H

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/persistence/leveldb_sparql.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/persistence/leveldb_sparql.cc b/libraries/ostrich/backend/persistence/leveldb_sparql.cc
new file mode 100644
index 0000000..5d44db6
--- /dev/null
+++ b/libraries/ostrich/backend/persistence/leveldb_sparql.cc
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "leveldb_sparql.h"
+
+namespace marmotta {
+namespace persistence {
+namespace sparql {
+
+using ::marmotta::sparql::StatementIterator;
+
+// Adapter exposing a proto statement iterator as an iterator over parsed
+// rdf::Statement values. The current element is converted lazily on first
+// dereference and cached until the iterator is advanced.
+class WrapProtoStatementIterator : public StatementIterator {
+
+ public:
+    // Takes ownership of the wrapped proto iterator. 'parsed' must start
+    // out false: it was previously left uninitialized, which made the first
+    // dereference read an indeterminate value (undefined behavior).
+    WrapProtoStatementIterator(std::unique_ptr<persistence::LevelDBPersistence::StatementIterator> it)
+            : it(std::move(it)), parsed(false) { }
+
+    util::CloseableIterator<rdf::Statement> &operator++() override {
+        ++(*it);
+        // Invalidate the cached conversion; recomputed on next access.
+        parsed = false;
+        return *this;
+    };
+
+    rdf::Statement &operator*() override {
+        if (!parsed) {
+            current = std::move(**it);
+            parsed = true;
+        }
+        return current;
+    };
+
+    rdf::Statement *operator->() override {
+        if (!parsed) {
+            current = std::move(**it);
+            parsed = true;
+        }
+        return &current;
+    };
+
+    bool hasNext() override {
+        return it->hasNext();
+    }
+
+ private:
+    std::unique_ptr<persistence::LevelDBPersistence::StatementIterator> it;
+    rdf::Statement current;  // cached parsed value of the current element
+    bool parsed;             // true once 'current' is valid for this element
+};
+
+
+// Check whether at least one statement matches the given components; null
+// arguments act as wildcards.
+bool LevelDBTripleSource::HasStatement(
+        const rdf::Resource *s, const rdf::URI *p, const rdf::Value *o, const rdf::Resource *c) {
+    rdf::proto::Statement pattern;
+
+    // Only set the fields for non-null components; unset fields remain
+    // wildcards for the pattern match.
+    if (s != nullptr) {
+        *pattern.mutable_subject() = s->getMessage();
+    }
+    if (p != nullptr) {
+        *pattern.mutable_predicate() = p->getMessage();
+    }
+    if (o != nullptr) {
+        *pattern.mutable_object() = o->getMessage();
+    }
+    if (c != nullptr) {
+        *pattern.mutable_context() = c->getMessage();
+    }
+
+    bool found = false;
+    // Returning false from the handler stops the scan after the first match.
+    persistence->GetStatements(pattern, [&found](rdf::proto::Statement) -> bool {
+        found = true;
+        return false;
+    });
+
+    return found;
+}
+
+// Return an iterator over all statements matching the given components;
+// null arguments act as wildcards. The returned iterator converts the
+// underlying proto statements to rdf::Statement values on the fly.
+std::unique_ptr<sparql::StatementIterator> LevelDBTripleSource::GetStatements(
+        const rdf::Resource *s, const rdf::URI *p, const rdf::Value *o, const rdf::Resource *c) {
+    rdf::proto::Statement pattern;
+
+    // Only set the fields for non-null components; unset fields remain
+    // wildcards for the pattern match.
+    if (s != nullptr) {
+        *pattern.mutable_subject() = s->getMessage();
+    }
+    if (p != nullptr) {
+        *pattern.mutable_predicate() = p->getMessage();
+    }
+    if (o != nullptr) {
+        *pattern.mutable_object() = o->getMessage();
+    }
+    if (c != nullptr) {
+        *pattern.mutable_context() = c->getMessage();
+    }
+
+    return std::unique_ptr<sparql::StatementIterator>(
+            new WrapProtoStatementIterator(persistence->GetStatements(pattern)));
+}
+
+}  // namespace sparql
+}  // namespace persistence
+}  // namespace marmotta
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/persistence/leveldb_sparql.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/persistence/leveldb_sparql.h b/libraries/ostrich/backend/persistence/leveldb_sparql.h
new file mode 100644
index 0000000..9d8e989
--- /dev/null
+++ b/libraries/ostrich/backend/persistence/leveldb_sparql.h
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MARMOTTA_SPARQL_H
+#define MARMOTTA_SPARQL_H
+
+#include "sparql/rasqal_adapter.h"
+#include "leveldb_persistence.h"
+
+namespace marmotta {
+namespace persistence {
+namespace sparql {
+
+/**
+ * A SPARQL triple source using a LevelDBPersistence to access data, so the
+ * SPARQL engine can match triple patterns against the LevelDB indexes.
+ */
+class LevelDBTripleSource : public ::marmotta::sparql::TripleSource {
+ public:
+
+    // Does not take ownership of the persistence pointer.
+    LevelDBTripleSource(LevelDBPersistence *persistence) : persistence(persistence) { }
+
+
+    // Check whether at least one statement matches; null components are
+    // wildcards.
+    bool HasStatement(const rdf::Resource *s, const rdf::URI *p, const rdf::Value *o, const rdf::Resource *c) override;
+
+    // Iterate over all statements matching the components; null components
+    // are wildcards.
+    std::unique_ptr<::marmotta::sparql::StatementIterator>
+            GetStatements(const rdf::Resource *s, const rdf::URI *p, const rdf::Value *o, const rdf::Resource *c) override;
+
+ private:
+    // A pointer to the persistence instance wrapped by this triple source.
+    LevelDBPersistence* persistence;
+};
+
+
+}  // namespace sparql
+}  // namespace persistence
+}  // namespace marmotta
+
+#endif //MARMOTTA_SPARQL_H

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/serializer/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/serializer/CMakeLists.txt b/libraries/ostrich/backend/serializer/CMakeLists.txt
new file mode 100644
index 0000000..0fe9c4c
--- /dev/null
+++ b/libraries/ostrich/backend/serializer/CMakeLists.txt
@@ -0,0 +1,4 @@
+include_directories(.. ${CMAKE_CURRENT_BINARY_DIR}/..)
+
+# Serializer library for writing RDF statements in various output formats.
+# (serializer_raptor.cc was previously listed twice; the duplicate is removed.)
+add_library(marmotta_serializer serializer_raptor.h serializer_raptor.cc serializer_proto.cc serializer_proto.h serializer_base.cc serializer_base.h serializer.cc serializer.h)
+target_link_libraries(marmotta_serializer marmotta_model ${CMAKE_THREAD_LIBS_INIT} ${RAPTOR_LIBRARY})
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/serializer/serializer.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/serializer/serializer.cc b/libraries/ostrich/backend/serializer/serializer.cc
new file mode 100644
index 0000000..8f2f783
--- /dev/null
+++ b/libraries/ostrich/backend/serializer/serializer.cc
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "serializer.h"
+
+#include "serializer_raptor.h"
+#include "serializer_proto.h"
+
+namespace marmotta {
+namespace serializer {
+
+// Construct a serializer for the given format: the proto-based
+// implementation handles PROTO and PROTO_TEXT, Raptor handles all other
+// formats.
+Serializer::Serializer(const rdf::URI &baseUri, Format format, std::vector<rdf::Namespace> namespaces) {
+    if (format == PROTO || format == PROTO_TEXT) {
+        impl.reset(new ProtoSerializer(baseUri, format, namespaces));
+    } else {
+        impl.reset(new RaptorSerializer(baseUri, format, namespaces));
+    }
+}
+
+// Construct a serializer for the given format from a prefix->URI namespace
+// map: the proto-based implementation handles PROTO and PROTO_TEXT, Raptor
+// handles all other formats.
+Serializer::Serializer(const rdf::URI &baseUri, Format format, std::map<std::string, rdf::URI> namespaces) {
+    if (format == PROTO || format == PROTO_TEXT) {
+        impl.reset(new ProtoSerializer(baseUri, format, namespaces));
+    } else {
+        impl.reset(new RaptorSerializer(baseUri, format, namespaces));
+    }
+}
+
+}  // namespace serializer
+}  // namespace marmotta

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/serializer/serializer.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/serializer/serializer.h b/libraries/ostrich/backend/serializer/serializer.h
new file mode 100644
index 0000000..965fb9c
--- /dev/null
+++ b/libraries/ostrich/backend/serializer/serializer.h
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MARMOTTA_SERIALIZER_H
+#define MARMOTTA_SERIALIZER_H
+
+#include "serializer_base.h"
+
+namespace marmotta {
+namespace serializer {
+
+
+class Serializer {
+ public:
+    using StatementIterator = util::CloseableIterator<rdf::Statement>;
+
+    Serializer(const rdf::URI& baseUri, Format format)
+            : Serializer(baseUri, format, std::map<std::string, rdf::URI>()) {};
+    Serializer(const rdf::URI& baseUri, Format format, std::vector<rdf::Namespace> namespaces);
+    Serializer(const rdf::URI& baseUri, Format format, std::map<std::string, rdf::URI> namespaces);
+
+    ~Serializer() {};
+
+    void serialize(const rdf::Statement& stmt, std::ostream& out) {
+        impl->serialize(stmt, out);
+    };
+
+    void serialize(StatementIterator it, std::ostream& out) {
+        impl->serialize(it, out);
+    };
+
+ private:
+    std::unique_ptr<SerializerBase> impl;
+};
+
+
+}  // namespace serializer
+}  // namespace marmotta
+
+#endif //MARMOTTA_SERIALIZER_H

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/serializer/serializer_base.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/serializer/serializer_base.cc b/libraries/ostrich/backend/serializer/serializer_base.cc
new file mode 100644
index 0000000..4b3e86d
--- /dev/null
+++ b/libraries/ostrich/backend/serializer/serializer_base.cc
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "serializer_base.h"
+
+namespace marmotta {
+namespace serializer {
+
+namespace {
+static std::map<std::string, rdf::URI> namespacesMap(std::vector<rdf::Namespace> list) {
+    std::map<std::string, rdf::URI> result;
+    for (auto it = list.cbegin(); it != list.cend(); it++) {
+        result[it->getPrefix()] = it->getUri();
+    }
+    return result;
+}
+}  // namespace
+
+
+Format FormatFromString(const std::string &name) {
+    if (name == "rdfxml" || name == "rdf/xml" || name == "xml") {
+        return RDFXML;
+    }
+    if (name == "n3" || name == "ntriples" || name == "text/n3") {
+        return NTRIPLES;
+    }
+    if (name == "turtle" || name == "text/turtle") {
+        return TURTLE;
+    }
+    if (name == "textproto" || name == "text/proto") {
+        return PROTO_TEXT;
+    }
+    if (name == "proto" || name == "application/proto") {
+        return PROTO;
+    }
+    if (name == "json" || name == "application/json" || name == "application/rdf+json") {
+        return RDFJSON;
+    }
+    return RDFXML;
+}
+
// Construct a base serializer; the namespace list is converted into a
// prefix -> URI map via namespacesMap() above.
SerializerBase::SerializerBase(const rdf::URI& baseUri, Format format, std::vector<rdf::Namespace> namespaces)
        : baseUri(baseUri), format(format), namespaces(namespacesMap(namespaces)) { }

// Construct a base serializer from an already-built prefix -> URI map.
SerializerBase::SerializerBase(const rdf::URI& baseUri, Format format, std::map<std::string, rdf::URI> namespaces)
        : baseUri(baseUri), format(format), namespaces(namespaces) { }
+
+
+}  // namespace serializer
+}  // namespace marmotta

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/serializer/serializer_base.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/serializer/serializer_base.h b/libraries/ostrich/backend/serializer/serializer_base.h
new file mode 100644
index 0000000..24a64f9
--- /dev/null
+++ b/libraries/ostrich/backend/serializer/serializer_base.h
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MARMOTTA_BASE_SERIALIZER_H
+#define MARMOTTA_BASE_SERIALIZER_H
+
+#include <string>
+#include <map>
+#include <memory>
+#include <vector>
+#include <google/protobuf/io/zero_copy_stream_impl.h>
+
+#include <model/rdf_model.h>
+#include <util/iterator.h>
+
+
+namespace marmotta {
+namespace serializer {
+
// Output formats supported by the serializers. RDFXML through GRAPHVIZ
// are RDF text formats (handled by the Raptor backend); PROTO and
// PROTO_TEXT are the binary and text protobuf representations of the
// statements defined in model.proto.
enum Format {
    RDFXML, RDFXML_ABBREV, TURTLE, NTRIPLES, NQUADS, RDFJSON, SPARQL_JSON, GRAPHVIZ, PROTO, PROTO_TEXT
};
+
+
+/**
+ * Return the format matching the string name passed as argument.
+ */
+Format FormatFromString(const std::string &name);
+
/**
 * Serialize statements in various RDF text formats. This class and its subclasses are not thread safe.
 *
 * Subclasses implement the protected prepare()/serialize()/close()
 * hooks; the public serialize() overloads drive them in that order.
 */
class SerializerBase {
 public:
    using StatementIterator = util::CloseableIterator<rdf::Statement>;

    // Convenience constructor without namespace mappings.
    SerializerBase(const rdf::URI &baseUri, Format format)
            : SerializerBase(baseUri, format, std::map<std::string, rdf::URI>()) { };

    // Construct a serializer; the namespace list is converted into a
    // prefix -> URI map.
    SerializerBase(const rdf::URI &baseUri, Format format, std::vector<rdf::Namespace> namespaces);

    SerializerBase(const rdf::URI &baseUri, Format format, std::map<std::string, rdf::URI> namespaces);

    virtual ~SerializerBase() { };

    // Serialize a single statement as a complete document (header,
    // statement, footer) to the given output stream.
    void serialize(const rdf::Statement &stmt, std::ostream &out) {
        prepare(out);
        serialize(stmt);
        close();
    };

    // Serialize all statements produced by the iterator as a single
    // document to the given output stream.
    void serialize(StatementIterator &it, std::ostream &out) {
        prepare(out);
        for (; it.hasNext(); ++it) {
            serialize(*it);
        }
        close();
    };

 protected:
    rdf::URI baseUri;
    Format format;
    // Namespace prefix -> URI mappings registered with the serializer.
    std::map<std::string, rdf::URI> namespaces;

    // Begin a serialization to the given stream (e.g. write headers).
    virtual void prepare(std::ostream &out) = 0;

    // Serialize one statement; prepare() must have been called first.
    virtual void serialize(const rdf::Statement &stmt) = 0;

    // Finish the serialization (e.g. write footers, release resources).
    virtual void close() = 0;
};
+
+
/**
 * Exception thrown when a statement cannot be serialized, e.g. because
 * a subject/object field has no valid type.
 *
 * Inherits publicly from std::exception (the original used the default
 * private inheritance for a class, so `catch (std::exception&)` could
 * never catch it) and overrides what() so generic handlers see the
 * message.
 */
class SerializationError : public std::exception {
 public:
    explicit SerializationError(const char* message) : message(message) { }
    explicit SerializationError(const std::string &message) : message(message) { }

    const std::string &getMessage() const {
        return message;
    }

    // Standard accessor; valid as long as this exception object lives.
    const char* what() const noexcept override {
        return message.c_str();
    }

 private:
    std::string message;
};
+
+
+}  // namespace serializer
+}  // namespace marmotta
+
+#endif //MARMOTTA_BASE_SERIALIZER_H

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/serializer/serializer_proto.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/serializer/serializer_proto.cc b/libraries/ostrich/backend/serializer/serializer_proto.cc
new file mode 100644
index 0000000..f11730a
--- /dev/null
+++ b/libraries/ostrich/backend/serializer/serializer_proto.cc
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "serializer_proto.h"
+
+#include <google/protobuf/text_format.h>
+#include <google/protobuf/io/coded_stream.h>
+#include <google/protobuf/io/zero_copy_stream.h>
+
+
+namespace marmotta {
+namespace serializer {
+
// Wrap the target std::ostream in a protobuf ZeroCopyOutputStream;
// it is released again in close().
// NOTE(review): a previous out_ is not freed here — this assumes
// prepare()/close() are always called in matched pairs; confirm with
// SerializerBase::serialize(), which does call them in pairs.
void ProtoSerializer::prepare(std::ostream &out) {
    out_ = new google::protobuf::io::OstreamOutputStream(&out);
}
+
// Buffer the statement's proto message in the Statements collection;
// the actual output happens in close().
void ProtoSerializer::serialize(const rdf::Statement &stmt) {
    stmts_.add_statement()->MergeFrom(stmt.getMessage());
}
+
+void ProtoSerializer::close() {
+    google::protobuf::io::CodedOutputStream* coded_output =
+            new google::protobuf::io::CodedOutputStream(out_);
+    switch (format) {
+        case PROTO:
+            stmts_.SerializeToCodedStream(coded_output);
+            break;
+        case PROTO_TEXT:
+            google::protobuf::TextFormat::Print(
+                    stmts_, dynamic_cast<google::protobuf::io::ZeroCopyOutputStream*>(out_));
+            break;
+    }
+    stmts_.Clear();
+    delete coded_output;
+    delete out_;
+}
+
+}  // namespace serializer
+}  // namespace marmotta

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/serializer/serializer_proto.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/serializer/serializer_proto.h b/libraries/ostrich/backend/serializer/serializer_proto.h
new file mode 100644
index 0000000..1a2fa4d
--- /dev/null
+++ b/libraries/ostrich/backend/serializer/serializer_proto.h
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MARMOTTA_PROTO_SERIALIZER_H
+#define MARMOTTA_PROTO_SERIALIZER_H
+
+#include "serializer_base.h"
+
+namespace marmotta {
+namespace serializer {
+/**
+ * Serialize statements as binary proto wire format according to model.proto.
+ */
+class ProtoSerializer : public SerializerBase {
+ public:
+    ProtoSerializer(const rdf::URI& baseUri, Format format)
+            : ProtoSerializer(baseUri, format, std::map<std::string, rdf::URI>()) {};
+    ProtoSerializer(const rdf::URI& baseUri, Format format, std::vector<rdf::Namespace> namespaces)
+            : SerializerBase(baseUri, format, namespaces) {};
+    ProtoSerializer(const rdf::URI& baseUri, Format format, std::map<std::string, rdf::URI> namespaces)
+            : SerializerBase(baseUri, format, namespaces) {};
+
+ private:
+    void prepare(std::ostream& out) override;
+    void serialize(const rdf::Statement& stmt) override;
+    void close() override;
+
+    google::protobuf::io::OstreamOutputStream* out_;
+    marmotta::rdf::proto::Statements stmts_;
+};
+
+
+
+}
+}
+#endif //MARMOTTA_PROTO_SERIALIZER_H

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/serializer/serializer_raptor.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/serializer/serializer_raptor.cc b/libraries/ostrich/backend/serializer/serializer_raptor.cc
new file mode 100644
index 0000000..42014cb
--- /dev/null
+++ b/libraries/ostrich/backend/serializer/serializer_raptor.cc
@@ -0,0 +1,266 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "serializer_raptor.h"
+
+namespace marmotta {
+namespace serializer {
+
+namespace {
// Raptor write-byte handler: write one byte to the wrapped
// std::ostream. Returns 0 on success, 1 on stream failure.
// The int argument is explicitly narrowed to a char before writing;
// the original wrote the first byte of the int object, which is the
// wrong byte on big-endian platforms.
static int std_iostream_write_byte(void *context, const int byte) {
    std::ostream *out = static_cast<std::ostream *>(context);
    const char c = static_cast<char>(byte);
    out->write(&c, 1);
    if (*out) {
        return 0;
    } else {
        return 1;
    }
}
+
// Raptor write-bytes handler: copy size*nmemb bytes from ptr to the
// wrapped std::ostream. Returns 0 on success, 1 on stream failure.
static int std_iostream_write_bytes(void *context, const void *ptr, size_t size, size_t nmemb) {
    std::ostream *out = static_cast<std::ostream *>(context);
    out->write(static_cast<char const *>(ptr), size * nmemb);
    return (*out) ? 0 : 1;
}
+
// Raptor read-bytes handler: read up to size*nmemb bytes from the
// wrapped std::istream into ptr. Returns the number of bytes actually
// read, or -1 if the stream is already in a failed state.
static int std_iostream_read_bytes(void *context, void *ptr, size_t size, size_t nmemb) {
    std::istream *in = static_cast<std::istream *>(context);
    if (!*in) {
        return -1;
    }
    in->read(static_cast<char *>(ptr), size * nmemb);
    return static_cast<int>(in->gcount());
}
+
// Raptor end-of-file handler: report whether the wrapped std::istream
// has reached end of file (1 = EOF, 0 = more data available).
static int std_iostream_read_eof(void *context) {
    std::istream *in = static_cast<std::istream *>(context);
    return in->eof() ? 1 : 0;
}
+
// Handler table connecting raptor's iostream abstraction to the
// std::iostream adapter functions above (version 2 of the raptor
// iostream handler API). The NULL slots are the optional init/finish
// and write-end callbacks, which are not needed here.
const raptor_iostream_handler raptor_handler = {
        2, NULL, NULL,
        &std_iostream_write_byte, &std_iostream_write_bytes, NULL,
        &std_iostream_read_bytes, &std_iostream_read_eof
};
+
+
+inline std::string raptorFormat(Format format) {
+    switch (format) {
+        case Format::RDFXML:
+            return "rdfxml";
+        case Format::RDFXML_ABBREV:
+            return "rdfxml-abbrev";
+        case Format::GRAPHVIZ:
+            return "dot";
+        case Format::NQUADS:
+            return "nquads";
+        case Format::NTRIPLES:
+            return "ntriples";
+        case Format::TURTLE:
+            return "turtle";
+        case Format::RDFJSON:
+            return "json";
+        case Format::SPARQL_JSON:
+            return "json-triples";
+        default:
+            return "rdfxml";
+    }
+}
+}  // namespace
+
// Create a raptor serializer for the given base URI and format.
// Each constructor allocates the raptor world and the base URI term,
// then configures the actual serializer in initRaptor().
RaptorSerializer::RaptorSerializer(const rdf::URI& baseUri, Format format)
        : SerializerBase(baseUri, format) {

    world = raptor_new_world();
    base  = raptor_new_uri(world, (unsigned char const *) baseUri.getUri().c_str());
    initRaptor();
}

// As above, with a list of namespaces (converted to a prefix -> URI
// map by the SerializerBase constructor).
RaptorSerializer::RaptorSerializer(const rdf::URI& baseUri, Format format, std::vector<rdf::Namespace> namespaces)
        : SerializerBase(baseUri, format, namespaces) {

    world = raptor_new_world();
    base  = raptor_new_uri(world, (unsigned char const *) baseUri.getUri().c_str());
    initRaptor();
}

// As above, with an explicit prefix -> URI namespace map.
RaptorSerializer::RaptorSerializer(const rdf::URI& baseUri, Format format, std::map<std::string, rdf::URI> namespaces)
        : SerializerBase(baseUri, format, namespaces) {

    world = raptor_new_world();
    base  = raptor_new_uri(world, (unsigned char const *) baseUri.getUri().c_str());
    initRaptor();
}
+
+
// Release the raptor serializer, base URI and world in reverse order
// of creation.
// NOTE(review): 'stream' is freed in close(), not here — confirm that
// close() is always reached after prepare() (SerializerBase's public
// serialize() overloads do pair them).
RaptorSerializer::~RaptorSerializer() {
    // check for NULL in case a move operation has set the fields to a null pointer
    if(serializer != NULL)
        raptor_free_serializer(serializer);

    if(base != NULL)
        raptor_free_uri(base);

    if(world != NULL)
        raptor_free_world(world);

}
+
+/*
+RaptorSerializer::RaptorSerializer(const RaptorSerializer &other) {
+    format = other.format;
+    namespaces = other.namespaces;
+
+    world = raptor_new_world();
+    base  = raptor_new_uri(world, raptor_uri_as_string(other.base));
+    initRaptor();
+}
+
+RaptorSerializer::RaptorSerializer(RaptorSerializer &&other) {
+    format = other.format;
+    namespaces = other.namespaces;
+    base = other.base;
+    world = other.world;
+    serializer = other.serializer;
+
+    other.serializer = NULL;
+    other.base = NULL;
+    other.world = NULL;
+}
+
+RaptorSerializer &RaptorSerializer::operator=(const RaptorSerializer &other) {
+    format = other.format;
+    namespaces = other.namespaces;
+
+    world = raptor_new_world();
+    base  = raptor_new_uri(world, raptor_uri_as_string(other.base));
+    initRaptor();
+
+    return *this;
+}
+
+RaptorSerializer &RaptorSerializer::operator=(RaptorSerializer &&other) {
+    format = other.format;
+    namespaces = other.namespaces;
+    serializer = other.serializer;
+    base = other.base;
+    world = other.world;
+
+    other.serializer = NULL;
+    other.base = NULL;
+    other.world = NULL;
+
+    return *this;
+}
+*/
+
// Create the raptor serializer for the configured format and register
// all namespace prefixes with it. Raptor log messages are written to
// stderr.
void RaptorSerializer::initRaptor() {
    serializer = raptor_new_serializer(world, raptorFormat(format).c_str());
    for(const auto &e : namespaces) {
        // NOTE(review): the URI allocated here is never freed in this
        // file; verify whether raptor_serializer_set_namespace takes
        // ownership or this leaks one raptor_uri per namespace.
        raptor_uri* uri = raptor_new_uri(world, (unsigned char const *) e.second.getUri().c_str());
        raptor_serializer_set_namespace(serializer, uri, (unsigned char const *) e.first.c_str());
    }
    // 'this' is passed as user_data, but the handler currently does not
    // use it — it just echoes the message to stderr.
    raptor_world_set_log_handler(world, this, [](void *user_data, raptor_log_message* message){
        std::cerr << message->level << ": " << message->text << std::endl;
    });
}
+
// Wrap the std::ostream in a raptor iostream (using the handler table
// above) and start the serialization; the stream is freed in close().
void RaptorSerializer::prepare(std::ostream &out) {
    stream = raptor_new_iostream_from_handler(world, &out, &raptor_handler);
    raptor_serializer_start_to_iostream(serializer, base, stream);
}
+
+void RaptorSerializer::serialize(const rdf::Statement &stmt) {
+    raptor_statement* triple = raptor_new_statement(world);
+
+    if (stmt.getMessage().subject().has_uri()) {
+        triple->subject = raptor_new_term_from_uri_string(
+                world, (unsigned char const *) stmt.getMessage().subject().uri().uri().c_str());
+    } else if (stmt.getMessage().subject().has_bnode()) {
+        triple->subject = raptor_new_term_from_blank(
+                world, (unsigned char const *) stmt.getMessage().subject().bnode().id().c_str());
+    } else {
+        throw SerializationError("invalid subject type");
+    }
+
+    triple->predicate = raptor_new_term_from_uri_string(
+            world,  (unsigned char const *) stmt.getMessage().predicate().uri().c_str());
+
+    if (stmt.getMessage().object().has_resource()) {
+        const marmotta::rdf::proto::Resource& r = stmt.getMessage().object().resource();
+        if (r.has_uri()) {
+            triple->object = raptor_new_term_from_uri_string(
+                    world, (unsigned char const *) r.uri().uri().c_str());
+        } else if(r.has_bnode()) {
+            triple->object = raptor_new_term_from_blank(
+                    world, (unsigned char const *) r.bnode().id().c_str());
+        } else {
+            throw SerializationError("invalid object resource type");
+        }
+    } else if (stmt.getMessage().object().has_literal()) {
+        const marmotta::rdf::proto::Literal& l = stmt.getMessage().object().literal();
+        if (l.has_stringliteral()) {
+            triple->object = raptor_new_term_from_counted_literal(
+                    world,
+                    (unsigned char const *) l.stringliteral().content().c_str(), l.stringliteral().content().size(), NULL,
+                    (unsigned char const *) l.stringliteral().language().c_str(), l.stringliteral().language().size());
+        } else if(l.has_dataliteral()) {
+            triple->object = raptor_new_term_from_counted_literal(
+                    world,
+                    (unsigned char const *) l.dataliteral().content().c_str(), l.dataliteral().content().size(),
+                    raptor_new_uri(world, (unsigned char const *) l.dataliteral().datatype().uri().c_str()),
+                    (unsigned char const *) "", 0);
+        } else {
+            throw SerializationError("invalid object literal type");
+        }
+    } else {
+        throw SerializationError("invalid object type");
+    }
+
+    if (stmt.getMessage().context().has_uri()) {
+        triple->graph = raptor_new_term_from_uri_string(
+                world,  (unsigned char const *) stmt.getMessage().context().uri().uri().c_str());
+    } else if (stmt.getMessage().context().has_bnode()) {
+        triple->graph = raptor_new_term_from_blank(
+                world, (unsigned char const *) stmt.getMessage().context().bnode().id().c_str());
+    } else {
+        throw SerializationError("invalid context type");
+    }
+
+    raptor_serializer_serialize_statement(serializer, triple);
+
+    raptor_free_statement(triple);
+}
+
// Finish the current serialization (write any footer) and free the
// raptor iostream created in prepare().
void RaptorSerializer::close() {
    raptor_serializer_serialize_end(serializer);
    raptor_free_iostream(stream);
}
+
+}  // namespace serializer
+}  // namespace marmotta

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/serializer/serializer_raptor.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/serializer/serializer_raptor.h b/libraries/ostrich/backend/serializer/serializer_raptor.h
new file mode 100644
index 0000000..680d227
--- /dev/null
+++ b/libraries/ostrich/backend/serializer/serializer_raptor.h
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MARMOTTA_RDF_SERIALIZER_H
+#define MARMOTTA_RDF_SERIALIZER_H
+
+#include "serializer_base.h"
+#include <raptor2/raptor2.h>
+
+namespace marmotta {
+namespace serializer {
+
+/**
+ * Serializer implementation using the Raptor library to write out statements
+ * in different RDF serialization formats.
+ */
+class RaptorSerializer : public SerializerBase {
+ public:
+    RaptorSerializer(const rdf::URI& baseUri, Format format);
+    RaptorSerializer(const rdf::URI& baseUri, Format format, std::vector<rdf::Namespace> namespaces);
+    RaptorSerializer(const rdf::URI& baseUri, Format format, std::map<std::string, rdf::URI> namespaces);
+    ~RaptorSerializer() override;
+
+ private:
+    raptor_serializer* serializer;
+    raptor_world*      world;
+    raptor_uri*        base;
+    raptor_iostream*   stream;
+
+    void prepare(std::ostream& out) override;
+    void serialize(const rdf::Statement& stmt) override;
+    void close() override;
+
+    void initRaptor();
+};
+
+
+}
+}
+
+#endif //MARMOTTA_RDF_SERIALIZER_H

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/service/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/service/CMakeLists.txt b/libraries/ostrich/backend/service/CMakeLists.txt
new file mode 100644
index 0000000..d911eef
--- /dev/null
+++ b/libraries/ostrich/backend/service/CMakeLists.txt
@@ -0,0 +1,9 @@
# Headers: backend root, generated model headers, and the raptor2 library.
include_directories(.. ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_BINARY_DIR}/../model ${RAPTOR_INCLUDE_DIR}/raptor2)

# Generate C++ message stubs and gRPC service stubs from all .proto
# files in this directory; generated sources land in the binary dir.
file(GLOB ProtoFiles "${CMAKE_CURRENT_SOURCE_DIR}/*.proto")
PROTOBUF_GENERATE_CPP(PROTO_SRCS PROTO_HDRS ${ProtoFiles})
PROTOBUF_GENERATE_GRPC_CPP(GRPC_SRCS GRPC_HDRS ${ProtoFiles})
include_directories(${CMAKE_CURRENT_BINARY_DIR})

# Service library: generated protobuf/gRPC code linked against the
# model library, threads, protobuf and gRPC.
add_library(marmotta_service ${PROTO_SRCS} ${PROTO_HDRS} ${GRPC_SRCS} ${GRPC_HDRS})
target_link_libraries(marmotta_service marmotta_model ${CMAKE_THREAD_LIBS_INIT} ${PROTOBUF_LIBRARIES} ${GRPC_LIBRARIES})
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/service/sail.proto
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/service/sail.proto b/libraries/ostrich/backend/service/sail.proto
new file mode 100644
index 0000000..a1091ea
--- /dev/null
+++ b/libraries/ostrich/backend/service/sail.proto
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+syntax = "proto3";
+
+package marmotta.service.proto;
+
+option java_package = "org.apache.marmotta.ostrich.client.proto";
+
+
+import "model.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/wrappers.proto";
+
// A set of contexts (named graphs) a request applies to. An empty list
// means the whole repository (see Clear() and Size() below).
message ContextRequest {
    repeated marmotta.rdf.proto.Resource context = 1;
}
+
// Update requests either add a statement or remove a statement pattern. Used
// by UpdateStatements() to allow batch update operations.
message UpdateRequest {
    oneof Update {
        // Statement to add to the repository.
        marmotta.rdf.proto.Statement stmt_added = 1;
        // Statement pattern whose matching statements are removed.
        marmotta.rdf.proto.Statement stmt_removed = 2;
        // Namespace to add.
        marmotta.rdf.proto.Namespace ns_added = 3;
        // Namespace to remove.
        marmotta.rdf.proto.Namespace ns_removed = 4;
    }
}
+
// Update responses contain statistics about the modified entities.
message UpdateResponse {
    // Number of statements added by the batch.
    int64 added_statements = 1;
    // Number of statements removed by the batch.
    int64 removed_statements = 2;
    // Number of namespaces added by the batch.
    int64 added_namespaces = 3;
    // Number of namespaces removed by the batch.
    int64 removed_namespaces = 4;
}
+
// Repository access service: CRUD and batch operations on statements,
// namespaces and contexts. Streaming is used for bulk input (add) and
// bulk output (listing) operations.
service SailService {
    // Add namespaces to the repository. Accepts a stream of namespaces.
    // Returns the number of namespaces added.
    rpc AddNamespaces(stream marmotta.rdf.proto.Namespace)
        returns (google.protobuf.Int64Value);

    // Return the namespace matching the given request. Either prefix or uri
    // must be given.
    rpc GetNamespace(marmotta.rdf.proto.Namespace)
        returns (marmotta.rdf.proto.Namespace);

    // Stream back all namespaces stored in the repository.
    rpc GetNamespaces(google.protobuf.Empty)
        returns (stream marmotta.rdf.proto.Namespace);

    // Delete the namespace given as argument.
    rpc RemoveNamespace(marmotta.rdf.proto.Namespace)
        returns (google.protobuf.Int64Value);

    // Add statements to the repository. Accepts a stream of statements.
    // Returns the number of statements added.
    rpc AddStatements(stream marmotta.rdf.proto.Statement)
        returns (google.protobuf.Int64Value);

    // List statements matching a statement pattern. Fields of Statement not
    // set are considered to be wildcards. Returns a stream of statements.
    rpc GetStatements(marmotta.rdf.proto.Statement)
        returns (stream marmotta.rdf.proto.Statement);

    // Delete statements matching a statement pattern.  Fields of Statement
    // not set are considered to be wildcards. Returns the number of statements
    // deleted.
    rpc RemoveStatements(marmotta.rdf.proto.Statement)
        returns (google.protobuf.Int64Value);

    // Return the set of all unique context identifiers used to store
    // statements.
    rpc GetContexts(google.protobuf.Empty)
        returns (stream marmotta.rdf.proto.Resource);

    // Remove all statements in the contexts specified in the request. If no
    // contexts are specified, clears the complete repository.
    rpc Clear(ContextRequest) returns (google.protobuf.Int64Value);

    // Count the number of statements in the contexts specified in the request.
    // If no contexts are specified, counts all statements.
    rpc Size(ContextRequest) returns (google.protobuf.Int64Value);

    // Batch update operation to process a stream of update requests. Updates
    // are applied in order.
    rpc Update(stream UpdateRequest) returns (UpdateResponse);
}
\ No newline at end of file


[3/7] marmotta git commit: move experimental C++ LevelDB backend into Apache Marmotta main, and named the new module "ostrich" as an analogy to "kiwi"

Posted by ss...@apache.org.
http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/test/gtest.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/test/gtest.h b/libraries/ostrich/backend/test/gtest.h
new file mode 100644
index 0000000..4f3804f
--- /dev/null
+++ b/libraries/ostrich/backend/test/gtest.h
@@ -0,0 +1,20061 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the public API for Google Test.  It should be
+// included by any test program that uses Google Test.
+//
+// IMPORTANT NOTE: Due to limitation of the C++ language, we have to
+// leave some internal implementation details in this header file.
+// They are clearly marked by comments like this:
+//
+//   // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+//
+// Such code is NOT meant to be used by a user directly, and is subject
+// to CHANGE WITHOUT NOTICE.  Therefore DO NOT DEPEND ON IT in a user
+// program!
+//
+// Acknowledgment: Google Test borrowed the idea of automatic test
+// registration from Barthelemy Dagenais' (barthelemy@prologique.com)
+// easyUnit framework.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_H_
+
+#include <limits>
+#include <ostream>
+#include <vector>
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file declares functions and macros used internally by
+// Google Test.  They are subject to change without notice.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan)
+//
+// Low-level types and utilities for porting Google Test to various
+// platforms.  They are subject to change without notice.  DO NOT USE
+// THEM IN USER CODE.
+//
+// This file is fundamental to Google Test.  All other Google Test source
+// files are expected to #include this.  Therefore, it cannot #include
+// any other Google Test header.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+
+// The user can define the following macros in the build script to
+// control Google Test's behavior.  If the user doesn't define a macro
+// in this list, Google Test will define it.
+//
+//   GTEST_HAS_CLONE          - Define it to 1/0 to indicate that clone(2)
+//                              is/isn't available.
+//   GTEST_HAS_EXCEPTIONS     - Define it to 1/0 to indicate that exceptions
+//                              are enabled.
+//   GTEST_HAS_GLOBAL_STRING  - Define it to 1/0 to indicate that ::string
+//                              is/isn't available (some systems define
+//                              ::string, which is different to std::string).
+//   GTEST_HAS_GLOBAL_WSTRING - Define it to 1/0 to indicate that ::string
+//                              is/isn't available (some systems define
+//                              ::wstring, which is different to std::wstring).
+//   GTEST_HAS_POSIX_RE       - Define it to 1/0 to indicate that POSIX regular
+//                              expressions are/aren't available.
+//   GTEST_HAS_PTHREAD        - Define it to 1/0 to indicate that <pthread.h>
+//                              is/isn't available.
+//   GTEST_HAS_RTTI           - Define it to 1/0 to indicate that RTTI is/isn't
+//                              enabled.
+//   GTEST_HAS_STD_WSTRING    - Define it to 1/0 to indicate that
+//                              std::wstring does/doesn't work (Google Test can
+//                              be used where std::wstring is unavailable).
+//   GTEST_HAS_TR1_TUPLE      - Define it to 1/0 to indicate tr1::tuple
+//                              is/isn't available.
+//   GTEST_HAS_SEH            - Define it to 1/0 to indicate whether the
+//                              compiler supports Microsoft's "Structured
+//                              Exception Handling".
+//   GTEST_HAS_STREAM_REDIRECTION
+//                            - Define it to 1/0 to indicate whether the
+//                              platform supports I/O stream redirection using
+//                              dup() and dup2().
+//   GTEST_USE_OWN_TR1_TUPLE  - Define it to 1/0 to indicate whether Google
+//                              Test's own tr1 tuple implementation should be
+//                              used.  Unused when the user sets
+//                              GTEST_HAS_TR1_TUPLE to 0.
+//   GTEST_LANG_CXX11         - Define it to 1/0 to indicate that Google Test
+//                              is building in C++11/C++98 mode.
+//   GTEST_LINKED_AS_SHARED_LIBRARY
+//                            - Define to 1 when compiling tests that use
+//                              Google Test as a shared library (known as
+//                              DLL on Windows).
+//   GTEST_CREATE_SHARED_LIBRARY
+//                            - Define to 1 when compiling Google Test itself
+//                              as a shared library.
+
+// This header defines the following utilities:
+//
+// Macros indicating the current platform (defined to 1 if compiled on
+// the given platform; otherwise undefined):
+//   GTEST_OS_AIX      - IBM AIX
+//   GTEST_OS_CYGWIN   - Cygwin
+//   GTEST_OS_HPUX     - HP-UX
+//   GTEST_OS_LINUX    - Linux
+//     GTEST_OS_LINUX_ANDROID - Google Android
+//   GTEST_OS_MAC      - Mac OS X
+//     GTEST_OS_IOS    - iOS
+//       GTEST_OS_IOS_SIMULATOR - iOS simulator
+//   GTEST_OS_NACL     - Google Native Client (NaCl)
+//   GTEST_OS_OPENBSD  - OpenBSD
+//   GTEST_OS_QNX      - QNX
+//   GTEST_OS_SOLARIS  - Sun Solaris
+//   GTEST_OS_SYMBIAN  - Symbian
+//   GTEST_OS_WINDOWS  - Windows (Desktop, MinGW, or Mobile)
+//     GTEST_OS_WINDOWS_DESKTOP  - Windows Desktop
+//     GTEST_OS_WINDOWS_MINGW    - MinGW
+//     GTEST_OS_WINDOWS_MOBILE   - Windows Mobile
+//   GTEST_OS_ZOS      - z/OS
+//
+// Among the platforms, Cygwin, Linux, Max OS X, and Windows have the
+// most stable support.  Since core members of the Google Test project
+// don't have access to other platforms, support for them may be less
+// stable.  If you notice any problems on your platform, please notify
+// googletestframework@googlegroups.com (patches for fixing them are
+// even more welcome!).
+//
+// Note that it is possible that none of the GTEST_OS_* macros are defined.
+//
+// Macros indicating available Google Test features (defined to 1 if
+// the corresponding feature is supported; otherwise undefined):
+//   GTEST_HAS_COMBINE      - the Combine() function (for value-parameterized
+//                            tests)
+//   GTEST_HAS_DEATH_TEST   - death tests
+//   GTEST_HAS_PARAM_TEST   - value-parameterized tests
+//   GTEST_HAS_TYPED_TEST   - typed tests
+//   GTEST_HAS_TYPED_TEST_P - type-parameterized tests
+//   GTEST_USES_POSIX_RE    - enhanced POSIX regex is used. Do not confuse with
+//                            GTEST_HAS_POSIX_RE (see above) which users can
+//                            define themselves.
+//   GTEST_USES_SIMPLE_RE   - our own simple regex is used;
+//                            the above two are mutually exclusive.
+//   GTEST_CAN_COMPARE_NULL - accepts untyped NULL in EXPECT_EQ().
+//
+// Macros for basic C++ coding:
+//   GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning.
+//   GTEST_ATTRIBUTE_UNUSED_  - declares that a class' instances or a
+//                              variable don't have to be used.
+//   GTEST_DISALLOW_ASSIGN_   - disables operator=.
+//   GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=.
+//   GTEST_MUST_USE_RESULT_   - declares that a function's result must be used.
+//
+// Synchronization:
+//   Mutex, MutexLock, ThreadLocal, GetThreadCount()
+//                  - synchronization primitives.
+//   GTEST_IS_THREADSAFE - defined to 1 to indicate that the above
+//                         synchronization primitives have real implementations
+//                         and Google Test is thread-safe; or 0 otherwise.
+//
+// Template meta programming:
+//   is_pointer     - as in TR1; needed on Symbian and IBM XL C/C++ only.
+//   IteratorTraits - partial implementation of std::iterator_traits, which
+//                    is not available in libCstd when compiled with Sun C++.
+//
+// Smart pointers:
+//   scoped_ptr     - as in TR2.
+//
+// Regular expressions:
+//   RE             - a simple regular expression class using the POSIX
+//                    Extended Regular Expression syntax on UNIX-like
+//                    platforms, or a reduced regular exception syntax on
+//                    other platforms, including Windows.
+//
+// Logging:
+//   GTEST_LOG_()   - logs messages at the specified severity level.
+//   LogToStderr()  - directs all log messages to stderr.
+//   FlushInfoLog() - flushes informational log messages.
+//
+// Stdout and stderr capturing:
+//   CaptureStdout()     - starts capturing stdout.
+//   GetCapturedStdout() - stops capturing stdout and returns the captured
+//                         string.
+//   CaptureStderr()     - starts capturing stderr.
+//   GetCapturedStderr() - stops capturing stderr and returns the captured
+//                         string.
+//
+// Integer types:
+//   TypeWithSize   - maps an integer to a int type.
+//   Int32, UInt32, Int64, UInt64, TimeInMillis
+//                  - integers of known sizes.
+//   BiggestInt     - the biggest signed integer type.
+//
+// Command-line utilities:
+//   GTEST_FLAG()       - references a flag.
+//   GTEST_DECLARE_*()  - declares a flag.
+//   GTEST_DEFINE_*()   - defines a flag.
+//   GetInjectableArgvs() - returns the command line as a vector of strings.
+//
+// Environment variable utilities:
+//   GetEnv()             - gets the value of an environment variable.
+//   BoolFromGTestEnv()   - parses a bool environment variable.
+//   Int32FromGTestEnv()  - parses an Int32 environment variable.
+//   StringFromGTestEnv() - parses a string environment variable.
+
+#include <ctype.h>   // for isspace, etc
+#include <stddef.h>  // for ptrdiff_t
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifndef _WIN32_WCE
+# include <sys/types.h>
+# include <sys/stat.h>
+#endif  // !_WIN32_WCE
+
+#if defined __APPLE__
+# include <AvailabilityMacros.h>
+# include <TargetConditionals.h>
+#endif
+
+#include <iostream>  // NOLINT
+#include <sstream>  // NOLINT
+#include <string>  // NOLINT
+
+#define GTEST_DEV_EMAIL_ "googletestframework@@googlegroups.com"
+#define GTEST_FLAG_PREFIX_ "gtest_"
+#define GTEST_FLAG_PREFIX_DASH_ "gtest-"
+#define GTEST_FLAG_PREFIX_UPPER_ "GTEST_"
+#define GTEST_NAME_ "Google Test"
+#define GTEST_PROJECT_URL_ "http://code.google.com/p/googletest/"
+
+// Determines the version of gcc that is used to compile this.
+#ifdef __GNUC__
+// 40302 means version 4.3.2.
+# define GTEST_GCC_VER_ \
+    (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)
+#endif  // __GNUC__
+
+// Determines the platform on which Google Test is compiled.
+#ifdef __CYGWIN__
+# define GTEST_OS_CYGWIN 1
+#elif defined __SYMBIAN32__
+# define GTEST_OS_SYMBIAN 1
+#elif defined _WIN32
+# define GTEST_OS_WINDOWS 1
+# ifdef _WIN32_WCE
+#  define GTEST_OS_WINDOWS_MOBILE 1
+# elif defined(__MINGW__) || defined(__MINGW32__)
+#  define GTEST_OS_WINDOWS_MINGW 1
+# else
+#  define GTEST_OS_WINDOWS_DESKTOP 1
+# endif  // _WIN32_WCE
+#elif defined __APPLE__
+# define GTEST_OS_MAC 1
+# if TARGET_OS_IPHONE
+#  define GTEST_OS_IOS 1
+#  if TARGET_IPHONE_SIMULATOR
+#   define GTEST_OS_IOS_SIMULATOR 1
+#  endif
+# endif
+#elif defined __linux__
+# define GTEST_OS_LINUX 1
+# if defined __ANDROID__
+#  define GTEST_OS_LINUX_ANDROID 1
+# endif
+#elif defined __MVS__
+# define GTEST_OS_ZOS 1
+#elif defined(__sun) && defined(__SVR4)
+# define GTEST_OS_SOLARIS 1
+#elif defined(_AIX)
+# define GTEST_OS_AIX 1
+#elif defined(__hpux)
+# define GTEST_OS_HPUX 1
+#elif defined __native_client__
+# define GTEST_OS_NACL 1
+#elif defined __OpenBSD__
+# define GTEST_OS_OPENBSD 1
+#elif defined __QNX__
+# define GTEST_OS_QNX 1
+#endif  // __CYGWIN__
+
+#ifndef GTEST_LANG_CXX11
+// gcc and clang define __GXX_EXPERIMENTAL_CXX0X__ when
+// -std={c,gnu}++{0x,11} is passed.  The C++11 standard specifies a
+// value for __cplusplus, and recent versions of clang, gcc, and
+// probably other compilers set that too in C++11 mode.
+# if __GXX_EXPERIMENTAL_CXX0X__ || __cplusplus >= 201103L
+// Compiling in at least C++11 mode.
+#  define GTEST_LANG_CXX11 1
+# else
+#  define GTEST_LANG_CXX11 0
+# endif
+#endif
+
+// Brings in definitions for functions used in the testing::internal::posix
+// namespace (read, write, close, chdir, isatty, stat). We do not currently
+// use them on Windows Mobile.
+#if !GTEST_OS_WINDOWS
+// This assumes that non-Windows OSes provide unistd.h. For OSes where this
+// is not the case, we need to include headers that provide the functions
+// mentioned above.
+# include <unistd.h>
+# include <strings.h>
+#elif !GTEST_OS_WINDOWS_MOBILE
+# include <direct.h>
+# include <io.h>
+#endif
+
+#if GTEST_OS_LINUX_ANDROID
+// Used to define __ANDROID_API__ matching the target NDK API level.
+#  include <android/api-level.h>  // NOLINT
+#endif
+
+// Defines this to true iff Google Test can use POSIX regular expressions.
+#ifndef GTEST_HAS_POSIX_RE
+# if GTEST_OS_LINUX_ANDROID
+// On Android, <regex.h> is only available starting with Gingerbread.
+#  define GTEST_HAS_POSIX_RE (__ANDROID_API__ >= 9)
+# else
+#  define GTEST_HAS_POSIX_RE (!GTEST_OS_WINDOWS)
+# endif
+#endif
+
+#if GTEST_HAS_POSIX_RE
+
+// On some platforms, <regex.h> needs someone to define size_t, and
+// won't compile otherwise.  We can #include it here as we already
+// included <stdlib.h>, which is guaranteed to define size_t through
+// <stddef.h>.
+# include <regex.h>  // NOLINT
+
+# define GTEST_USES_POSIX_RE 1
+
+#elif GTEST_OS_WINDOWS
+
+// <regex.h> is not available on Windows.  Use our own simple regex
+// implementation instead.
+# define GTEST_USES_SIMPLE_RE 1
+
+#else
+
+// <regex.h> may not be available on this platform.  Use our own
+// simple regex implementation instead.
+# define GTEST_USES_SIMPLE_RE 1
+
+#endif  // GTEST_HAS_POSIX_RE
+
+#ifndef GTEST_HAS_EXCEPTIONS
+// The user didn't tell us whether exceptions are enabled, so we need
+// to figure it out.
+# if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC's and C++Builder's implementations of the STL use the _HAS_EXCEPTIONS
+// macro to enable exceptions, so we'll do the same.
+// Assumes that exceptions are enabled by default.
+#  ifndef _HAS_EXCEPTIONS
+#   define _HAS_EXCEPTIONS 1
+#  endif  // _HAS_EXCEPTIONS
+#  define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS
+# elif defined(__GNUC__) && __EXCEPTIONS
+// gcc defines __EXCEPTIONS to 1 iff exceptions are enabled.
+#  define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__SUNPRO_CC)
+// Sun Pro CC supports exceptions.  However, there is no compile-time way of
+// detecting whether they are enabled or not.  Therefore, we assume that
+// they are enabled unless the user tells us otherwise.
+#  define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__IBMCPP__) && __EXCEPTIONS
+// xlC defines __EXCEPTIONS to 1 iff exceptions are enabled.
+#  define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__HP_aCC)
+// Exception handling is in effect by default in HP aCC compiler. It has to
+// be turned of by +noeh compiler option if desired.
+#  define GTEST_HAS_EXCEPTIONS 1
+# else
+// For other compilers, we assume exceptions are disabled to be
+// conservative.
+#  define GTEST_HAS_EXCEPTIONS 0
+# endif  // defined(_MSC_VER) || defined(__BORLANDC__)
+#endif  // GTEST_HAS_EXCEPTIONS
+
+#if !defined(GTEST_HAS_STD_STRING)
+// Even though we don't use this macro any longer, we keep it in case
+// some clients still depend on it.
+# define GTEST_HAS_STD_STRING 1
+#elif !GTEST_HAS_STD_STRING
+// The user told us that ::std::string isn't available.
+# error "Google Test cannot be used where ::std::string isn't available."
+#endif  // !defined(GTEST_HAS_STD_STRING)
+
+#ifndef GTEST_HAS_GLOBAL_STRING
+// The user didn't tell us whether ::string is available, so we need
+// to figure it out.
+
+# define GTEST_HAS_GLOBAL_STRING 0
+
+#endif  // GTEST_HAS_GLOBAL_STRING
+
+#ifndef GTEST_HAS_STD_WSTRING
+// The user didn't tell us whether ::std::wstring is available, so we need
+// to figure it out.
+// TODO(wan@google.com): uses autoconf to detect whether ::std::wstring
+//   is available.
+
+// Cygwin 1.7 and below doesn't support ::std::wstring.
+// Solaris' libc++ doesn't support it either.  Android has
+// no support for it at least as recent as Froyo (2.2).
+# define GTEST_HAS_STD_WSTRING \
+    (!(GTEST_OS_LINUX_ANDROID || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS))
+
+#endif  // GTEST_HAS_STD_WSTRING
+
+#ifndef GTEST_HAS_GLOBAL_WSTRING
+// The user didn't tell us whether ::wstring is available, so we need
+// to figure it out.
+# define GTEST_HAS_GLOBAL_WSTRING \
+    (GTEST_HAS_STD_WSTRING && GTEST_HAS_GLOBAL_STRING)
+#endif  // GTEST_HAS_GLOBAL_WSTRING
+
+// Determines whether RTTI is available.
+#ifndef GTEST_HAS_RTTI
+// The user didn't tell us whether RTTI is enabled, so we need to
+// figure it out.
+
+# ifdef _MSC_VER
+
+#  ifdef _CPPRTTI  // MSVC defines this macro iff RTTI is enabled.
+#   define GTEST_HAS_RTTI 1
+#  else
+#   define GTEST_HAS_RTTI 0
+#  endif
+
+// Starting with version 4.3.2, gcc defines __GXX_RTTI iff RTTI is enabled.
+# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40302)
+
+#  ifdef __GXX_RTTI
+// When building against STLport with the Android NDK and with
+// -frtti -fno-exceptions, the build fails at link time with undefined
+// references to __cxa_bad_typeid. Note sure if STL or toolchain bug,
+// so disable RTTI when detected.
+#   if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR) && \
+       !defined(__EXCEPTIONS)
+#    define GTEST_HAS_RTTI 0
+#   else
+#    define GTEST_HAS_RTTI 1
+#   endif  // GTEST_OS_LINUX_ANDROID && __STLPORT_MAJOR && !__EXCEPTIONS
+#  else
+#   define GTEST_HAS_RTTI 0
+#  endif  // __GXX_RTTI
+
+// Clang defines __GXX_RTTI starting with version 3.0, but its manual recommends
+// using has_feature instead. has_feature(cxx_rtti) is supported since 2.7, the
+// first version with C++ support.
+# elif defined(__clang__)
+
+#  define GTEST_HAS_RTTI __has_feature(cxx_rtti)
+
+// Starting with version 9.0 IBM Visual Age defines __RTTI_ALL__ to 1 if
+// both the typeid and dynamic_cast features are present.
+# elif defined(__IBMCPP__) && (__IBMCPP__ >= 900)
+
+#  ifdef __RTTI_ALL__
+#   define GTEST_HAS_RTTI 1
+#  else
+#   define GTEST_HAS_RTTI 0
+#  endif
+
+# else
+
+// For all other compilers, we assume RTTI is enabled.
+#  define GTEST_HAS_RTTI 1
+
+# endif  // _MSC_VER
+
+#endif  // GTEST_HAS_RTTI
+
+// It's this header's responsibility to #include <typeinfo> when RTTI
+// is enabled.
+#if GTEST_HAS_RTTI
+# include <typeinfo>
+#endif
+
+// Determines whether Google Test can use the pthreads library.
+#ifndef GTEST_HAS_PTHREAD
+// The user didn't tell us explicitly, so we assume pthreads support is
+// available on Linux and Mac.
+//
+// To disable threading support in Google Test, add -DGTEST_HAS_PTHREAD=0
+// to your compiler flags.
+# define GTEST_HAS_PTHREAD (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_HPUX \
+    || GTEST_OS_QNX)
+#endif  // GTEST_HAS_PTHREAD
+
+#if GTEST_HAS_PTHREAD
+// gtest-port.h guarantees to #include <pthread.h> when GTEST_HAS_PTHREAD is
+// true.
+# include <pthread.h>  // NOLINT
+
+// For timespec and nanosleep, used below.
+# include <time.h>  // NOLINT
+#endif
+
+// Determines whether Google Test can use tr1/tuple.  You can define
+// this macro to 0 to prevent Google Test from using tuple (any
+// feature depending on tuple with be disabled in this mode).
+#ifndef GTEST_HAS_TR1_TUPLE
+# if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR)
+// STLport, provided with the Android NDK, has neither <tr1/tuple> or <tuple>.
+#  define GTEST_HAS_TR1_TUPLE 0
+# else
+// The user didn't tell us not to do it, so we assume it's OK.
+#  define GTEST_HAS_TR1_TUPLE 1
+# endif
+#endif  // GTEST_HAS_TR1_TUPLE
+
+// Determines whether Google Test's own tr1 tuple implementation
+// should be used.
+#ifndef GTEST_USE_OWN_TR1_TUPLE
+// The user didn't tell us, so we need to figure it out.
+
+// We use our own TR1 tuple if we aren't sure the user has an
+// implementation of it already.  At this time, libstdc++ 4.0.0+ and
+// MSVC 2010 are the only mainstream standard libraries that come
+// with a TR1 tuple implementation.  NVIDIA's CUDA NVCC compiler
+// pretends to be GCC by defining __GNUC__ and friends, but cannot
+// compile GCC's tuple implementation.  MSVC 2008 (9.0) provides TR1
+// tuple in a 323 MB Feature Pack download, which we cannot assume the
+// user has.  QNX's QCC compiler is a modified GCC but it doesn't
+// support TR1 tuple.  libc++ only provides std::tuple, in C++11 mode,
+// and it can be used with some compilers that define __GNUC__.
+# if (defined(__GNUC__) && !defined(__CUDACC__) && (GTEST_GCC_VER_ >= 40000) \
+      && !GTEST_OS_QNX && !defined(_LIBCPP_VERSION)) || _MSC_VER >= 1600
+#  define GTEST_ENV_HAS_TR1_TUPLE_ 1
+# endif
+
+// C++11 specifies that <tuple> provides std::tuple. Use that if gtest is used
+// in C++11 mode and libstdc++ isn't very old (binaries targeting OS X 10.6
+// can build with clang but need to use gcc4.2's libstdc++).
+# if GTEST_LANG_CXX11 && (!defined(__GLIBCXX__) || __GLIBCXX__ > 20110325)
+#  define GTEST_ENV_HAS_STD_TUPLE_ 1
+# endif
+
+# if GTEST_ENV_HAS_TR1_TUPLE_ || GTEST_ENV_HAS_STD_TUPLE_
+#  define GTEST_USE_OWN_TR1_TUPLE 0
+# else
+#  define GTEST_USE_OWN_TR1_TUPLE 1
+# endif
+
+#endif  // GTEST_USE_OWN_TR1_TUPLE
+
+// To avoid conditional compilation everywhere, we make it
+// gtest-port.h's responsibility to #include the header implementing
+// tr1/tuple.
+#if GTEST_HAS_TR1_TUPLE
+
+# if GTEST_USE_OWN_TR1_TUPLE
+// This file was GENERATED by command:
+//     pump.py gtest-tuple.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2009 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Implements a subset of TR1 tuple needed by Google Test and Google Mock.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+
+#include <utility>  // For ::std::pair.
+
+// The compiler used in Symbian has a bug that prevents us from declaring the
+// tuple template as a friend (it complains that tuple is redefined).  This
+// hack bypasses the bug by declaring the members that should otherwise be
+// private as public.
+// Sun Studio versions < 12 also have the above bug.
+#if defined(__SYMBIAN32__) || (defined(__SUNPRO_CC) && __SUNPRO_CC < 0x590)
+# define GTEST_DECLARE_TUPLE_AS_FRIEND_ public:
+#else
+# define GTEST_DECLARE_TUPLE_AS_FRIEND_ \
+    template <GTEST_10_TYPENAMES_(U)> friend class tuple; \
+   private:
+#endif
+
+// GTEST_n_TUPLE_(T) is the type of an n-tuple.
+#define GTEST_0_TUPLE_(T) tuple<>
+#define GTEST_1_TUPLE_(T) tuple<T##0, void, void, void, void, void, void, \
+    void, void, void>
+#define GTEST_2_TUPLE_(T) tuple<T##0, T##1, void, void, void, void, void, \
+    void, void, void>
+#define GTEST_3_TUPLE_(T) tuple<T##0, T##1, T##2, void, void, void, void, \
+    void, void, void>
+#define GTEST_4_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, void, void, void, \
+    void, void, void>
+#define GTEST_5_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, void, void, \
+    void, void, void>
+#define GTEST_6_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, void, \
+    void, void, void>
+#define GTEST_7_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+    void, void, void>
+#define GTEST_8_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+    T##7, void, void>
+#define GTEST_9_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+    T##7, T##8, void>
+#define GTEST_10_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+    T##7, T##8, T##9>
+
+// GTEST_n_TYPENAMES_(T) declares a list of n typenames.
+#define GTEST_0_TYPENAMES_(T)
+#define GTEST_1_TYPENAMES_(T) typename T##0
+#define GTEST_2_TYPENAMES_(T) typename T##0, typename T##1
+#define GTEST_3_TYPENAMES_(T) typename T##0, typename T##1, typename T##2
+#define GTEST_4_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+    typename T##3
+#define GTEST_5_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+    typename T##3, typename T##4
+#define GTEST_6_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+    typename T##3, typename T##4, typename T##5
+#define GTEST_7_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+    typename T##3, typename T##4, typename T##5, typename T##6
+#define GTEST_8_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+    typename T##3, typename T##4, typename T##5, typename T##6, typename T##7
+#define GTEST_9_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+    typename T##3, typename T##4, typename T##5, typename T##6, \
+    typename T##7, typename T##8
+#define GTEST_10_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+    typename T##3, typename T##4, typename T##5, typename T##6, \
+    typename T##7, typename T##8, typename T##9
+
+// In theory, defining stuff in the ::std namespace is undefined
+// behavior.  We can do this as we are playing the role of a standard
+// library vendor.
+namespace std {
+namespace tr1 {
+
+template <typename T0 = void, typename T1 = void, typename T2 = void,
+    typename T3 = void, typename T4 = void, typename T5 = void,
+    typename T6 = void, typename T7 = void, typename T8 = void,
+    typename T9 = void>
+class tuple;
+
+// Anything in namespace gtest_internal is Google Test's INTERNAL
+// IMPLEMENTATION DETAIL and MUST NOT BE USED DIRECTLY in user code.
+namespace gtest_internal {
+
+// ByRef<T>::type is T if T is a reference; otherwise it's const T&.
+template <typename T>
+struct ByRef { typedef const T& type; };  // NOLINT
+template <typename T>
+struct ByRef<T&> { typedef T& type; };  // NOLINT
+
+// A handy wrapper for ByRef.
+#define GTEST_BY_REF_(T) typename ::std::tr1::gtest_internal::ByRef<T>::type
+
+// AddRef<T>::type is T if T is a reference; otherwise it's T&.  This
+// is the same as tr1::add_reference<T>::type.
+template <typename T>
+struct AddRef { typedef T& type; };  // NOLINT
+template <typename T>
+struct AddRef<T&> { typedef T& type; };  // NOLINT
+
+// A handy wrapper for AddRef.
+#define GTEST_ADD_REF_(T) typename ::std::tr1::gtest_internal::AddRef<T>::type
+
+// A helper for implementing get<k>().
+template <int k> class Get;
+
+// A helper for implementing tuple_element<k, T>.  kIndexValid is true
+// iff k < the number of fields in tuple type T.
+template <bool kIndexValid, int kIndex, class Tuple>
+struct TupleElement;
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 0, GTEST_10_TUPLE_(T) > {
+  typedef T0 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 1, GTEST_10_TUPLE_(T) > {
+  typedef T1 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 2, GTEST_10_TUPLE_(T) > {
+  typedef T2 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 3, GTEST_10_TUPLE_(T) > {
+  typedef T3 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 4, GTEST_10_TUPLE_(T) > {
+  typedef T4 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 5, GTEST_10_TUPLE_(T) > {
+  typedef T5 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 6, GTEST_10_TUPLE_(T) > {
+  typedef T6 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 7, GTEST_10_TUPLE_(T) > {
+  typedef T7 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 8, GTEST_10_TUPLE_(T) > {
+  typedef T8 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 9, GTEST_10_TUPLE_(T) > {
+  typedef T9 type;
+};
+
+}  // namespace gtest_internal
+
+template <>
+class tuple<> {
+ public:
+  tuple() {}
+  tuple(const tuple& /* t */)  {}
+  tuple& operator=(const tuple& /* t */) { return *this; }
+};
+
+template <GTEST_1_TYPENAMES_(T)>
+class GTEST_1_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0) : f0_(f0) {}
+
+  tuple(const tuple& t) : f0_(t.f0_) {}
+
+  template <GTEST_1_TYPENAMES_(U)>
+  tuple(const GTEST_1_TUPLE_(U)& t) : f0_(t.f0_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_1_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_1_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_1_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_1_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    return *this;
+  }
+
+  T0 f0_;
+};
+
+template <GTEST_2_TYPENAMES_(T)>
+class GTEST_2_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1) : f0_(f0),
+      f1_(f1) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_) {}
+
+  template <GTEST_2_TYPENAMES_(U)>
+  tuple(const GTEST_2_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_) {}
+  template <typename U0, typename U1>
+  tuple(const ::std::pair<U0, U1>& p) : f0_(p.first), f1_(p.second) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_2_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_2_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+  template <typename U0, typename U1>
+  tuple& operator=(const ::std::pair<U0, U1>& p) {
+    f0_ = p.first;
+    f1_ = p.second;
+    return *this;
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_2_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_2_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+};
+
+template <GTEST_3_TYPENAMES_(T)>
+class GTEST_3_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_(), f2_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+      GTEST_BY_REF_(T2) f2) : f0_(f0), f1_(f1), f2_(f2) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}
+
+  template <GTEST_3_TYPENAMES_(U)>
+  tuple(const GTEST_3_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_3_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_3_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_3_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_3_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    f2_ = t.f2_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+  T2 f2_;
+};
+
+template <GTEST_4_TYPENAMES_(T)>
+class GTEST_4_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_(), f2_(), f3_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3) : f0_(f0), f1_(f1), f2_(f2),
+      f3_(f3) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_) {}
+
+  template <GTEST_4_TYPENAMES_(U)>
+  tuple(const GTEST_4_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+      f3_(t.f3_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_4_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_4_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_4_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_4_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    f2_ = t.f2_;
+    f3_ = t.f3_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+  T2 f2_;
+  T3 f3_;
+};
+
+template <GTEST_5_TYPENAMES_(T)>
+class GTEST_5_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_(), f2_(), f3_(), f4_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3,
+      GTEST_BY_REF_(T4) f4) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+      f4_(t.f4_) {}
+
+  template <GTEST_5_TYPENAMES_(U)>
+  tuple(const GTEST_5_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+      f3_(t.f3_), f4_(t.f4_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_5_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_5_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_5_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_5_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    f2_ = t.f2_;
+    f3_ = t.f3_;
+    f4_ = t.f4_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+  T2 f2_;
+  T3 f3_;
+  T4 f4_;
+};
+
+template <GTEST_6_TYPENAMES_(T)>
+class GTEST_6_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+      GTEST_BY_REF_(T5) f5) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+      f5_(f5) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+      f4_(t.f4_), f5_(t.f5_) {}
+
+  template <GTEST_6_TYPENAMES_(U)>
+  tuple(const GTEST_6_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_6_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_6_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_6_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_6_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    f2_ = t.f2_;
+    f3_ = t.f3_;
+    f4_ = t.f4_;
+    f5_ = t.f5_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+  T2 f2_;
+  T3 f3_;
+  T4 f4_;
+  T5 f5_;
+};
+
+template <GTEST_7_TYPENAMES_(T)>
+class GTEST_7_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+      GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6) : f0_(f0), f1_(f1), f2_(f2),
+      f3_(f3), f4_(f4), f5_(f5), f6_(f6) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+      f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}
+
+  template <GTEST_7_TYPENAMES_(U)>
+  tuple(const GTEST_7_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_7_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_7_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_7_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_7_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    f2_ = t.f2_;
+    f3_ = t.f3_;
+    f4_ = t.f4_;
+    f5_ = t.f5_;
+    f6_ = t.f6_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+  T2 f2_;
+  T3 f3_;
+  T4 f4_;
+  T5 f5_;
+  T6 f6_;
+};
+
+template <GTEST_8_TYPENAMES_(T)>
+class GTEST_8_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+      GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6,
+      GTEST_BY_REF_(T7) f7) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+      f5_(f5), f6_(f6), f7_(f7) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+      f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}
+
+  template <GTEST_8_TYPENAMES_(U)>
+  tuple(const GTEST_8_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_8_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_8_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_8_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_8_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    f2_ = t.f2_;
+    f3_ = t.f3_;
+    f4_ = t.f4_;
+    f5_ = t.f5_;
+    f6_ = t.f6_;
+    f7_ = t.f7_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+  T2 f2_;
+  T3 f3_;
+  T4 f4_;
+  T5 f5_;
+  T6 f6_;
+  T7 f7_;
+};
+
+template <GTEST_9_TYPENAMES_(T)>
+class GTEST_9_TUPLE_(T) {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+      GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,
+      GTEST_BY_REF_(T8) f8) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+      f5_(f5), f6_(f6), f7_(f7), f8_(f8) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+      f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}
+
+  template <GTEST_9_TYPENAMES_(U)>
+  tuple(const GTEST_9_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_9_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_9_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_9_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_9_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    f2_ = t.f2_;
+    f3_ = t.f3_;
+    f4_ = t.f4_;
+    f5_ = t.f5_;
+    f6_ = t.f6_;
+    f7_ = t.f7_;
+    f8_ = t.f8_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+  T2 f2_;
+  T3 f3_;
+  T4 f4_;
+  T5 f5_;
+  T6 f6_;
+  T7 f7_;
+  T8 f8_;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+class tuple {
+ public:
+  template <int k> friend class gtest_internal::Get;
+
+  tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_(),
+      f9_() {}
+
+  explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+      GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+      GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,
+      GTEST_BY_REF_(T8) f8, GTEST_BY_REF_(T9) f9) : f0_(f0), f1_(f1), f2_(f2),
+      f3_(f3), f4_(f4), f5_(f5), f6_(f6), f7_(f7), f8_(f8), f9_(f9) {}
+
+  tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+      f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), f9_(t.f9_) {}
+
+  template <GTEST_10_TYPENAMES_(U)>
+  tuple(const GTEST_10_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+      f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_),
+      f9_(t.f9_) {}
+
+  tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+  template <GTEST_10_TYPENAMES_(U)>
+  tuple& operator=(const GTEST_10_TUPLE_(U)& t) {
+    return CopyFrom(t);
+  }
+
+  GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+  template <GTEST_10_TYPENAMES_(U)>
+  tuple& CopyFrom(const GTEST_10_TUPLE_(U)& t) {
+    f0_ = t.f0_;
+    f1_ = t.f1_;
+    f2_ = t.f2_;
+    f3_ = t.f3_;
+    f4_ = t.f4_;
+    f5_ = t.f5_;
+    f6_ = t.f6_;
+    f7_ = t.f7_;
+    f8_ = t.f8_;
+    f9_ = t.f9_;
+    return *this;
+  }
+
+  T0 f0_;
+  T1 f1_;
+  T2 f2_;
+  T3 f3_;
+  T4 f4_;
+  T5 f5_;
+  T6 f6_;
+  T7 f7_;
+  T8 f8_;
+  T9 f9_;
+};
+
+// 6.1.3.2 Tuple creation functions.
+
+// Known limitations: we don't support passing an
+// std::tr1::reference_wrapper<T> to make_tuple().  And we don't
+// implement tie().
+
+inline tuple<> make_tuple() { return tuple<>(); }
+
+template <GTEST_1_TYPENAMES_(T)>
+inline GTEST_1_TUPLE_(T) make_tuple(const T0& f0) {
+  return GTEST_1_TUPLE_(T)(f0);
+}
+
+template <GTEST_2_TYPENAMES_(T)>
+inline GTEST_2_TUPLE_(T) make_tuple(const T0& f0, const T1& f1) {
+  return GTEST_2_TUPLE_(T)(f0, f1);
+}
+
+template <GTEST_3_TYPENAMES_(T)>
+inline GTEST_3_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2) {
+  return GTEST_3_TUPLE_(T)(f0, f1, f2);
+}
+
+template <GTEST_4_TYPENAMES_(T)>
+inline GTEST_4_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+    const T3& f3) {
+  return GTEST_4_TUPLE_(T)(f0, f1, f2, f3);
+}
+
+template <GTEST_5_TYPENAMES_(T)>
+inline GTEST_5_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+    const T3& f3, const T4& f4) {
+  return GTEST_5_TUPLE_(T)(f0, f1, f2, f3, f4);
+}
+
+template <GTEST_6_TYPENAMES_(T)>
+inline GTEST_6_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+    const T3& f3, const T4& f4, const T5& f5) {
+  return GTEST_6_TUPLE_(T)(f0, f1, f2, f3, f4, f5);
+}
+
+template <GTEST_7_TYPENAMES_(T)>
+inline GTEST_7_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+    const T3& f3, const T4& f4, const T5& f5, const T6& f6) {
+  return GTEST_7_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6);
+}
+
+template <GTEST_8_TYPENAMES_(T)>
+inline GTEST_8_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+    const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7) {
+  return GTEST_8_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7);
+}
+
+template <GTEST_9_TYPENAMES_(T)>
+inline GTEST_9_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+    const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,
+    const T8& f8) {
+  return GTEST_9_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8);
+}
+
+template <GTEST_10_TYPENAMES_(T)>
+inline GTEST_10_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+    const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,
+    const T8& f8, const T9& f9) {
+  return GTEST_10_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9);
+}
+
+// 6.1.3.3 Tuple helper classes.
+
+template <typename Tuple> struct tuple_size;
+
+template <GTEST_0_TYPENAMES_(T)>
+struct tuple_size<GTEST_0_TUPLE_(T) > {
+  static const int value = 0;
+};
+
+template <GTEST_1_TYPENAMES_(T)>
+struct tuple_size<GTEST_1_TUPLE_(T) > {
+  static const int value = 1;
+};
+
+template <GTEST_2_TYPENAMES_(T)>
+struct tuple_size<GTEST_2_TUPLE_(T) > {
+  static const int value = 2;
+};
+
+template <GTEST_3_TYPENAMES_(T)>
+struct tuple_size<GTEST_3_TUPLE_(T) > {
+  static const int value = 3;
+};
+
+template <GTEST_4_TYPENAMES_(T)>
+struct tuple_size<GTEST_4_TUPLE_(T) > {
+  static const int value = 4;
+};
+
+template <GTEST_5_TYPENAMES_(T)>
+struct tuple_size<GTEST_5_TUPLE_(T) > {
+  static const int value = 5;
+};
+
+template <GTEST_6_TYPENAMES_(T)>
+struct tuple_size<GTEST_6_TUPLE_(T) > {
+  static const int value = 6;
+};
+
+template <GTEST_7_TYPENAMES_(T)>
+struct tuple_size<GTEST_7_TUPLE_(T) > {
+  static const int value = 7;
+};
+
+template <GTEST_8_TYPENAMES_(T)>
+struct tuple_size<GTEST_8_TUPLE_(T) > {
+  static const int value = 8;
+};
+
+template <GTEST_9_TYPENAMES_(T)>
+struct tuple_size<GTEST_9_TUPLE_(T) > {
+  static const int value = 9;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct tuple_size<GTEST_10_TUPLE_(T) > {
+  static const int value = 10;
+};
+
+template <int k, class Tuple>
+struct tuple_element {
+  typedef typename gtest_internal::TupleElement<
+      k < (tuple_size<Tuple>::value), k, Tuple>::type type;
+};
+
+#define GTEST_TUPLE_ELEMENT_(k, Tuple) typename tuple_element<k, Tuple >::type
+
+// 6.1.3.4 Element access.
+
+namespace gtest_internal {
+
+template <>
+class Get<0> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))
+  Field(Tuple& t) { return t.f0_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))
+  ConstField(const Tuple& t) { return t.f0_; }
+};
+
+template <>
+class Get<1> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))
+  Field(Tuple& t) { return t.f1_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))
+  ConstField(const Tuple& t) { return t.f1_; }
+};
+
+template <>
+class Get<2> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))
+  Field(Tuple& t) { return t.f2_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))
+  ConstField(const Tuple& t) { return t.f2_; }
+};
+
+template <>
+class Get<3> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))
+  Field(Tuple& t) { return t.f3_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))
+  ConstField(const Tuple& t) { return t.f3_; }
+};
+
+template <>
+class Get<4> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))
+  Field(Tuple& t) { return t.f4_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))
+  ConstField(const Tuple& t) { return t.f4_; }
+};
+
+template <>
+class Get<5> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))
+  Field(Tuple& t) { return t.f5_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))
+  ConstField(const Tuple& t) { return t.f5_; }
+};
+
+template <>
+class Get<6> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))
+  Field(Tuple& t) { return t.f6_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))
+  ConstField(const Tuple& t) { return t.f6_; }
+};
+
+template <>
+class Get<7> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))
+  Field(Tuple& t) { return t.f7_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))
+  ConstField(const Tuple& t) { return t.f7_; }
+};
+
+template <>
+class Get<8> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))
+  Field(Tuple& t) { return t.f8_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))
+  ConstField(const Tuple& t) { return t.f8_; }
+};
+
+template <>
+class Get<9> {
+ public:
+  template <class Tuple>
+  static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))
+  Field(Tuple& t) { return t.f9_; }  // NOLINT
+
+  template <class Tuple>
+  static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))
+  ConstField(const Tuple& t) { return t.f9_; }
+};
+
+}  // namespace gtest_internal
+
+template <int k, GTEST_10_TYPENAMES_(T)>
+GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T)))
+get(GTEST_10_TUPLE_(T)& t) {
+  return gtest_internal::Get<k>::Field(t);
+}
+
+template <int k, GTEST_10_TYPENAMES_(T)>
+GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(k,  GTEST_10_TUPLE_(T)))
+get(const GTEST_10_TUPLE_(T)& t) {
+  return gtest_internal::Get<k>::ConstField(t);
+}
+
+// 6.1.3.5 Relational operators
+
+// We only implement == and !=, as we don't have a need for the rest yet.
+
+namespace gtest_internal {
+
+// SameSizeTuplePrefixComparator<k, k>::Eq(t1, t2) returns true if the
+// first k fields of t1 equals the first k fields of t2.
+// SameSizeTuplePrefixComparator(k1, k2) would be a compiler error if
+// k1 != k2.
+template <int kSize1, int kSize2>
+struct SameSizeTuplePrefixComparator;
+
+template <>
+struct SameSizeTuplePrefixComparator<0, 0> {
+  template <class Tuple1, class Tuple2>
+  static bool Eq(const Tuple1& /* t1 */, const Tuple2& /* t2 */) {
+    return true;
+  }
+};
+
+template <int k>
+struct SameSizeTuplePrefixComparator<k, k> {
+  template <class Tuple1, class Tuple2>
+  static bool Eq(const Tuple1& t1, const Tuple2& t2) {
+    return SameSizeTuplePrefixComparator<k - 1, k - 1>::Eq(t1, t2) &&
+        ::std::tr1::get<k - 1>(t1) == ::std::tr1::get<k - 1>(t2);
+  }
+};
+
+}  // namespace gtest_internal
+
+template <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>
+inline bool operator==(const GTEST_10_TUPLE_(T)& t,
+                       const GTEST_10_TUPLE_(U)& u) {
+  return gtest_internal::SameSizeTuplePrefixComparator<
+      tuple_size<GTEST_10_TUPLE_(T) >::value,
+      tuple_size<GTEST_10_TUPLE_(U) >::value>::Eq(t, u);
+}
+
+template <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>
+inline bool operator!=(const GTEST_10_TUPLE_(T)& t,
+                       const GTEST_10_TUPLE_(U)& u) { return !(t == u); }
+
+// 6.1.4 Pairs.
+// Unimplemented.
+
+}  // namespace tr1
+}  // namespace std
+
+#undef GTEST_0_TUPLE_
+#undef GTEST_1_TUPLE_
+#undef GTEST_2_TUPLE_
+#undef GTEST_3_TUPLE_
+#undef GTEST_4_TUPLE_
+#undef GTEST_5_TUPLE_
+#undef GTEST_6_TUPLE_
+#undef GTEST_7_TUPLE_
+#undef GTEST_8_TUPLE_
+#undef GTEST_9_TUPLE_
+#undef GTEST_10_TUPLE_
+
+#undef GTEST_0_TYPENAMES_
+#undef GTEST_1_TYPENAMES_
+#undef GTEST_2_TYPENAMES_
+#undef GTEST_3_TYPENAMES_
+#undef GTEST_4_TYPENAMES_
+#undef GTEST_5_TYPENAMES_
+#undef GTEST_6_TYPENAMES_
+#undef GTEST_7_TYPENAMES_
+#undef GTEST_8_TYPENAMES_
+#undef GTEST_9_TYPENAMES_
+#undef GTEST_10_TYPENAMES_
+
+#undef GTEST_DECLARE_TUPLE_AS_FRIEND_
+#undef GTEST_BY_REF_
+#undef GTEST_ADD_REF_
+#undef GTEST_TUPLE_ELEMENT_
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+# elif GTEST_ENV_HAS_STD_TUPLE_
+#  include <tuple>
+// C++11 puts its tuple into the ::std namespace rather than
+// ::std::tr1.  gtest expects tuple to live in ::std::tr1, so put it there.
+// This causes undefined behavior, but supported compilers react in
+// the way we intend.
+namespace std {
+namespace tr1 {
+using ::std::get;
+using ::std::make_tuple;
+using ::std::tuple;
+using ::std::tuple_element;
+using ::std::tuple_size;
+}
+}
+
+# elif GTEST_OS_SYMBIAN
+
+// On Symbian, BOOST_HAS_TR1_TUPLE causes Boost's TR1 tuple library to
+// use STLport's tuple implementation, which unfortunately doesn't
+// work as the copy of STLport distributed with Symbian is incomplete.
+// By making sure BOOST_HAS_TR1_TUPLE is undefined, we force Boost to
+// use its own tuple implementation.
+#  ifdef BOOST_HAS_TR1_TUPLE
+#   undef BOOST_HAS_TR1_TUPLE
+#  endif  // BOOST_HAS_TR1_TUPLE
+
+// This prevents <boost/tr1/detail/config.hpp>, which defines
+// BOOST_HAS_TR1_TUPLE, from being #included by Boost's <tuple>.
+#  define BOOST_TR1_DETAIL_CONFIG_HPP_INCLUDED
+#  include <tuple>
+
+# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40000)
+// GCC 4.0+ implements tr1/tuple in the <tr1/tuple> header.  This does
+// not conform to the TR1 spec, which requires the header to be <tuple>.
+
+#  if !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
+// Until version 4.3.2, gcc has a bug that causes <tr1/functional>,
+// which is #included by <tr1/tuple>, to not compile when RTTI is
+// disabled.  _TR1_FUNCTIONAL is the header guard for
+// <tr1/functional>.  Hence the following #define is a hack to prevent
+// <tr1/functional> from being included.
+#   define _TR1_FUNCTIONAL 1
+#   include <tr1/tuple>
+#   undef _TR1_FUNCTIONAL  // Allows the user to #include
+                        // <tr1/functional> if he chooses to.
+#  else
+#   include <tr1/tuple>  // NOLINT
+#  endif  // !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
+
+# else
+// If the compiler is not GCC 4.0+, we assume the user is using a
+// spec-conforming TR1 implementation.
+#  include <tuple>  // NOLINT
+# endif  // GTEST_USE_OWN_TR1_TUPLE
+
+#endif  // GTEST_HAS_TR1_TUPLE
+
+// Determines whether clone(2) is supported.
+// Usually it will only be available on Linux, excluding
+// Linux on the Itanium architecture.
+// Also see http://linux.die.net/man/2/clone.
+#ifndef GTEST_HAS_CLONE
+// The user didn't tell us, so we need to figure it out.
+
+# if GTEST_OS_LINUX && !defined(__ia64__)
+#  if GTEST_OS_LINUX_ANDROID
+// On Android, clone() is only available on ARM starting with Gingerbread.
+#    if defined(__arm__) && __ANDROID_API__ >= 9
+#     define GTEST_HAS_CLONE 1
+#    else
+#     define GTEST_HAS_CLONE 0
+#    endif
+#  else
+#   define GTEST_HAS_CLONE 1
+#  endif
+# else
+#  define GTEST_HAS_CLONE 0
+# endif  // GTEST_OS_LINUX && !defined(__ia64__)
+
+#endif  // GTEST_HAS_CLONE
+
+// Determines whether to support stream redirection. This is used to test
+// output correctness and to implement death tests.
+#ifndef GTEST_HAS_STREAM_REDIRECTION
+// By default, we assume that stream redirection is supported on all
+// platforms except known mobile ones.
+# if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN
+#  define GTEST_HAS_STREAM_REDIRECTION 0
+# else
+#  define GTEST_HAS_STREAM_REDIRECTION 1
+# endif  // !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_SYMBIAN
+#endif  // GTEST_HAS_STREAM_REDIRECTION
+
+// Determines whether to support death tests.
+// Google Test does not support death tests for VC 7.1 and earlier as
+// abort() in a VC 7.1 application compiled as GUI in debug config
+// pops up a dialog window that cannot be suppressed programmatically.
+#if (GTEST_OS_LINUX || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \
+     (GTEST_OS_MAC && !GTEST_OS_IOS) || GTEST_OS_IOS_SIMULATOR || \
+     (GTEST_OS_WINDOWS_DESKTOP && _MSC_VER >= 1400) || \
+     GTEST_OS_WINDOWS_MINGW || GTEST_OS_AIX || GTEST_OS_HPUX || \
+     GTEST_OS_OPENBSD || GTEST_OS_QNX)
+# define GTEST_HAS_DEATH_TEST 1
+# include <vector>  // NOLINT
+#endif
+
+// We don't support MSVC 7.1 with exceptions disabled now.  Therefore
+// all the compilers we care about are adequate for supporting
+// value-parameterized tests.
+#define GTEST_HAS_PARAM_TEST 1
+
+// Determines whether to support type-driven tests.
+
+// Typed tests need <typeinfo> and variadic macros, which GCC, VC++ 8.0,
+// Sun Pro CC, IBM Visual Age, and HP aCC support.
+#if defined(__GNUC__) || (_MSC_VER >= 1400) || defined(__SUNPRO_CC) || \
+    defined(__IBMCPP__) || defined(__HP_aCC)
+# define GTEST_HAS_TYPED_TEST 1
+# define GTEST_HAS_TYPED_TEST_P 1
+#endif
+
+// Determines whether to support Combine(). This only makes sense when
+// value-parameterized tests are enabled.  The implementation doesn't
+// work on Sun Studio since it doesn't understand templated conversion
+// operators.
+#if GTEST_HAS_PARAM_TEST && GTEST_HAS_TR1_TUPLE && !defined(__SUNPRO_CC)
+# define GTEST_HAS_COMBINE 1
+#endif
+
+// Determines whether the system compiler uses UTF-16 for encoding wide strings.
+#define GTEST_WIDE_STRING_USES_UTF16_ \
+    (GTEST_OS_WINDOWS || GTEST_OS_CYGWIN || GTEST_OS_SYMBIAN || GTEST_OS_AIX)
+
+// Determines whether test results can be streamed to a socket.
+#if GTEST_OS_LINUX
+# define GTEST_CAN_STREAM_RESULTS_ 1
+#endif
+
+// Defines some utility macros.
+
+// The GNU compiler emits a warning if nested "if" statements are followed by
+// an "else" statement and braces are not used to explicitly disambiguate the
+// "else" binding.  This leads to problems with code like:
+//
+//   if (gate)
+//     ASSERT_*(condition) << "Some message";
+//
+// The "switch (0) case 0:" idiom is used to suppress this.
+#ifdef __INTEL_COMPILER
+# define GTEST_AMBIGUOUS_ELSE_BLOCKER_
+#else
+# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: default:  // NOLINT
+#endif
+
+// Use this annotation at the end of a struct/class definition to
+// prevent the compiler from optimizing away instances that are never
+// used.  This is useful when all interesting logic happens inside the
+// c'tor and / or d'tor.  Example:
+//
+//   struct Foo {
+//     Foo() { ... }
+//   } GTEST_ATTRIBUTE_UNUSED_;
+//
+// Also use it after a variable or parameter declaration to tell the
+// compiler the variable/parameter does not have to be used.
+#if defined(__GNUC__) && !defined(COMPILER_ICC)
+# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))
+#else
+# define GTEST_ATTRIBUTE_UNUSED_
+#endif
+
+// A macro to disallow operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_ASSIGN_(type)\
+  void operator=(type const &)
+
+// A macro to disallow copy constructor and operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type)\
+  type(type const &);\
+  GTEST_DISALLOW_ASSIGN_(type)
+
+// Tell the compiler to warn about unused return values for functions declared
+// with this macro.  The macro should be used on function declarations
+// following the argument list:
+//
+//   Sprocket* AllocateSprocket() GTEST_MUST_USE_RESULT_;
+#if defined(__GNUC__) && (GTEST_GCC_VER_ >= 30400) && !defined(COMPILER_ICC)
+# define GTEST_MUST_USE_RESULT_ __attribute__ ((warn_unused_result))
+#else
+# define GTEST_MUST_USE_RESULT_
+#endif  // __GNUC__ && (GTEST_GCC_VER_ >= 30400) && !COMPILER_ICC
+
+// Determine whether the compiler supports Microsoft's Structured Exception
+// Handling.  This is supported by several Windows compilers but generally
+// does not exist on any other system.
+#ifndef GTEST_HAS_SEH
+// The user didn't tell us, so we need to figure it out.
+
+# if defined(_MSC_VER) || defined(__BORLANDC__)
+// These two compilers are known to support SEH.
+#  define GTEST_HAS_SEH 1
+# else
+// Assume no SEH.
+#  define GTEST_HAS_SEH 0
+# endif
+
+#endif  // GTEST_HAS_SEH
+
+#ifdef _MSC_VER
+
+# if GTEST_LINKED_AS_SHARED_LIBRARY
+#  define GTEST_API_ __declspec(dllimport)
+# elif GTEST_CREATE_SHARED_LIBRARY
+#  define GTEST_API_ __declspec(dllexport)
+# endif
+
+#endif  // _MSC_VER
+
+#ifndef GTEST_API_
+# define GTEST_API_
+#endif
+
+#ifdef __GNUC__
+// Ask the compiler to never inline a given function.
+# define GTEST_NO_INLINE_ __attribute__((noinline))
+#else
+# define GTEST_NO_INLINE_
+#endif
+
+// _LIBCPP_VERSION is defined by the libc++ library from the LLVM project.
+#if defined(__GLIBCXX__) || defined(_LIBCPP_VERSION)
+# define GTEST_HAS_CXXABI_H_ 1
+#else
+# define GTEST_HAS_CXXABI_H_ 0
+#endif
+
+namespace testing {
+
+class Message;
+
+namespace internal {
+
+// A secret type that Google Test users don't know about.  It has no
+// definition on purpose.  Therefore it's impossible to create a
+// Secret object, which is what we want.
+class Secret;
+
+// The GTEST_COMPILE_ASSERT_ macro can be used to verify that a compile time
+// expression is true. For example, you could use it to verify the
+// size of a static array:
+//
+//   GTEST_COMPILE_ASSERT_(ARRAYSIZE(content_type_names) == CONTENT_NUM_TYPES,
+//                         content_type_names_incorrect_size);
+//
+// or to make sure a struct is smaller than a certain size:
+//
+//   GTEST_COMPILE_ASSERT_(sizeof(foo) < 128, foo_too_large);
+//
+// The second argument to the macro is the name of the variable. If
+// the expression is false, most compilers will issue a warning/error
+// containing the name of the variable.
+
+// Intentionally empty; only its instantiation with a compile-time
+// boolean matters (see the array-size trick in the macro below).
+template <bool>
+struct CompileAssert {
+};
+
+// When expr is false, the typedef declares an array of size -1, which
+// every conforming compiler rejects, naming 'msg' in the diagnostic.
+#define GTEST_COMPILE_ASSERT_(expr, msg) \
+  typedef ::testing::internal::CompileAssert<(static_cast<bool>(expr))> \
+      msg[static_cast<bool>(expr) ? 1 : -1] GTEST_ATTRIBUTE_UNUSED_
+
+// Implementation details of GTEST_COMPILE_ASSERT_:
+//
+// - GTEST_COMPILE_ASSERT_ works by defining an array type that has -1
+//   elements (and thus is invalid) when the expression is false.
+//
+// - The simpler definition
+//
+//    #define GTEST_COMPILE_ASSERT_(expr, msg) typedef char msg[(expr) ? 1 : -1]
+//
+//   does not work, as gcc supports variable-length arrays whose sizes
+//   are determined at run-time (this is gcc's extension and not part
+//   of the C++ standard).  As a result, gcc fails to reject the
+//   following code with the simple definition:
+//
+//     int foo;
+//     GTEST_COMPILE_ASSERT_(foo, msg); // not supposed to compile as foo is
+//                                      // not a compile-time constant.
+//
+// - By using the type CompileAssert<(bool(expr))>, we ensure that
+//   expr is a compile-time constant.  (Template arguments must be
+//   determined at compile-time.)
+//
+// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
+//   to work around a bug in gcc 3.4.4 and 4.0.1.  If we had written
+//
+//     CompileAssert<bool(expr)>
+//
+//   instead, these compilers will refuse to compile
+//
+//     GTEST_COMPILE_ASSERT_(5 > 0, some_message);
+//
+//   (They seem to think the ">" in "5 > 0" marks the end of the
+//   template argument list.)
+//
+// - The array size is (bool(expr) ? 1 : -1), instead of simply
+//
+//     ((expr) ? 1 : -1).
+//
+//   This is to avoid running into a bug in MS VC 7.1, which
+//   causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
+
+// StaticAssertTypeEqHelper is used by StaticAssertTypeEq defined in gtest.h.
+//
+// This template is declared, but intentionally undefined.
+// Declared but never defined: instantiating it with two different
+// types fails at compile time.  The T/T specialization below is the
+// only complete type.
+template <typename T1, typename T2>
+struct StaticAssertTypeEqHelper;
+
+// Complete (empty) specialization for identical types.
+template <typename T>
+struct StaticAssertTypeEqHelper<T, T> {};
+
+#if GTEST_HAS_GLOBAL_STRING
+typedef ::string string;
+#else
+typedef ::std::string string;
+#endif  // GTEST_HAS_GLOBAL_STRING
+
+#if GTEST_HAS_GLOBAL_WSTRING
+typedef ::wstring wstring;
+#elif GTEST_HAS_STD_WSTRING
+typedef ::std::wstring wstring;
+#endif  // GTEST_HAS_GLOBAL_WSTRING
+
+// A helper for suppressing warnings on constant condition.  It just
+// returns 'condition'.
+GTEST_API_ bool IsTrue(bool condition);
+
+// Defines scoped_ptr.
+
+// This implementation of scoped_ptr is PARTIAL - it only contains
+// enough stuff to satisfy Google Test's need.
+template <typename T>
+class scoped_ptr {
+ public:
+  typedef T element_type;
+
+  explicit scoped_ptr(T* p = NULL) : ptr_(p) {}
+  ~scoped_ptr() { reset(); }
+
+  T& operator*() const { return *ptr_; }
+  T* operator->() const { return ptr_; }
+  T* get() const { return ptr_; }
+
+  // Relinquishes ownership without deleting; the caller becomes
+  // responsible for the returned pointer.
+  T* release() {
+    T* const ptr = ptr_;
+    ptr_ = NULL;
+    return ptr;
+  }
+
+  // Deletes the currently owned object (if any) and takes ownership
+  // of p.  Passing the already-owned pointer is a deliberate no-op,
+  // so reset(get()) is safe.
+  void reset(T* p = NULL) {
+    if (p != ptr_) {
+      if (IsTrue(sizeof(T) > 0)) {  // Makes sure T is a complete type.
+        delete ptr_;
+      }
+      ptr_ = p;
+    }
+  }
+
+ private:
+  T* ptr_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(scoped_ptr);
+};
+
+// Defines RE.
+
+// A simple C++ wrapper for <regex.h>.  It uses the POSIX Extended
+// Regular Expression syntax.
+class GTEST_API_ RE {
+ public:
+  // A copy constructor is required by the Standard to initialize object
+  // references from r-values.
+  RE(const RE& other) { Init(other.pattern()); }
+
+  // Constructs an RE from a string.
+  RE(const ::std::string& regex) { Init(regex.c_str()); }  // NOLINT
+
+#if GTEST_HAS_GLOBAL_STRING
+
+  RE(const ::string& regex) { Init(regex.c_str()); }  // NOLINT
+
+#endif  // GTEST_HAS_GLOBAL_STRING
+
+  RE(const char* regex) { Init(regex); }  // NOLINT
+  ~RE();
+
+  // Returns the string representation of the regex.
+  const char* pattern() const { return pattern_; }
+
+  // FullMatch(str, re) returns true iff regular expression re matches
+  // the entire str.
+  // PartialMatch(str, re) returns true iff regular expression re
+  // matches a substring of str (including str itself).
+  //
+  // TODO(wan@google.com): make FullMatch() and PartialMatch() work
+  // when str contains NUL characters.
+  static bool FullMatch(const ::std::string& str, const RE& re) {
+    return FullMatch(str.c_str(), re);
+  }
+  static bool PartialMatch(const ::std::string& str, const RE& re) {
+    return PartialMatch(str.c_str(), re);
+  }
+
+#if GTEST_HAS_GLOBAL_STRING
+
+  static bool FullMatch(const ::string& str, const RE& re) {
+    return FullMatch(str.c_str(), re);
+  }
+  static bool PartialMatch(const ::string& str, const RE& re) {
+    return PartialMatch(str.c_str(), re);
+  }
+
+#endif  // GTEST_HAS_GLOBAL_STRING
+
+  static bool FullMatch(const char* str, const RE& re);
+  static bool PartialMatch(const char* str, const RE& re);
+
+ private:
+  // Compiles 'regex' and initializes the members below.  Defined in the
+  // implementation file, not visible here.
+  void Init(const char* regex);
+
+  // We use a const char* instead of an std::string, as Google Test used to be
+  // used where std::string is not available.  TODO(wan@google.com): change to
+  // std::string.
+  const char* pattern_;
+  // Presumably true iff Init() compiled the regex successfully -- Init()'s
+  // definition is not visible here; confirm before relying on it.
+  bool is_valid_;
+
+#if GTEST_USES_POSIX_RE
+
+  regex_t full_regex_;     // For FullMatch().
+  regex_t partial_regex_;  // For PartialMatch().
+
+#else  // GTEST_USES_SIMPLE_RE
+
+  const char* full_pattern_;  // For FullMatch();
+
+#endif
+
+  GTEST_DISALLOW_ASSIGN_(RE);
+};
+
+// Formats a source file path and a line number as they would appear
+// in an error message from the compiler used to compile this code.
+GTEST_API_ ::std::string FormatFileLocation(const char* file, int line);
+
+// Formats a file location for compiler-independent XML output.
+// Although this function is not platform dependent, we put it next to
+// FormatFileLocation in order to contrast the two functions.
+GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(const char* file,
+                                                               int line);
+
+// Defines logging utilities:
+//   GTEST_LOG_(severity) - logs messages at the specified severity level. The
+//                          message itself is streamed into the macro.
+//   LogToStderr()  - directs all log messages to stderr.
+//   FlushInfoLog() - flushes informational log messages.
+
+// Severity levels in increasing order; GTEST_FATAL aborts the program
+// (see ~GTestLog below).
+enum GTestLogSeverity {
+  GTEST_INFO,
+  GTEST_WARNING,
+  GTEST_ERROR,
+  GTEST_FATAL
+};
+
+// Formats log entry severity, provides a stream object for streaming the
+// log message, and terminates the message with a newline when going out of
+// scope.
+class GTEST_API_ GTestLog {
+ public:
+  GTestLog(GTestLogSeverity severity, const char* file, int line);
+
+  // Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
+  ~GTestLog();
+
+  // All severities stream to stderr.
+  ::std::ostream& GetStream() { return ::std::cerr; }
+
+ private:
+  const GTestLogSeverity severity_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestLog);
+};
+
+// Constructs a temporary GTestLog whose destructor, at the end of the
+// full expression, terminates the message (and aborts if FATAL).
+#define GTEST_LOG_(severity) \
+    ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \
+                                  __FILE__, __LINE__).GetStream()
+
+// No-op: log output already goes to stderr (see GTestLog::GetStream).
+inline void LogToStderr() {}
+// fflush(NULL) flushes all open output streams.
+inline void FlushInfoLog() { fflush(NULL); }
+
+// INTERNAL IMPLEMENTATION - DO NOT USE.
+//
+// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition
+// is not satisfied.
+//  Synopsis:
+//    GTEST_CHECK_(boolean_condition);
+//     or
+//    GTEST_CHECK_(boolean_condition) << "Additional message";
+//
+//    This checks the condition and if the condition is not satisfied
+//    it prints message about the condition violation, including the
+//    condition itself, plus additional message streamed into it, if any,
+//    and then it aborts the program. It aborts the program irrespective of
+//    whether it is built in the debug mode or not.
+//
+// The empty 'if' branch (together with GTEST_AMBIGUOUS_ELSE_BLOCKER_,
+// defined elsewhere) lets callers stream extra output into the 'else'
+// arm while keeping the macro safe inside unbraced if/else statements.
+#define GTEST_CHECK_(condition) \
+    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+    if (::testing::internal::IsTrue(condition)) \
+      ; \
+    else \
+      GTEST_LOG_(FATAL) << "Condition " #condition " failed. "
+
+// An all-mode assert to verify that the given POSIX-style function
+// call returns 0 (indicating success).  Known limitation: this
+// doesn't expand to a balanced 'if' statement, so enclose the macro
+// in {} if you need to use it as the only statement in an 'if'
+// branch.
+#define GTEST_CHECK_POSIX_SUCCESS_(posix_call) \
+  if (const int gtest_error = (posix_call)) \
+    GTEST_LOG_(FATAL) << #posix_call << "failed with error " \
+                      << gtest_error
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Use ImplicitCast_ as a safe version of static_cast for upcasting in
+// the type hierarchy (e.g. casting a Foo* to a SuperclassOfFoo* or a
+// const Foo*).  When you use ImplicitCast_, the compiler checks that
+// the cast is safe.  Such explicit ImplicitCast_s are necessary in
+// surprisingly many situations where C++ demands an exact type match
+// instead of an argument type convertible to a target type.
+//
+// The syntax for using ImplicitCast_ is the same as for static_cast:
+//
+//   ImplicitCast_<ToType>(expr)
+//
+// ImplicitCast_ would have been part of the C++ standard library,
+// but the proposal was submitted too late.  It will probably make
+// its way into the language in the future.
+//
+// This relatively ugly name is intentional. It prevents clashes with
+// similar functions users may have (e.g., implicit_cast). The internal
+// namespace alone is not enough because the function can be found by ADL.
+// Declaring the parameter as 'To' makes the compiler perform the
+// implicit conversion at the call site; the body is the identity.
+template<typename To>
+inline To ImplicitCast_(To x) { return x; }
+
+// When you upcast (that is, cast a pointer from type Foo to type
+// SuperclassOfFoo), it's fine to use ImplicitCast_<>, since upcasts
+// always succeed.  When you downcast (that is, cast a pointer from
+// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because
+// how do you know the pointer is really of type SubclassOfFoo?  It
+// could be a bare Foo, or of type DifferentSubclassOfFoo.  Thus,
+// when you downcast, you should use this macro.  In debug mode, we
+// use dynamic_cast<> to double-check the downcast is legal (we die
+// if it's not).  In normal mode, we do the efficient static_cast<>
+// instead.  Thus, it's important to test in debug mode to make sure
+// the cast is legal!
+//    This is the only place in the code we should use dynamic_cast<>.
+// In particular, you SHOULDN'T be using dynamic_cast<> in order to
+// do RTTI (eg code like this:
+//    if (dynamic_cast<Subclass1>(foo)) HandleASubclass1Object(foo);
+//    if (dynamic_cast<Subclass2>(foo)) HandleASubclass2Object(foo);
+// You should design the code some other way not to need this.
+//
+// This relatively ugly name is intentional. It prevents clashes with
+// similar functions users may have (e.g., down_cast). The internal
+// namespace alone is not enough because the function can be found by ADL.
+template<typename To, typename From>  // use like this: DownCast_<T*>(foo);
+inline To DownCast_(From* f) {  // so we only accept pointers
+  // Ensures that To is a sub-type of From *.  This test is here only
+  // for compile-time type checking, and has no overhead in an
+  // optimized build at run-time, as it will be optimized away
+  // completely.
+  if (false) {
+    const To to = NULL;
+    ::testing::internal::ImplicitCast_<From*>(to);
+  }
+
+#if GTEST_HAS_RTTI
+  // RTTI: debug mode only!  A NULL 'f' passes this check by design.
+  GTEST_CHECK_(f == NULL || dynamic_cast<To>(f) != NULL);
+#endif
+  return static_cast<To>(f);
+}
+
+// Downcasts the pointer of type Base to Derived.
+// Derived must be a subclass of Base. The parameter MUST
+// point to a class of type Derived, not any subclass of it.
+// When RTTI is available, the function performs a runtime
+// check to enforce this.
+template <class Derived, class Base>
+Derived* CheckedDowncastToActualType(Base* base) {
+#if GTEST_HAS_RTTI
+  // typeid equality demands the exact dynamic type Derived, not a
+  // subclass of it (see the contract in the comment above).
+  GTEST_CHECK_(typeid(*base) == typeid(Derived));
+  return dynamic_cast<Derived*>(base);  // NOLINT
+#else
+  return static_cast<Derived*>(base);  // Poor man's downcast.
+#endif
+}
+
+#if GTEST_HAS_STREAM_REDIRECTION
+
+// Defines the stderr capturer:
+//   CaptureStdout     - starts capturing stdout.
+//   GetCapturedStdout - stops capturing stdout and returns the captured string.
+//   CaptureStderr     - starts capturing stderr.
+//   GetCapturedStderr - stops capturing stderr and returns the captured string.
+//
+GTEST_API_ void CaptureStdout();
+GTEST_API_ std::string GetCapturedStdout();
+GTEST_API_ void CaptureStderr();
+GTEST_API_ std::string GetCapturedStderr();
+
+#endif  // GTEST_HAS_STREAM_REDIRECTION
+
+
+#if GTEST_HAS_DEATH_TEST
+
+const ::std::vector<testing::internal::string>& GetInjectableArgvs();
+void SetInjectableArgvs(const ::std::vector<testing::internal::string>*
+                             new_argvs);
+
+// A copy of all command line arguments.  Set by InitGoogleTest().
+extern ::std::vector<testing::internal::string> g_argvs;
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+// Defines synchronization primitives.
+
+#if GTEST_HAS_PTHREAD
+
+// Sleeps for (roughly) n milli-seconds.  This function is only for
+// testing Google Test's own constructs.  Don't use it in user tests,
+// either directly or indirectly.
+inline void SleepMilliseconds(int n) {
+  // Split n into whole seconds plus leftover nanoseconds: tv_nsec must
+  // be in [0, 999999999], so the previous n*1000L*1000L overflowed that
+  // range for n >= 1000, making nanosleep() fail with EINVAL and return
+  // immediately instead of sleeping.
+  const timespec time = {
+    n / 1000,                     // Whole seconds.
+    (n % 1000) * 1000L * 1000L,   // And the remaining ms, in ns.
+  };
+  nanosleep(&time, NULL);
+}
+
+// Allows a controller thread to pause execution of newly created
+// threads until notified.  Instances of this class must be created
+// and destroyed in the controller thread.
+//
+// This class is only for testing Google Test's own constructs. Do not
+// use it in user tests, either directly or indirectly.
+class Notification {
+ public:
+  Notification() : notified_(false) {
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));
+  }
+  ~Notification() {
+    // NOTE(review): the return value of pthread_mutex_destroy() is
+    // ignored here, unlike in Mutex::~Mutex() -- confirm intentional.
+    pthread_mutex_destroy(&mutex_);
+  }
+
+  // Notifies all threads created with this notification to start. Must
+  // be called from the controller thread.
+  void Notify() {
+    pthread_mutex_lock(&mutex_);
+    notified_ = true;
+    pthread_mutex_unlock(&mutex_);
+  }
+
+  // Blocks until the controller thread notifies. Must be called from a test
+  // thread.
+  void WaitForNotification() {
+    // Polls notified_ under the mutex every 10 ms; no condition
+    // variable is used.
+    for (;;) {
+      pthread_mutex_lock(&mutex_);
+      const bool notified = notified_;
+      pthread_mutex_unlock(&mutex_);
+      if (notified)
+        break;
+      SleepMilliseconds(10);
+    }
+  }
+
+ private:
+  pthread_mutex_t mutex_;
+  bool notified_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification);
+};
+
+// As a C-function, ThreadFuncWithCLinkage cannot be templated itself.
+// Consequently, it cannot select a correct instantiation of ThreadWithParam
+// in order to call its Run(). Introducing ThreadWithParamBase as a
+// non-templated base class for ThreadWithParam allows us to bypass this
+// problem.
+class ThreadWithParamBase {
+ public:
+  virtual ~ThreadWithParamBase() {}
+  // Body of the thread; invoked by ThreadFuncWithCLinkage() below.
+  virtual void Run() = 0;
+};
+
+// pthread_create() accepts a pointer to a function type with the C linkage.
+// According to the Standard (7.5/1), function types with different linkages
+// are different even if they are otherwise identical.  Some compilers (for
+// example, SunStudio) treat them as different types.  Since class methods
+// cannot be defined with C-linkage we need to define a free C-function to
+// pass into pthread_create().
+// 'thread' must point to a ThreadWithParamBase; it is supplied as the
+// argument to pthread_create() in ThreadWithParam's constructor.
+extern "C" inline void* ThreadFuncWithCLinkage(void* thread) {
+  static_cast<ThreadWithParamBase*>(thread)->Run();
+  return NULL;
+}
+
+// Helper class for testing Google Test's multi-threading constructs.
+// To use it, write:
+//
+//   void ThreadFunc(int param) { /* Do things with param */ }
+//   Notification thread_can_start;
+//   ...
+//   // The thread_can_start parameter is optional; you can supply NULL.
+//   ThreadWithParam<int> thread(&ThreadFunc, 5, &thread_can_start);
+//   thread_can_start.Notify();
+//
+// These classes are only for testing Google Test's own constructs. Do
+// not use them in user tests, either directly or indirectly.
+template <typename T>
+class ThreadWithParam : public ThreadWithParamBase {
+ public:
+  typedef void (*UserThreadFunc)(T);
+
+  ThreadWithParam(
+      UserThreadFunc func, T param, Notification* thread_can_start)
+      : func_(func),
+        param_(param),
+        thread_can_start_(thread_can_start),
+        finished_(false) {
+    ThreadWithParamBase* const base = this;
+    // The thread can be created only after all fields except thread_
+    // have been initialized.
+    GTEST_CHECK_POSIX_SUCCESS_(
+        pthread_create(&thread_, 0, &ThreadFuncWithCLinkage, base));
+  }
+  ~ThreadWithParam() { Join(); }
+
+  // Blocks until the thread function returns.  Idempotent; also called
+  // from the destructor.  finished_ is unsynchronized, so Join() should
+  // only be called from the controller thread.
+  void Join() {
+    if (!finished_) {
+      GTEST_CHECK_POSIX_SUCCESS_(pthread_join(thread_, 0));
+      finished_ = true;
+    }
+  }
+
+  virtual void Run() {
+    if (thread_can_start_ != NULL)
+      thread_can_start_->WaitForNotification();
+    func_(param_);
+  }
+
+ private:
+  const UserThreadFunc func_;  // User-supplied thread function.
+  const T param_;  // User-supplied parameter to the thread function.
+  // When non-NULL, used to block execution until the controller thread
+  // notifies.
+  Notification* const thread_can_start_;
+  bool finished_;  // true iff we know that the thread function has finished.
+  pthread_t thread_;  // The native thread object.
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam);
+};
+
+// MutexBase and Mutex implement mutex on pthreads-based platforms. They
+// are used in conjunction with class MutexLock:
+//
+//   Mutex mutex;
+//   ...
+//   MutexLock lock(&mutex);  // Acquires the mutex and releases it at the end
+//                            // of the current scope.
+//
+// MutexBase implements behavior for both statically and dynamically
+// allocated mutexes.  Do not use MutexBase directly.  Instead, write
+// the following to define a static mutex:
+//
+//   GTEST_DEFINE_STATIC_MUTEX_(g_some_mutex);
+//
+// You can forward declare a static mutex like this:
+//
+//   GTEST_DECLARE_STATIC_MUTEX_(g_some_mutex);
+//
+// To create a dynamic mutex, just define an object of type Mutex.
+class MutexBase {
+ public:
+  // Acquires this mutex.
+  void Lock() {
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_));
+    // owner_/has_owner_ are written only while the mutex is held, so
+    // these stores cannot race with another Lock().
+    owner_ = pthread_self();
+    has_owner_ = true;
+  }
+
+  // Releases this mutex.
+  void Unlock() {
+    // Since the lock is being released the owner_ field should no longer be
+    // considered valid. We don't protect writing to has_owner_ here, as it's
+    // the caller's responsibility to ensure that the current thread holds the
+    // mutex when this is called.
+    has_owner_ = false;
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&mutex_));
+  }
+
+  // Does nothing if the current thread holds the mutex. Otherwise, crashes
+  // with high probability.
+  void AssertHeld() const {
+    GTEST_CHECK_(has_owner_ && pthread_equal(owner_, pthread_self()))
+        << "The current thread is not holding the mutex @" << this;
+  }
+
+  // A static mutex may be used before main() is entered.  It may even
+  // be used before the dynamic initialization stage.  Therefore we
+  // must be able to initialize a static mutex object at link time.
+  // This means MutexBase has to be a POD and its member variables
+  // have to be public.
+ public:
+  pthread_mutex_t mutex_;  // The underlying pthread mutex.
+  // has_owner_ indicates whether the owner_ field below contains a valid thread
+  // ID and is therefore safe to inspect (e.g., to use in pthread_equal()). All
+  // accesses to the owner_ field should be protected by a check of this field.
+  // An alternative might be to memset() owner_ to all zeros, but there's no
+  // guarantee that a zero'd pthread_t is necessarily invalid or even different
+  // from pthread_self().
+  bool has_owner_;
+  pthread_t owner_;  // The thread holding the mutex.
+};
+
+// Forward-declares a static mutex.
+# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+    extern ::testing::internal::MutexBase mutex
+
+// Defines and statically (i.e. at link time) initializes a static mutex.
+// The initialization list here does not explicitly initialize each field,
+// instead relying on default initialization for the unspecified fields. In
+// particular, the owner_ field (a pthread_t) is not explicitly initialized.
+// This allows initialization to work whether pthread_t is a scalar or struct.
+// The flag -Wmissing-field-initializers must not be specified for this to work.
+// Note that MutexBase has no destructor, so a static mutex is never destroyed.
+# define GTEST_DEFINE_STATIC_MUTEX_(mutex) \
+    ::testing::internal::MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER, false }
+
+// The Mutex class can only be used for mutexes created at runtime. It
+// shares its API with MutexBase otherwise.
+class Mutex : public MutexBase {
+ public:
+  Mutex() {
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));
+    has_owner_ = false;
+  }
+  // Aborts if pthread_mutex_destroy() fails.  Per POSIX, destroying a
+  // mutex that is still locked is undefined behavior, so the caller
+  // must ensure the mutex is unlocked first.
+  ~Mutex() {
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_));
+  }
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex);
+};
+
+// We cannot name this class MutexLock as the ctor declaration would
+// conflict with a macro named MutexLock, which is defined on some
+// platforms.  Hence the typedef trick below.
+class GTestMutexLock {
+ public:
+  // Acquires the mutex for the lifetime of this object (RAII guard).
+  explicit GTestMutexLock(MutexBase* mutex)
+      : mutex_(mutex) { mutex_->Lock(); }
+
+  ~GTestMutexLock() { mutex_->Unlock(); }
+
+ private:
+  MutexBase* const mutex_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock);
+};
+
+typedef GTestMutexLock MutexLock;
+
+// Helpers for ThreadLocal.
+
+// pthread_key_create() requires DeleteThreadLocalValue() to have
+// C-linkage.  Therefore it cannot be templatized to access
+// ThreadLocal<T>.  Hence the need for class
+// ThreadLocalValueHolderBase.
+class ThreadLocalValueHolderBase {
+ public:
+  // Virtual so DeleteThreadLocalValue() can delete derived holders
+  // through a base pointer.
+  virtual ~ThreadLocalValueHolderBase() {}
+};
+
+// Called by pthread to delete thread-local data stored by
+// pthread_setspecific().
+extern "C" inline void DeleteThreadLocalValue(void* value_holder) {
+  delete static_cast<ThreadLocalValueHolderBase*>(value_holder);
+}
+
+// Implements thread-local storage on pthreads-based systems.
+//
+//   // Thread 1
+//   ThreadLocal<int> tl(100);  // 100 is the default value for each thread.
+//
+//   // Thread 2
+//   tl.set(150);  // Changes the value for thread 2 only.
+//   EXPECT_EQ(150, tl.get());
+//
+//   // Thread 1
+//   EXPECT_EQ(100, tl.get());  // In thread 1, tl has the original value.
+//   tl.set(200);
+//   EXPECT_EQ(200, tl.get());
+//
+// The template type argument T must have a public copy constructor.
+// In addition, the default ThreadLocal constructor requires T to have
+// a public default constructor.
+//
+// An object managed for a thread by a ThreadLocal instance is deleted
+// when the thread exits.  Or, if the ThreadLocal instance dies in
+// that thread, when the ThreadLocal dies.  It's the user's
+// responsibility to ensure that all other threads using a ThreadLocal
+// have exited when it dies, or the per-thread objects for those
+// threads will not be deleted.
+//
+// Google Test only uses global ThreadLocal objects.  That means they
+// will die after main() has returned.  Therefore, no per-thread
+// object managed by Google Test will be leaked as long as all threads
+// using Google Test have exited when main() returns.
+template <typename T>
+class ThreadLocal {
+ public:
+  ThreadLocal() : key_(CreateKey()),
+                  default_() {}
+  explicit ThreadLocal(const T& value) : key_(CreateKey()),
+                                         default_(value) {}
+
+  ~ThreadLocal() {
+    // Destroys the managed object for the current thread, if any.
+    DeleteThreadLocalValue(pthread_getspecific(key_));
+
+    // Releases resources associated with the key.  This will *not*
+    // delete mana

<TRUNCATED>

[4/7] marmotta git commit: move experimental C++ LevelDB backend into Apache Marmotta main, and named the new module "ostrich" as an analogy to "kiwi"

Posted by ss...@apache.org.
http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/test/gtest-all.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/test/gtest-all.cc b/libraries/ostrich/backend/test/gtest-all.cc
new file mode 100644
index 0000000..ff9e512
--- /dev/null
+++ b/libraries/ostrich/backend/test/gtest-all.cc
@@ -0,0 +1,9592 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+// Google C++ Testing Framework (Google Test)
+//
+// Sometimes it's desirable to build Google Test by compiling a single file.
+// This file serves this purpose.
+
+// This line ensures that gtest.h can be compiled on its own, even
+// when it's fused.
+#include "gtest.h"
+
+// The following lines pull in the real gtest *.cc files.
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// Utilities for testing Google Test itself and code that uses Google Test
+// (e.g. frameworks built on top of Google Test).
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+#define GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+
+
+namespace testing {
+
+// This helper class can be used to mock out Google Test failure reporting
+// so that we can test Google Test or code that builds on Google Test.
+//
+// An object of this class appends a TestPartResult object to the
+// TestPartResultArray object given in the constructor whenever a Google Test
+// failure is reported. It can either intercept only failures that are
+// generated in the same thread that created this object or it can intercept
+// all generated failures. The scope of this mock object can be controlled with
+// the second argument to the two arguments constructor.
+class GTEST_API_ ScopedFakeTestPartResultReporter
+    : public TestPartResultReporterInterface {
+ public:
+  // The two possible mocking modes of this object.
+  enum InterceptMode {
+    INTERCEPT_ONLY_CURRENT_THREAD,  // Intercepts only thread local failures.
+    INTERCEPT_ALL_THREADS           // Intercepts all failures.
+  };
+
+  // The c'tor sets this object as the test part result reporter used
+  // by Google Test.  The 'result' parameter specifies where to report the
+  // results. This reporter will only catch failures generated in the current
+  // thread. DEPRECATED
+  explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result);
+
+  // Same as above, but you can choose the interception scope of this object.
+  ScopedFakeTestPartResultReporter(InterceptMode intercept_mode,
+                                   TestPartResultArray* result);
+
+  // The d'tor restores the previous test part result reporter.
+  virtual ~ScopedFakeTestPartResultReporter();
+
+  // Appends the TestPartResult object to the TestPartResultArray
+  // received in the constructor.
+  //
+  // This method is from the TestPartResultReporterInterface
+  // interface.
+  virtual void ReportTestPartResult(const TestPartResult& result);
+ private:
+  void Init();
+
+  const InterceptMode intercept_mode_;
+  TestPartResultReporterInterface* old_reporter_;
+  TestPartResultArray* const result_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter);
+};
+
+namespace internal {
+
+// A helper class for implementing EXPECT_FATAL_FAILURE() and
+// EXPECT_NONFATAL_FAILURE().  Its destructor verifies that the given
+// TestPartResultArray contains exactly one failure that has the given
+// type and contains the given substring.  If that's not the case, a
+// non-fatal failure will be generated.
+class GTEST_API_ SingleFailureChecker {
+ public:
+  // The constructor remembers the arguments.
+  SingleFailureChecker(const TestPartResultArray* results,
+                       TestPartResult::Type type,
+                       const string& substr);
+  ~SingleFailureChecker();
+ private:
+  const TestPartResultArray* const results_;
+  const TestPartResult::Type type_;
+  const string substr_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker);
+};
+
+}  // namespace internal
+
+}  // namespace testing
+
+// A set of macros for testing Google Test assertions or code that's expected
+// to generate Google Test fatal failures.  It verifies that the given
+// statement will cause exactly one fatal Google Test failure with 'substr'
+// being part of the failure message.
+//
+// There are two different versions of this macro. EXPECT_FATAL_FAILURE only
+// affects and considers failures generated in the current thread and
+// EXPECT_FATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
+//
+// The verification of the assertion is done correctly even when the statement
+// throws an exception or aborts the current function.
+//
+// Known restrictions:
+//   - 'statement' cannot reference local non-static variables or
+//     non-static members of the current object.
+//   - 'statement' cannot return a value.
+//   - You cannot stream a failure message to this macro.
+//
+// Note that even though the implementations of the following two
+// macros are much alike, we cannot refactor them to use a common
+// helper macro, due to some peculiarity in how the preprocessor
+// works.  The AcceptsMacroThatExpandsToUnprotectedComma test in
+// gtest_unittest.cc will fail to compile if we do that.
+#define EXPECT_FATAL_FAILURE(statement, substr) \
+  do { \
+    class GTestExpectFatalFailureHelper {\
+     public:\
+      static void Execute() { statement; }\
+    };\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter:: \
+          INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
+      GTestExpectFatalFailureHelper::Execute();\
+    }\
+  } while (::testing::internal::AlwaysFalse())
+
+#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
+  do { \
+    class GTestExpectFatalFailureHelper {\
+     public:\
+      static void Execute() { statement; }\
+    };\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter:: \
+          INTERCEPT_ALL_THREADS, &gtest_failures);\
+      GTestExpectFatalFailureHelper::Execute();\
+    }\
+  } while (::testing::internal::AlwaysFalse())
+
+// A macro for testing Google Test assertions or code that's expected to
+// generate Google Test non-fatal failures.  It asserts that the given
+// statement will cause exactly one non-fatal Google Test failure with 'substr'
+// being part of the failure message.
+//
+// There are two different versions of this macro. EXPECT_NONFATAL_FAILURE only
+// affects and considers failures generated in the current thread and
+// EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
+//
+// 'statement' is allowed to reference local variables and members of
+// the current object.
+//
+// The verification of the assertion is done correctly even when the statement
+// throws an exception or aborts the current function.
+//
+// Known restrictions:
+//   - You cannot stream a failure message to this macro.
+//
+// Note that even though the implementations of the following two
+// macros are much alike, we cannot refactor them to use a common
+// helper macro, due to some peculiarity in how the preprocessor
+// works.  If we do that, the code won't compile when the user gives
+// EXPECT_NONFATAL_FAILURE() a statement that contains a macro that
+// expands to code containing an unprotected comma.  The
+// AcceptsMacroThatExpandsToUnprotectedComma test in gtest_unittest.cc
+// catches that.
+//
+// For the same reason, we have to write
+//   if (::testing::internal::AlwaysTrue()) { statement; }
+// instead of
+//   GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
+// to avoid an MSVC warning on unreachable code.
+#define EXPECT_NONFATAL_FAILURE(statement, substr) \
+  do {\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+        (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter:: \
+          INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
+      if (::testing::internal::AlwaysTrue()) { statement; }\
+    }\
+  } while (::testing::internal::AlwaysFalse())
+
+#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
+  do {\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+        (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, \
+          &gtest_failures);\
+      if (::testing::internal::AlwaysTrue()) { statement; }\
+    }\
+  } while (::testing::internal::AlwaysFalse())
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+
+#include <ctype.h>
+#include <math.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <wchar.h>
+#include <wctype.h>
+
+#include <algorithm>
+#include <iomanip>
+#include <limits>
+#include <ostream>  // NOLINT
+#include <sstream>
+#include <vector>
+
+#if GTEST_OS_LINUX
+
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+
+# include <fcntl.h>  // NOLINT
+# include <limits.h>  // NOLINT
+# include <sched.h>  // NOLINT
+// Declares vsnprintf().  This header is not available on Windows.
+# include <strings.h>  // NOLINT
+# include <sys/mman.h>  // NOLINT
+# include <sys/time.h>  // NOLINT
+# include <unistd.h>  // NOLINT
+# include <string>
+
+#elif GTEST_OS_SYMBIAN
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include <sys/time.h>  // NOLINT
+
+#elif GTEST_OS_ZOS
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include <sys/time.h>  // NOLINT
+
+// On z/OS we additionally need strings.h for strcasecmp.
+# include <strings.h>  // NOLINT
+
+#elif GTEST_OS_WINDOWS_MOBILE  // We are on Windows CE.
+
+# include <windows.h>  // NOLINT
+
+#elif GTEST_OS_WINDOWS  // We are on Windows proper.
+
+# include <io.h>  // NOLINT
+# include <sys/timeb.h>  // NOLINT
+# include <sys/types.h>  // NOLINT
+# include <sys/stat.h>  // NOLINT
+
+# if GTEST_OS_WINDOWS_MINGW
+// MinGW has gettimeofday() but not _ftime64().
+// TODO(kenton@google.com): Use autoconf to detect availability of
+//   gettimeofday().
+// TODO(kenton@google.com): There are other ways to get the time on
+//   Windows, like GetTickCount() or GetSystemTimeAsFileTime().  MinGW
+//   supports these.  consider using them instead.
+#  define GTEST_HAS_GETTIMEOFDAY_ 1
+#  include <sys/time.h>  // NOLINT
+# endif  // GTEST_OS_WINDOWS_MINGW
+
+// cpplint thinks that the header is already included, so we want to
+// silence it.
+# include <windows.h>  // NOLINT
+
+#else
+
+// Assume other platforms have gettimeofday().
+// TODO(kenton@google.com): Use autoconf to detect availability of
+//   gettimeofday().
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+
+// cpplint thinks that the header is already included, so we want to
+// silence it.
+# include <sys/time.h>  // NOLINT
+# include <unistd.h>  // NOLINT
+
+#endif  // GTEST_OS_LINUX
+
+#if GTEST_HAS_EXCEPTIONS
+# include <stdexcept>
+#endif
+
+#if GTEST_CAN_STREAM_RESULTS_
+# include <arpa/inet.h>  // NOLINT
+# include <netdb.h>  // NOLINT
+#endif
+
+// Indicates that this translation unit is part of Google Test's
+// implementation.  It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error.  This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Utility functions and classes used by the Google C++ testing framework.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// This file contains purely Google Test's internal implementation.  Please
+// DO NOT #INCLUDE IT IN A USER PROGRAM.
+
+#ifndef GTEST_SRC_GTEST_INTERNAL_INL_H_
+#define GTEST_SRC_GTEST_INTERNAL_INL_H_
+
+// GTEST_IMPLEMENTATION_ is defined to 1 iff the current translation unit is
+// part of Google Test's implementation; otherwise it's undefined.
+#if !GTEST_IMPLEMENTATION_
+// A user is trying to include this from his code - just say no.
+# error "gtest-internal-inl.h is part of Google Test's internal implementation."
+# error "It must not be included except by Google Test itself."
+#endif  // GTEST_IMPLEMENTATION_
+
+#ifndef _WIN32_WCE
+# include <errno.h>
+#endif  // !_WIN32_WCE
+#include <stddef.h>
+#include <stdlib.h>  // For strtoll/_strtoul64/malloc/free.
+#include <string.h>  // For memmove.
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+
+#if GTEST_CAN_STREAM_RESULTS_
+# include <arpa/inet.h>  // NOLINT
+# include <netdb.h>  // NOLINT
+#endif
+
+#if GTEST_OS_WINDOWS
+# include <windows.h>  // NOLINT
+#endif  // GTEST_OS_WINDOWS
+
+
+namespace testing {
+
+// Declares the flags.
+//
+// We don't want the users to modify this flag in the code, but want
+// Google Test's own unit tests to be able to access it. Therefore we
+// declare it here as opposed to in gtest.h.
+GTEST_DECLARE_bool_(death_test_use_fork);
+
+namespace internal {
+
+// The value of GetTestTypeId() as seen from within the Google Test
+// library.  This is solely for testing GetTestTypeId().
+GTEST_API_ extern const TypeId kTestTypeIdInGoogleTest;
+
+// Names of the flags (needed for parsing Google Test flags).
+const char kAlsoRunDisabledTestsFlag[] = "also_run_disabled_tests";
+const char kBreakOnFailureFlag[] = "break_on_failure";
+const char kCatchExceptionsFlag[] = "catch_exceptions";
+const char kColorFlag[] = "color";
+const char kFilterFlag[] = "filter";
+const char kListTestsFlag[] = "list_tests";
+const char kOutputFlag[] = "output";
+const char kPrintTimeFlag[] = "print_time";
+const char kRandomSeedFlag[] = "random_seed";
+const char kRepeatFlag[] = "repeat";
+const char kShuffleFlag[] = "shuffle";
+const char kStackTraceDepthFlag[] = "stack_trace_depth";
+const char kStreamResultToFlag[] = "stream_result_to";
+const char kThrowOnFailureFlag[] = "throw_on_failure";
+
+// A valid random seed must be in [1, kMaxRandomSeed].
+const int kMaxRandomSeed = 99999;
+
+// g_help_flag is true iff the --help flag or an equivalent form is
+// specified on the command line.
+GTEST_API_ extern bool g_help_flag;
+
+// Returns the current time in milliseconds.
+GTEST_API_ TimeInMillis GetTimeInMillis();
+
+// Returns true iff Google Test should use colors in the output.
+GTEST_API_ bool ShouldUseColor(bool stdout_is_tty);
+
+// Formats the given time in milliseconds as seconds.
+GTEST_API_ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms);
+
+// Converts the given time in milliseconds to a date string in the ISO 8601
+// format, without the timezone information.  N.B.: due to the use the
+// non-reentrant localtime() function, this function is not thread safe.  Do
+// not use it in any code that can be called from multiple threads.
+GTEST_API_ std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms);
+
+// Parses a string for an Int32 flag, in the form of "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true.  On failure, returns false without changing *value.
+GTEST_API_ bool ParseInt32Flag(
+    const char* str, const char* flag, Int32* value);
+
+// Returns a random seed in range [1, kMaxRandomSeed] based on the
+// given --gtest_random_seed flag value.
+inline int GetRandomSeedFromFlag(Int32 random_seed_flag) {
+  const unsigned int raw_seed = (random_seed_flag == 0) ?
+      static_cast<unsigned int>(GetTimeInMillis()) :
+      static_cast<unsigned int>(random_seed_flag);
+
+  // Normalizes the actual seed to range [1, kMaxRandomSeed] such that
+  // it's easy to type.
+  const int normalized_seed =
+      static_cast<int>((raw_seed - 1U) %
+                       static_cast<unsigned int>(kMaxRandomSeed)) + 1;
+  return normalized_seed;
+}
+
+// Returns the first valid random seed after 'seed'.  The behavior is
+// undefined if 'seed' is invalid.  The seed after kMaxRandomSeed is
+// considered to be 1.
+inline int GetNextRandomSeed(int seed) {
+  GTEST_CHECK_(1 <= seed && seed <= kMaxRandomSeed)
+      << "Invalid random seed " << seed << " - must be in [1, "
+      << kMaxRandomSeed << "].";
+  const int next_seed = seed + 1;
+  return (next_seed > kMaxRandomSeed) ? 1 : next_seed;
+}
+
+// This class saves the values of all Google Test flags in its c'tor, and
+// restores them in its d'tor.
+class GTestFlagSaver {
+ public:
+  // The c'tor.
+  GTestFlagSaver() {
+    also_run_disabled_tests_ = GTEST_FLAG(also_run_disabled_tests);
+    break_on_failure_ = GTEST_FLAG(break_on_failure);
+    catch_exceptions_ = GTEST_FLAG(catch_exceptions);
+    color_ = GTEST_FLAG(color);
+    death_test_style_ = GTEST_FLAG(death_test_style);
+    death_test_use_fork_ = GTEST_FLAG(death_test_use_fork);
+    filter_ = GTEST_FLAG(filter);
+    internal_run_death_test_ = GTEST_FLAG(internal_run_death_test);
+    list_tests_ = GTEST_FLAG(list_tests);
+    output_ = GTEST_FLAG(output);
+    print_time_ = GTEST_FLAG(print_time);
+    random_seed_ = GTEST_FLAG(random_seed);
+    repeat_ = GTEST_FLAG(repeat);
+    shuffle_ = GTEST_FLAG(shuffle);
+    stack_trace_depth_ = GTEST_FLAG(stack_trace_depth);
+    stream_result_to_ = GTEST_FLAG(stream_result_to);
+    throw_on_failure_ = GTEST_FLAG(throw_on_failure);
+  }
+
+  // The d'tor is not virtual.  DO NOT INHERIT FROM THIS CLASS.
+  ~GTestFlagSaver() {
+    GTEST_FLAG(also_run_disabled_tests) = also_run_disabled_tests_;
+    GTEST_FLAG(break_on_failure) = break_on_failure_;
+    GTEST_FLAG(catch_exceptions) = catch_exceptions_;
+    GTEST_FLAG(color) = color_;
+    GTEST_FLAG(death_test_style) = death_test_style_;
+    GTEST_FLAG(death_test_use_fork) = death_test_use_fork_;
+    GTEST_FLAG(filter) = filter_;
+    GTEST_FLAG(internal_run_death_test) = internal_run_death_test_;
+    GTEST_FLAG(list_tests) = list_tests_;
+    GTEST_FLAG(output) = output_;
+    GTEST_FLAG(print_time) = print_time_;
+    GTEST_FLAG(random_seed) = random_seed_;
+    GTEST_FLAG(repeat) = repeat_;
+    GTEST_FLAG(shuffle) = shuffle_;
+    GTEST_FLAG(stack_trace_depth) = stack_trace_depth_;
+    GTEST_FLAG(stream_result_to) = stream_result_to_;
+    GTEST_FLAG(throw_on_failure) = throw_on_failure_;
+  }
+
+ private:
+  // Fields for saving the original values of flags.
+  bool also_run_disabled_tests_;
+  bool break_on_failure_;
+  bool catch_exceptions_;
+  std::string color_;
+  std::string death_test_style_;
+  bool death_test_use_fork_;
+  std::string filter_;
+  std::string internal_run_death_test_;
+  bool list_tests_;
+  std::string output_;
+  bool print_time_;
+  internal::Int32 random_seed_;
+  internal::Int32 repeat_;
+  bool shuffle_;
+  internal::Int32 stack_trace_depth_;
+  std::string stream_result_to_;
+  bool throw_on_failure_;
+} GTEST_ATTRIBUTE_UNUSED_;
+
+// Converts a Unicode code point to a narrow string in UTF-8 encoding.
+// code_point parameter is of type UInt32 because wchar_t may not be
+// wide enough to contain a code point.
+// If the code_point is not a valid Unicode code point
+// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted
+// to "(Invalid Unicode 0xXXXXXXXX)".
+GTEST_API_ std::string CodePointToUtf8(UInt32 code_point);
+
+// Converts a wide string to a narrow string in UTF-8 encoding.
+// The wide string is assumed to have the following encoding:
+//   UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
+//   UTF-32 if sizeof(wchar_t) == 4 (on Linux)
+// Parameter str points to a null-terminated wide string.
+// Parameter num_chars may additionally limit the number
+// of wchar_t characters processed. -1 is used when the entire string
+// should be processed.
+// If the string contains code points that are not valid Unicode code points
+// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding
+// and contains invalid UTF-16 surrogate pairs, values in those pairs
+// will be encoded as individual Unicode characters from Basic Normal Plane.
+GTEST_API_ std::string WideStringToUtf8(const wchar_t* str, int num_chars);
+
+// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file
+// if the variable is present. If a file already exists at this location, this
+// function will write over it. If the variable is present, but the file cannot
+// be created, prints an error and exits.
+void WriteToShardStatusFileIfNeeded();
+
+// Checks whether sharding is enabled by examining the relevant
+// environment variable values. If the variables are present,
+// but inconsistent (e.g., shard_index >= total_shards), prints
+// an error and exits. If in_subprocess_for_death_test, sharding is
+// disabled because it must only be applied to the original test
+// process. Otherwise, we could filter out death tests we intended to execute.
+GTEST_API_ bool ShouldShard(const char* total_shards_str,
+                            const char* shard_index_str,
+                            bool in_subprocess_for_death_test);
+
+// Parses the environment variable var as an Int32. If it is unset,
+// returns default_val. If it is not an Int32, prints an error and
+// and aborts.
+GTEST_API_ Int32 Int32FromEnvOrDie(const char* env_var, Int32 default_val);
+
+// Given the total number of shards, the shard index, and the test id,
+// returns true iff the test should be run on this shard. The test id is
+// some arbitrary but unique non-negative integer assigned to each test
+// method. Assumes that 0 <= shard_index < total_shards.
+GTEST_API_ bool ShouldRunTestOnShard(
+    int total_shards, int shard_index, int test_id);
+
+// STL container utilities.
+
+// Returns the number of elements in the given container that satisfy
+// the given predicate.
+template <class Container, typename Predicate>
+inline int CountIf(const Container& c, Predicate predicate) {
+  // Implemented as an explicit loop since std::count_if() in libCstd on
+  // Solaris has a non-standard signature.
+  int count = 0;
+  for (typename Container::const_iterator it = c.begin(); it != c.end(); ++it) {
+    if (predicate(*it))
+      ++count;
+  }
+  return count;
+}
+
+// Applies a function/functor to each element in the container.
+template <class Container, typename Functor>
+void ForEach(const Container& c, Functor functor) {
+  std::for_each(c.begin(), c.end(), functor);
+}
+
+// Returns the i-th element of the vector, or default_value if i is not
+// in range [0, v.size()).
+template <typename E>
+inline E GetElementOr(const std::vector<E>& v, int i, E default_value) {
+  return (i < 0 || i >= static_cast<int>(v.size())) ? default_value : v[i];
+}
+
+// Performs an in-place shuffle of a range of the vector's elements.
+// 'begin' and 'end' are element indices as an STL-style range;
+// i.e. [begin, end) are shuffled, where 'end' == size() means to
+// shuffle to the end of the vector.
+template <typename E>
+void ShuffleRange(internal::Random* random, int begin, int end,
+                  std::vector<E>* v) {
+  const int size = static_cast<int>(v->size());
+  GTEST_CHECK_(0 <= begin && begin <= size)
+      << "Invalid shuffle range start " << begin << ": must be in range [0, "
+      << size << "].";
+  GTEST_CHECK_(begin <= end && end <= size)
+      << "Invalid shuffle range finish " << end << ": must be in range ["
+      << begin << ", " << size << "].";
+
+  // Fisher-Yates shuffle, from
+  // http://en.wikipedia.org/wiki/Fisher-Yates_shuffle
+  for (int range_width = end - begin; range_width >= 2; range_width--) {
+    const int last_in_range = begin + range_width - 1;
+    const int selected = begin + random->Generate(range_width);
+    std::swap((*v)[selected], (*v)[last_in_range]);
+  }
+}
+
+// Performs an in-place shuffle of the vector's elements.
+template <typename E>
+inline void Shuffle(internal::Random* random, std::vector<E>* v) {
+  ShuffleRange(random, 0, static_cast<int>(v->size()), v);
+}
+
+// A function for deleting an object.  Handy for being used as a
+// functor.
+template <typename T>
+static void Delete(T* x) {
+  delete x;
+}
+
+// A predicate that checks the key of a TestProperty against a known key.
+//
+// TestPropertyKeyIs is copyable.
+class TestPropertyKeyIs {
+ public:
+  // Constructor.
+  //
+  // TestPropertyKeyIs has NO default constructor.
+  explicit TestPropertyKeyIs(const std::string& key) : key_(key) {}
+
+  // Returns true iff the test name of test property matches on key_.
+  bool operator()(const TestProperty& test_property) const {
+    return test_property.key() == key_;
+  }
+
+ private:
+  std::string key_;
+};
+
+// Class UnitTestOptions.
+//
+// This class contains functions for processing options the user
+// specifies when running the tests.  It has only static members and is
+// not meant to be instantiated.
+//
+// In most cases, the user can specify an option using either an
+// environment variable or a command line flag.  E.g. you can set the
+// test filter using either GTEST_FILTER or --gtest_filter.  If both
+// the variable and the flag are present, the latter overrides the
+// former.
+class GTEST_API_ UnitTestOptions {
+ public:
+  // Functions for processing the gtest_output flag.
+
+  // Returns the output format, or "" for normal printed output.
+  static std::string GetOutputFormat();
+
+  // Returns the absolute path of the requested output file, or the
+  // default (test_detail.xml in the original working directory) if
+  // none was explicitly specified.
+  static std::string GetAbsolutePathToOutputFile();
+
+  // Functions for processing the gtest_filter flag.
+
+  // Returns true iff the wildcard pattern matches the string.  The
+  // first ':' or '\0' character in pattern marks the end of it.
+  //
+  // This recursive algorithm isn't very efficient, but is clear and
+  // works well enough for matching test names, which are short.
+  static bool PatternMatchesString(const char *pattern, const char *str);
+
+  // Returns true iff the user-specified filter matches the test case
+  // name and the test name.
+  static bool FilterMatchesTest(const std::string &test_case_name,
+                                const std::string &test_name);
+
+#if GTEST_OS_WINDOWS
+  // Function for supporting the gtest_catch_exception flag.
+
+  // Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
+  // given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
+  // This function is useful as an __except condition.
+  static int GTestShouldProcessSEH(DWORD exception_code);
+#endif  // GTEST_OS_WINDOWS
+
+  // Returns true if "name" matches the ':' separated list of glob-style
+  // filters in "filter".
+  // NOTE(review): presumably a helper for FilterMatchesTest() — verify
+  // against the implementation.
+  static bool MatchesFilter(const std::string& name, const char* filter);
+};
+
+// Returns the current application's name, removing directory path if that
+// is present.  Used by UnitTestOptions::GetAbsolutePathToOutputFile().
+GTEST_API_ FilePath GetCurrentExecutableName();
+
+// The role interface for getting the OS stack trace as a string.
+// Allows tests to substitute a fake stack trace getter.
+class OsStackTraceGetterInterface {
+ public:
+  OsStackTraceGetterInterface() {}
+  virtual ~OsStackTraceGetterInterface() {}
+
+  // Returns the current OS stack trace as an std::string.  Parameters:
+  //
+  //   max_depth  - the maximum number of stack frames to be included
+  //                in the trace.
+  //   skip_count - the number of top frames to be skipped; doesn't count
+  //                against max_depth.
+  virtual string CurrentStackTrace(int max_depth, int skip_count) = 0;
+
+  // UponLeavingGTest() should be called immediately before Google Test calls
+  // user code. It saves some information about the current stack that
+  // CurrentStackTrace() will use to find and hide Google Test stack frames.
+  virtual void UponLeavingGTest() = 0;
+
+ private:
+  // Not copyable - implementations may hold per-instance state.
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetterInterface);
+};
+
+// A working implementation of the OsStackTraceGetterInterface interface.
+// Thread-safe: all internal state is guarded by mutex_.
+class OsStackTraceGetter : public OsStackTraceGetterInterface {
+ public:
+  OsStackTraceGetter() : caller_frame_(NULL) {}
+
+  virtual string CurrentStackTrace(int max_depth, int skip_count)
+      GTEST_LOCK_EXCLUDED_(mutex_);
+
+  virtual void UponLeavingGTest() GTEST_LOCK_EXCLUDED_(mutex_);
+
+  // This string is inserted in place of stack frames that are part of
+  // Google Test's implementation.
+  static const char* const kElidedFramesMarker;
+
+ private:
+  Mutex mutex_;  // protects all internal state
+
+  // We save the stack frame below the frame that calls user code.
+  // We do this because the address of the frame immediately below
+  // the user code changes between the call to UponLeavingGTest()
+  // and any calls to CurrentStackTrace() from within the user code.
+  void* caller_frame_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetter);
+};
+
+// Information about a Google Test trace point (created by the
+// SCOPED_TRACE() macro).
+struct TraceInfo {
+  const char* file;     // Source file where the trace was added.
+  int line;             // Line number of the trace point.
+  std::string message;  // User-supplied trace message.
+};
+
+// This is the default global test part result reporter used in UnitTestImpl.
+// This class should only be used by UnitTestImpl.
+class DefaultGlobalTestPartResultReporter
+  : public TestPartResultReporterInterface {
+ public:
+  explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test);
+  // Implements the TestPartResultReporterInterface. Reports the test part
+  // result in the current test.
+  virtual void ReportTestPartResult(const TestPartResult& result);
+
+ private:
+  // Back-pointer to the owning UnitTestImpl; not owned.
+  UnitTestImpl* const unit_test_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultGlobalTestPartResultReporter);
+};
+
+// This is the default per thread test part result reporter used in
+// UnitTestImpl. This class should only be used by UnitTestImpl.
+class DefaultPerThreadTestPartResultReporter
+    : public TestPartResultReporterInterface {
+ public:
+  explicit DefaultPerThreadTestPartResultReporter(UnitTestImpl* unit_test);
+  // Implements the TestPartResultReporterInterface. The implementation just
+  // delegates to the current global test part result reporter of *unit_test_.
+  virtual void ReportTestPartResult(const TestPartResult& result);
+
+ private:
+  // Back-pointer to the owning UnitTestImpl; not owned.
+  UnitTestImpl* const unit_test_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultPerThreadTestPartResultReporter);
+};
+
+// The private implementation of the UnitTest class.  We don't protect
+// the methods under a mutex, as this class is not accessible by a
+// user and the UnitTest class that delegates work to this class does
+// proper locking.
+class GTEST_API_ UnitTestImpl {
+ public:
+  explicit UnitTestImpl(UnitTest* parent);
+  virtual ~UnitTestImpl();
+
+  // There are two different ways to register your own TestPartResultReporter.
+  // You can register your own reporter to listen either only for test results
+  // from the current thread or for results from all threads.
+  // By default, each per-thread test result reporter just passes a new
+  // TestPartResult to the global test result reporter, which registers the
+  // test part result for the currently running test.
+
+  // Returns the global test part result reporter.
+  TestPartResultReporterInterface* GetGlobalTestPartResultReporter();
+
+  // Sets the global test part result reporter.
+  void SetGlobalTestPartResultReporter(
+      TestPartResultReporterInterface* reporter);
+
+  // Returns the test part result reporter for the current thread.
+  TestPartResultReporterInterface* GetTestPartResultReporterForCurrentThread();
+
+  // Sets the test part result reporter for the current thread.
+  void SetTestPartResultReporterForCurrentThread(
+      TestPartResultReporterInterface* reporter);
+
+  // Gets the number of successful test cases.
+  int successful_test_case_count() const;
+
+  // Gets the number of failed test cases.
+  int failed_test_case_count() const;
+
+  // Gets the number of all test cases.
+  int total_test_case_count() const;
+
+  // Gets the number of all test cases that contain at least one test
+  // that should run.
+  int test_case_to_run_count() const;
+
+  // Gets the number of successful tests.
+  int successful_test_count() const;
+
+  // Gets the number of failed tests.
+  int failed_test_count() const;
+
+  // Gets the number of disabled tests that will be reported in the XML report.
+  int reportable_disabled_test_count() const;
+
+  // Gets the number of disabled tests.
+  int disabled_test_count() const;
+
+  // Gets the number of tests to be printed in the XML report.
+  int reportable_test_count() const;
+
+  // Gets the number of all tests.
+  int total_test_count() const;
+
+  // Gets the number of tests that should run.
+  int test_to_run_count() const;
+
+  // Gets the time of the test program start, in ms from the start of the
+  // UNIX epoch.
+  TimeInMillis start_timestamp() const { return start_timestamp_; }
+
+  // Gets the elapsed time, in milliseconds.
+  TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+  // Returns true iff the unit test passed (i.e. all test cases passed).
+  bool Passed() const { return !Failed(); }
+
+  // Returns true iff the unit test failed (i.e. some test case failed
+  // or something outside of all tests failed).
+  bool Failed() const {
+    return failed_test_case_count() > 0 || ad_hoc_test_result()->Failed();
+  }
+
+  // Gets the i-th test case among all the test cases. i can range from 0 to
+  // total_test_case_count() - 1. If i is not in that range, returns NULL.
+  const TestCase* GetTestCase(int i) const {
+    const int index = GetElementOr(test_case_indices_, i, -1);
+    // Index through the shuffled order (index), not the raw position (i),
+    // so this stays consistent with GetMutableTestCase() after shuffling.
+    return index < 0 ? NULL : test_cases_[index];
+  }
+
+  // Gets the i-th test case among all the test cases. i can range from 0 to
+  // total_test_case_count() - 1. If i is not in that range, returns NULL.
+  TestCase* GetMutableTestCase(int i) {
+    const int index = GetElementOr(test_case_indices_, i, -1);
+    return index < 0 ? NULL : test_cases_[index];
+  }
+
+  // Provides access to the event listener list.
+  TestEventListeners* listeners() { return &listeners_; }
+
+  // Returns the TestResult for the test that's currently running, or
+  // the TestResult for the ad hoc test if no test is running.
+  TestResult* current_test_result();
+
+  // Returns the TestResult for the ad hoc test.
+  const TestResult* ad_hoc_test_result() const { return &ad_hoc_test_result_; }
+
+  // Sets the OS stack trace getter.
+  //
+  // Does nothing if the input and the current OS stack trace getter
+  // are the same; otherwise, deletes the old getter and makes the
+  // input the current getter.
+  void set_os_stack_trace_getter(OsStackTraceGetterInterface* getter);
+
+  // Returns the current OS stack trace getter if it is not NULL;
+  // otherwise, creates an OsStackTraceGetter, makes it the current
+  // getter, and returns it.
+  OsStackTraceGetterInterface* os_stack_trace_getter();
+
+  // Returns the current OS stack trace as an std::string.
+  //
+  // The maximum number of stack frames to be included is specified by
+  // the gtest_stack_trace_depth flag.  The skip_count parameter
+  // specifies the number of top frames to be skipped, which doesn't
+  // count against the number of frames to be included.
+  //
+  // For example, if Foo() calls Bar(), which in turn calls
+  // CurrentOsStackTraceExceptTop(1), Foo() will be included in the
+  // trace but Bar() and CurrentOsStackTraceExceptTop() won't.
+  std::string CurrentOsStackTraceExceptTop(int skip_count) GTEST_NO_INLINE_;
+
+  // Finds and returns a TestCase with the given name.  If one doesn't
+  // exist, creates one and returns it.
+  //
+  // Arguments:
+  //
+  //   test_case_name: name of the test case
+  //   type_param:     the name of the test's type parameter, or NULL if
+  //                   this is not a typed or a type-parameterized test.
+  //   set_up_tc:      pointer to the function that sets up the test case
+  //   tear_down_tc:   pointer to the function that tears down the test case
+  TestCase* GetTestCase(const char* test_case_name,
+                        const char* type_param,
+                        Test::SetUpTestCaseFunc set_up_tc,
+                        Test::TearDownTestCaseFunc tear_down_tc);
+
+  // Adds a TestInfo to the unit test.
+  //
+  // Arguments:
+  //
+  //   set_up_tc:    pointer to the function that sets up the test case
+  //   tear_down_tc: pointer to the function that tears down the test case
+  //   test_info:    the TestInfo object
+  void AddTestInfo(Test::SetUpTestCaseFunc set_up_tc,
+                   Test::TearDownTestCaseFunc tear_down_tc,
+                   TestInfo* test_info) {
+    // In order to support thread-safe death tests, we need to
+    // remember the original working directory when the test program
+    // was first invoked.  We cannot do this in RUN_ALL_TESTS(), as
+    // the user may have changed the current directory before calling
+    // RUN_ALL_TESTS().  Therefore we capture the current directory in
+    // AddTestInfo(), which is called to register a TEST or TEST_F
+    // before main() is reached.
+    if (original_working_dir_.IsEmpty()) {
+      original_working_dir_.Set(FilePath::GetCurrentDir());
+      GTEST_CHECK_(!original_working_dir_.IsEmpty())
+          << "Failed to get the current working directory.";
+    }
+
+    GetTestCase(test_info->test_case_name(),
+                test_info->type_param(),
+                set_up_tc,
+                tear_down_tc)->AddTestInfo(test_info);
+  }
+
+#if GTEST_HAS_PARAM_TEST
+  // Returns ParameterizedTestCaseRegistry object used to keep track of
+  // value-parameterized tests and instantiate and register them.
+  internal::ParameterizedTestCaseRegistry& parameterized_test_registry() {
+    return parameterized_test_registry_;
+  }
+#endif  // GTEST_HAS_PARAM_TEST
+
+  // Sets the TestCase object for the test that's currently running.
+  void set_current_test_case(TestCase* a_current_test_case) {
+    current_test_case_ = a_current_test_case;
+  }
+
+  // Sets the TestInfo object for the test that's currently running.  If
+  // current_test_info is NULL, the assertion results will be stored in
+  // ad_hoc_test_result_.
+  void set_current_test_info(TestInfo* a_current_test_info) {
+    current_test_info_ = a_current_test_info;
+  }
+
+  // Registers all parameterized tests defined using TEST_P and
+  // INSTANTIATE_TEST_CASE_P, creating regular tests for each test/parameter
+  // combination. This method can be called more than once; it has guards
+  // protecting from registering the tests more than once.  If
+  // value-parameterized tests are disabled, RegisterParameterizedTests is
+  // present but does nothing.
+  void RegisterParameterizedTests();
+
+  // Runs all tests in this UnitTest object, prints the result, and
+  // returns true if all tests are successful.  If any exception is
+  // thrown during a test, this test is considered to be failed, but
+  // the rest of the tests will still be run.
+  bool RunAllTests();
+
+  // Clears the results of all tests, except the ad hoc tests.
+  void ClearNonAdHocTestResult() {
+    ForEach(test_cases_, TestCase::ClearTestCaseResult);
+  }
+
+  // Clears the results of ad-hoc test assertions.
+  void ClearAdHocTestResult() {
+    ad_hoc_test_result_.Clear();
+  }
+
+  // Adds a TestProperty to the current TestResult object when invoked in a
+  // context of a test or a test case, or to the global property set. If the
+  // result already contains a property with the same key, the value will be
+  // updated.
+  void RecordProperty(const TestProperty& test_property);
+
+  enum ReactionToSharding {
+    HONOR_SHARDING_PROTOCOL,
+    IGNORE_SHARDING_PROTOCOL
+  };
+
+  // Matches the full name of each test against the user-specified
+  // filter to decide whether the test should run, then records the
+  // result in each TestCase and TestInfo object.
+  // If shard_tests == HONOR_SHARDING_PROTOCOL, further filters tests
+  // based on sharding variables in the environment.
+  // Returns the number of tests that should run.
+  int FilterTests(ReactionToSharding shard_tests);
+
+  // Prints the names of the tests matching the user-specified filter flag.
+  void ListTestsMatchingFilter();
+
+  const TestCase* current_test_case() const { return current_test_case_; }
+  TestInfo* current_test_info() { return current_test_info_; }
+  const TestInfo* current_test_info() const { return current_test_info_; }
+
+  // Returns the vector of environments that need to be set-up/torn-down
+  // before/after the tests are run.
+  std::vector<Environment*>& environments() { return environments_; }
+
+  // Getters for the per-thread Google Test trace stack.
+  std::vector<TraceInfo>& gtest_trace_stack() {
+    return *(gtest_trace_stack_.pointer());
+  }
+  const std::vector<TraceInfo>& gtest_trace_stack() const {
+    return gtest_trace_stack_.get();
+  }
+
+#if GTEST_HAS_DEATH_TEST
+  void InitDeathTestSubprocessControlInfo() {
+    internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag());
+  }
+  // Returns a pointer to the parsed --gtest_internal_run_death_test
+  // flag, or NULL if that flag was not specified.
+  // This information is useful only in a death test child process.
+  // Must not be called before a call to InitGoogleTest.
+  const InternalRunDeathTestFlag* internal_run_death_test_flag() const {
+    return internal_run_death_test_flag_.get();
+  }
+
+  // Returns a pointer to the current death test factory.
+  internal::DeathTestFactory* death_test_factory() {
+    return death_test_factory_.get();
+  }
+
+  void SuppressTestEventsIfInSubprocess();
+
+  friend class ReplaceDeathTestFactory;
+#endif  // GTEST_HAS_DEATH_TEST
+
+  // Initializes the event listener performing XML output as specified by
+  // UnitTestOptions. Must not be called before InitGoogleTest.
+  void ConfigureXmlOutput();
+
+#if GTEST_CAN_STREAM_RESULTS_
+  // Initializes the event listener for streaming test results to a socket.
+  // Must not be called before InitGoogleTest.
+  void ConfigureStreamingOutput();
+#endif
+
+  // Performs initialization dependent upon flag values obtained in
+  // ParseGoogleTestFlagsOnly.  Is called from InitGoogleTest after the call to
+  // ParseGoogleTestFlagsOnly.  In case a user neglects to call InitGoogleTest
+  // this function is also called from RunAllTests.  Since this function can be
+  // called more than once, it has to be idempotent.
+  void PostFlagParsingInit();
+
+  // Gets the random seed used at the start of the current test iteration.
+  int random_seed() const { return random_seed_; }
+
+  // Gets the random number generator.
+  internal::Random* random() { return &random_; }
+
+  // Shuffles all test cases, and the tests within each test case,
+  // making sure that death tests are still run first.
+  void ShuffleTests();
+
+  // Restores the test cases and tests to their order before the first shuffle.
+  void UnshuffleTests();
+
+  // Returns the value of GTEST_FLAG(catch_exceptions) at the moment
+  // UnitTest::Run() starts.
+  bool catch_exceptions() const { return catch_exceptions_; }
+
+ private:
+  friend class ::testing::UnitTest;
+
+  // Used by UnitTest::Run() to capture the state of
+  // GTEST_FLAG(catch_exceptions) at the moment it starts.
+  void set_catch_exceptions(bool value) { catch_exceptions_ = value; }
+
+  // The UnitTest object that owns this implementation object.
+  UnitTest* const parent_;
+
+  // The working directory when the first TEST() or TEST_F() was
+  // executed.
+  internal::FilePath original_working_dir_;
+
+  // The default test part result reporters.
+  DefaultGlobalTestPartResultReporter default_global_test_part_result_reporter_;
+  DefaultPerThreadTestPartResultReporter
+      default_per_thread_test_part_result_reporter_;
+
+  // Points to (but doesn't own) the global test part result reporter.
+  TestPartResultReporterInterface* global_test_part_result_repoter_;
+
+  // Protects read and write access to global_test_part_result_repoter_.
+  internal::Mutex global_test_part_result_reporter_mutex_;
+
+  // Points to (but doesn't own) the per-thread test part result reporter.
+  internal::ThreadLocal<TestPartResultReporterInterface*>
+      per_thread_test_part_result_reporter_;
+
+  // The vector of environments that need to be set-up/torn-down
+  // before/after the tests are run.
+  std::vector<Environment*> environments_;
+
+  // The vector of TestCases in their original order.  It owns the
+  // elements in the vector.
+  std::vector<TestCase*> test_cases_;
+
+  // Provides a level of indirection for the test case list to allow
+  // easy shuffling and restoring the test case order.  The i-th
+  // element of this vector is the index of the i-th test case in the
+  // shuffled order.
+  std::vector<int> test_case_indices_;
+
+#if GTEST_HAS_PARAM_TEST
+  // ParameterizedTestRegistry object used to register value-parameterized
+  // tests.
+  internal::ParameterizedTestCaseRegistry parameterized_test_registry_;
+
+  // Indicates whether RegisterParameterizedTests() has been called already.
+  bool parameterized_tests_registered_;
+#endif  // GTEST_HAS_PARAM_TEST
+
+  // Index of the last death test case registered.  Initially -1.
+  int last_death_test_case_;
+
+  // This points to the TestCase for the currently running test.  It
+  // changes as Google Test goes through one test case after another.
+  // When no test is running, this is set to NULL and Google Test
+  // stores assertion results in ad_hoc_test_result_.  Initially NULL.
+  TestCase* current_test_case_;
+
+  // This points to the TestInfo for the currently running test.  It
+  // changes as Google Test goes through one test after another.  When
+  // no test is running, this is set to NULL and Google Test stores
+  // assertion results in ad_hoc_test_result_.  Initially NULL.
+  TestInfo* current_test_info_;
+
+  // Normally, a user only writes assertions inside a TEST or TEST_F,
+  // or inside a function called by a TEST or TEST_F.  Since Google
+  // Test keeps track of which test is current running, it can
+  // associate such an assertion with the test it belongs to.
+  //
+  // If an assertion is encountered when no TEST or TEST_F is running,
+  // Google Test attributes the assertion result to an imaginary "ad hoc"
+  // test, and records the result in ad_hoc_test_result_.
+  TestResult ad_hoc_test_result_;
+
+  // The list of event listeners that can be used to track events inside
+  // Google Test.
+  TestEventListeners listeners_;
+
+  // The OS stack trace getter.  Will be deleted when the UnitTest
+  // object is destructed.  By default, an OsStackTraceGetter is used,
+  // but the user can set this field to use a custom getter if that is
+  // desired.
+  OsStackTraceGetterInterface* os_stack_trace_getter_;
+
+  // True iff PostFlagParsingInit() has been called.
+  bool post_flag_parse_init_performed_;
+
+  // The random number seed used at the beginning of the test run.
+  int random_seed_;
+
+  // Our random number generator.
+  internal::Random random_;
+
+  // The time of the test program start, in ms from the start of the
+  // UNIX epoch.
+  TimeInMillis start_timestamp_;
+
+  // How long the test took to run, in milliseconds.
+  TimeInMillis elapsed_time_;
+
+#if GTEST_HAS_DEATH_TEST
+  // The decomposed components of the gtest_internal_run_death_test flag,
+  // parsed when RUN_ALL_TESTS is called.
+  internal::scoped_ptr<InternalRunDeathTestFlag> internal_run_death_test_flag_;
+  internal::scoped_ptr<internal::DeathTestFactory> death_test_factory_;
+#endif  // GTEST_HAS_DEATH_TEST
+
+  // A per-thread stack of traces created by the SCOPED_TRACE() macro.
+  internal::ThreadLocal<std::vector<TraceInfo> > gtest_trace_stack_;
+
+  // The value of GTEST_FLAG(catch_exceptions) at the moment RunAllTests()
+  // starts.
+  bool catch_exceptions_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl);
+};  // class UnitTestImpl
+
+// Convenience function for accessing the global UnitTest
+// implementation object.
+// NOTE(review): assumes UnitTest::GetInstance() never returns NULL —
+// confirm against the singleton's implementation.
+inline UnitTestImpl* GetUnitTestImpl() {
+  return UnitTest::GetInstance()->impl();
+}
+
+#if GTEST_USES_SIMPLE_RE
+
+// Internal helper functions for implementing the simple regular
+// expression matcher.  These are only used when Google Test falls back
+// to its own matcher (GTEST_USES_SIMPLE_RE); they are exposed with
+// GTEST_API_ so the unit tests can exercise them directly.
+GTEST_API_ bool IsInSet(char ch, const char* str);
+GTEST_API_ bool IsAsciiDigit(char ch);
+GTEST_API_ bool IsAsciiPunct(char ch);
+GTEST_API_ bool IsRepeat(char ch);
+GTEST_API_ bool IsAsciiWhiteSpace(char ch);
+GTEST_API_ bool IsAsciiWordChar(char ch);
+GTEST_API_ bool IsValidEscape(char ch);
+GTEST_API_ bool AtomMatchesChar(bool escaped, char pattern, char ch);
+GTEST_API_ bool ValidateRegex(const char* regex);
+GTEST_API_ bool MatchRegexAtHead(const char* regex, const char* str);
+GTEST_API_ bool MatchRepetitionAndRegexAtHead(
+    bool escaped, char ch, char repeat, const char* regex, const char* str);
+GTEST_API_ bool MatchRegexAnywhere(const char* regex, const char* str);
+
+#endif  // GTEST_USES_SIMPLE_RE
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.  The wchar_t overload handles wide-string
+// command lines.
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, char** argv);
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv);
+
+#if GTEST_HAS_DEATH_TEST
+
+// Returns the message describing the last system error, regardless of the
+// platform.
+GTEST_API_ std::string GetLastErrnoDescription();
+
+# if GTEST_OS_WINDOWS
+// Provides leak-safe Windows kernel handle ownership (RAII-style: the
+// destructor closes the owned handle via Reset()).
+class AutoHandle {
+ public:
+  AutoHandle() : handle_(INVALID_HANDLE_VALUE) {}
+  explicit AutoHandle(HANDLE handle) : handle_(handle) {}
+
+  ~AutoHandle() { Reset(); }
+
+  HANDLE Get() const { return handle_; }
+  void Reset() { Reset(INVALID_HANDLE_VALUE); }
+  // Takes ownership of the new handle, closing the previously owned one
+  // first.  Resetting to the handle already owned is a no-op (this also
+  // prevents self-reset from closing a live handle).
+  void Reset(HANDLE handle) {
+    if (handle != handle_) {
+      if (handle_ != INVALID_HANDLE_VALUE)
+        ::CloseHandle(handle_);
+      handle_ = handle;
+    }
+  }
+
+ private:
+  HANDLE handle_;  // The owned kernel handle, or INVALID_HANDLE_VALUE.
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(AutoHandle);
+};
+# endif  // GTEST_OS_WINDOWS
+
+// Attempts to parse a string into a positive integer pointed to by the
+// number parameter.  Returns true if that is possible.
+// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can use
+// it here.
+template <typename Integer>
+bool ParseNaturalNumber(const ::std::string& str, Integer* number) {
+  // Reject anything that does not start with a digit up front.  This
+  // sidesteps the "optional leading whitespace and plus or minus sign"
+  // behavior of the strtoXXX family, which is undesirable here.
+  if (str.empty() || !IsDigit(str[0])) {
+    return false;
+  }
+
+  errno = 0;
+  char* stop;
+
+  // BiggestConvertible is the largest integer type that system-provided
+  // string-to-number conversion routines can return.
+# if GTEST_OS_WINDOWS && !defined(__GNUC__)
+
+  // MSVC and C++ Builder define __int64 instead of the standard long long.
+  typedef unsigned __int64 BiggestConvertible;
+  const BiggestConvertible value = _strtoui64(str.c_str(), &stop, 10);
+
+# else
+
+  typedef unsigned long long BiggestConvertible;  // NOLINT
+  const BiggestConvertible value = strtoull(str.c_str(), &stop, 10);
+
+# endif  // GTEST_OS_WINDOWS && !defined(__GNUC__)
+
+  // The entire string must have been consumed, with no range error.
+  const bool consumed_all = (*stop == '\0') && (errno == 0);
+
+  // TODO(vladl@google.com): Convert this to compile time assertion when it is
+  // available.
+  GTEST_CHECK_(sizeof(Integer) <= sizeof(value));
+
+  // Narrow to the target type and make sure the value round-trips,
+  // i.e. it actually fits in Integer.
+  const Integer narrowed = static_cast<Integer>(value);
+  if (consumed_all && static_cast<BiggestConvertible>(narrowed) == value) {
+    *number = narrowed;
+    return true;
+  }
+  return false;
+}
+#endif  // GTEST_HAS_DEATH_TEST
+
+// TestResult contains some private methods that should be hidden from
+// Google Test user but are required for testing. This class allow our tests
+// to access them.
+//
+// This class is supplied only for the purpose of testing Google Test's own
+// constructs. Do not use it in user tests, either directly or indirectly.
+class TestResultAccessor {
+ public:
+  // Forwards to the private TestResult::RecordProperty().
+  static void RecordProperty(TestResult* test_result,
+                             const std::string& xml_element,
+                             const TestProperty& property) {
+    test_result->RecordProperty(xml_element, property);
+  }
+
+  // Forwards to the private TestResult::ClearTestPartResults().
+  static void ClearTestPartResults(TestResult* test_result) {
+    test_result->ClearTestPartResults();
+  }
+
+  // Exposes the private list of test part results for inspection.
+  static const std::vector<testing::TestPartResult>& test_part_results(
+      const TestResult& test_result) {
+    return test_result.test_part_results();
+  }
+};
+
+#if GTEST_CAN_STREAM_RESULTS_
+
+// Streams test results to the given port on the given host machine.
+// Events are sent as "key=value&key=value" lines, one per event.
+class StreamingListener : public EmptyTestEventListener {
+ public:
+  // Abstract base class for writing strings to a socket.
+  class AbstractSocketWriter {
+   public:
+    virtual ~AbstractSocketWriter() {}
+
+    // Sends a string to the socket.
+    virtual void Send(const string& message) = 0;
+
+    // Closes the socket.
+    virtual void CloseConnection() {}
+
+    // Sends a string and a newline to the socket.
+    void SendLn(const string& message) {
+      Send(message + "\n");
+    }
+  };
+
+  // Concrete class for actually writing strings to a socket.
+  class SocketWriter : public AbstractSocketWriter {
+   public:
+    // Connects eagerly: the constructor opens the connection.
+    SocketWriter(const string& host, const string& port)
+        : sockfd_(-1), host_name_(host), port_num_(port) {
+      MakeConnection();
+    }
+
+    virtual ~SocketWriter() {
+      if (sockfd_ != -1)
+        CloseConnection();
+    }
+
+    // Sends a string to the socket.  A short or failed write is only
+    // logged as a warning, not treated as fatal.
+    virtual void Send(const string& message) {
+      GTEST_CHECK_(sockfd_ != -1)
+          << "Send() can be called only when there is a connection.";
+
+      const int len = static_cast<int>(message.length());
+      if (write(sockfd_, message.c_str(), len) != len) {
+        GTEST_LOG_(WARNING)
+            << "stream_result_to: failed to stream to "
+            << host_name_ << ":" << port_num_;
+      }
+    }
+
+   private:
+    // Creates a client socket and connects to the server.
+    void MakeConnection();
+
+    // Closes the socket.
+    void CloseConnection() {
+      GTEST_CHECK_(sockfd_ != -1)
+          << "CloseConnection() can be called only when there is a connection.";
+
+      close(sockfd_);
+      sockfd_ = -1;
+    }
+
+    int sockfd_;  // socket file descriptor; -1 when not connected
+    const string host_name_;
+    const string port_num_;
+
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(SocketWriter);
+  };  // class SocketWriter
+
+  // Escapes '=', '&', '%', and '\n' characters in str as "%xx".
+  static string UrlEncode(const char* str);
+
+  StreamingListener(const string& host, const string& port)
+      : socket_writer_(new SocketWriter(host, port)) { Start(); }
+
+  // Takes ownership of socket_writer (held in a scoped_ptr below).
+  explicit StreamingListener(AbstractSocketWriter* socket_writer)
+      : socket_writer_(socket_writer) { Start(); }
+
+  void OnTestProgramStart(const UnitTest& /* unit_test */) {
+    SendLn("event=TestProgramStart");
+  }
+
+  void OnTestProgramEnd(const UnitTest& unit_test) {
+    // Note that Google Test currently only reports elapsed time for each
+    // test iteration, not for the entire test program.
+    SendLn("event=TestProgramEnd&passed=" + FormatBool(unit_test.Passed()));
+
+    // Notify the streaming server to stop.
+    socket_writer_->CloseConnection();
+  }
+
+  void OnTestIterationStart(const UnitTest& /* unit_test */, int iteration) {
+    SendLn("event=TestIterationStart&iteration=" +
+           StreamableToString(iteration));
+  }
+
+  void OnTestIterationEnd(const UnitTest& unit_test, int /* iteration */) {
+    SendLn("event=TestIterationEnd&passed=" +
+           FormatBool(unit_test.Passed()) + "&elapsed_time=" +
+           StreamableToString(unit_test.elapsed_time()) + "ms");
+  }
+
+  void OnTestCaseStart(const TestCase& test_case) {
+    SendLn(std::string("event=TestCaseStart&name=") + test_case.name());
+  }
+
+  void OnTestCaseEnd(const TestCase& test_case) {
+    SendLn("event=TestCaseEnd&passed=" + FormatBool(test_case.Passed())
+           + "&elapsed_time=" + StreamableToString(test_case.elapsed_time())
+           + "ms");
+  }
+
+  void OnTestStart(const TestInfo& test_info) {
+    SendLn(std::string("event=TestStart&name=") + test_info.name());
+  }
+
+  void OnTestEnd(const TestInfo& test_info) {
+    SendLn("event=TestEnd&passed=" +
+           FormatBool((test_info.result())->Passed()) +
+           "&elapsed_time=" +
+           StreamableToString((test_info.result())->elapsed_time()) + "ms");
+  }
+
+  void OnTestPartResult(const TestPartResult& test_part_result) {
+    const char* file_name = test_part_result.file_name();
+    if (file_name == NULL)
+      file_name = "";
+    SendLn("event=TestPartResult&file=" + UrlEncode(file_name) +
+           "&line=" + StreamableToString(test_part_result.line_number()) +
+           "&message=" + UrlEncode(test_part_result.message()));
+  }
+
+ private:
+  // Sends the given message and a newline to the socket.
+  void SendLn(const string& message) { socket_writer_->SendLn(message); }
+
+  // Called at the start of streaming to notify the receiver what
+  // protocol we are using.
+  void Start() { SendLn("gtest_streaming_protocol_version=1.0"); }
+
+  // Formats a bool as "1"/"0" for the wire protocol.
+  string FormatBool(bool value) { return value ? "1" : "0"; }
+
+  // The owned writer used to push events to the server.
+  const scoped_ptr<AbstractSocketWriter> socket_writer_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener);
+};  // class StreamingListener
+
+#endif  // GTEST_CAN_STREAM_RESULTS_
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_SRC_GTEST_INTERNAL_INL_H_
+#undef GTEST_IMPLEMENTATION_
+
+#if GTEST_OS_WINDOWS
+# define vsnprintf _vsnprintf
+#endif  // GTEST_OS_WINDOWS
+
+namespace testing {
+
+using internal::CountIf;
+using internal::ForEach;
+using internal::GetElementOr;
+using internal::Shuffle;
+
+// Constants.
+
+// A test whose test case name or test name matches this filter is
+// disabled and not run.
+static const char kDisableTestFilter[] = "DISABLED_*:*/DISABLED_*";
+
+// A test case whose name matches this filter is considered a death
+// test case and will be run before test cases whose name doesn't
+// match this filter.
+static const char kDeathTestCaseFilter[] = "*DeathTest:*DeathTest/*";
+
+// A test filter that matches everything.
+static const char kUniversalFilter[] = "*";
+
+// The default output file for XML output.
+static const char kDefaultOutputFile[] = "test_detail.xml";
+
+// The environment variable name for the test shard index.
+static const char kTestShardIndex[] = "GTEST_SHARD_INDEX";
+// The environment variable name for the total number of test shards.
+static const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS";
+// The environment variable name for the test shard status file.
+static const char kTestShardStatusFile[] = "GTEST_SHARD_STATUS_FILE";
+
+namespace internal {
+
+// The text used in failure messages to indicate the start of the
+// stack trace.
+const char kStackTraceMarker[] = "\nStack trace:\n";
+
+// g_help_flag is true iff the --help flag or an equivalent form is
+// specified on the command line.
+bool g_help_flag = false;
+
+}  // namespace internal
+
+// Returns the filter to use when --gtest_filter (or GTEST_FILTER) is
+// not specified: the universal filter "*", which matches every test.
+static const char* GetDefaultFilter() {
+  return kUniversalFilter;
+}
+
+GTEST_DEFINE_bool_(
+    also_run_disabled_tests,
+    internal::BoolFromGTestEnv("also_run_disabled_tests", false),
+    "Run disabled tests too, in addition to the tests normally being run.");
+
+GTEST_DEFINE_bool_(
+    break_on_failure,
+    internal::BoolFromGTestEnv("break_on_failure", false),
+    "True iff a failed assertion should be a debugger break-point.");
+
+GTEST_DEFINE_bool_(
+    catch_exceptions,
+    internal::BoolFromGTestEnv("catch_exceptions", true),
+    "True iff " GTEST_NAME_
+    " should catch exceptions and treat them as test failures.");
+
+GTEST_DEFINE_string_(
+    color,
+    internal::StringFromGTestEnv("color", "auto"),
+    "Whether to use colors in the output.  Valid values: yes, no, "
+    "and auto.  'auto' means to use colors if the output is "
+    "being sent to a terminal and the TERM environment variable "
+    "is set to a terminal type that supports colors.");
+
+GTEST_DEFINE_string_(
+    filter,
+    internal::StringFromGTestEnv("filter", GetDefaultFilter()),
+    "A colon-separated list of glob (not regex) patterns "
+    "for filtering the tests to run, optionally followed by a "
+    "'-' and a : separated list of negative patterns (tests to "
+    "exclude).  A test is run if it matches one of the positive "
+    "patterns and does not match any of the negative patterns.");
+
+GTEST_DEFINE_bool_(list_tests, false,
+                   "List all tests without running them.");
+
+GTEST_DEFINE_string_(
+    output,
+    internal::StringFromGTestEnv("output", ""),
+    "A format (currently must be \"xml\"), optionally followed "
+    "by a colon and an output file name or directory. A directory "
+    "is indicated by a trailing pathname separator. "
+    "Examples: \"xml:filename.xml\", \"xml::directoryname/\". "
+    "If a directory is specified, output files will be created "
+    "within that directory, with file-names based on the test "
+    "executable's name and, if necessary, made unique by adding "
+    "digits.");
+
+GTEST_DEFINE_bool_(
+    print_time,
+    internal::BoolFromGTestEnv("print_time", true),
+    "True iff " GTEST_NAME_
+    " should display elapsed time in text output.");
+
+GTEST_DEFINE_int32_(
+    random_seed,
+    internal::Int32FromGTestEnv("random_seed", 0),
+    "Random number seed to use when shuffling test orders.  Must be in range "
+    "[1, 99999], or 0 to use a seed based on the current time.");
+
+GTEST_DEFINE_int32_(
+    repeat,
+    internal::Int32FromGTestEnv("repeat", 1),
+    "How many times to repeat each test.  Specify a negative number "
+    "for repeating forever.  Useful for shaking out flaky tests.");
+
+GTEST_DEFINE_bool_(
+    show_internal_stack_frames, false,
+    "True iff " GTEST_NAME_ " should include internal stack frames when "
+    "printing test failure stack traces.");
+
+GTEST_DEFINE_bool_(
+    shuffle,
+    internal::BoolFromGTestEnv("shuffle", false),
+    "True iff " GTEST_NAME_
+    " should randomize tests' order on every run.");
+
+GTEST_DEFINE_int32_(
+    stack_trace_depth,
+    internal::Int32FromGTestEnv("stack_trace_depth", kMaxStackTraceDepth),
+    "The maximum number of stack frames to print when an "
+    "assertion fails.  The valid range is 0 through 100, inclusive.");
+
+GTEST_DEFINE_string_(
+    stream_result_to,
+    internal::StringFromGTestEnv("stream_result_to", ""),
+    "This flag specifies the host name and the port number on which to stream "
+    "test results. Example: \"localhost:555\". The flag is effective only on "
+    "Linux.");
+
+GTEST_DEFINE_bool_(
+    throw_on_failure,
+    internal::BoolFromGTestEnv("throw_on_failure", false),
+    "When this flag is specified, a failed assertion will throw an exception "
+    "if exceptions are enabled or exit the program with a non-zero code "
+    "otherwise.");
+
+namespace internal {
+
+// Generates a random number from [0, range), using a Linear
+// Congruential Generator (LCG).  Crashes if 'range' is 0 or greater
+// than kMaxRange.
+UInt32 Random::Generate(UInt32 range) {
+  // These constants are the same as are used in glibc's rand(3).
+  // Note: the internal state is advanced even if the range checks
+  // below fail.
+  state_ = (1103515245U*state_ + 12345U) % kMaxRange;
+
+  GTEST_CHECK_(range > 0)
+      << "Cannot generate a number in the range [0, 0).";
+  GTEST_CHECK_(range <= kMaxRange)
+      << "Generation of a number in [0, " << range << ") was requested, "
+      << "but this can only generate numbers in [0, " << kMaxRange << ").";
+
+  // Converting via modulus introduces a bit of downward bias, but
+  // it's simple, and a linear congruential generator isn't too good
+  // to begin with.
+  return state_ % range;
+}
+
+// GTestIsInitialized() returns true iff the user has initialized
+// Google Test.  Useful for catching the user mistake of not initializing
+// Google Test before calling RUN_ALL_TESTS().
+//
+// A user must call testing::InitGoogleTest() to initialize Google
+// Test.  g_init_gtest_count is set to the number of times
+// InitGoogleTest() has been called.  We don't protect this variable
+// under a mutex as it is only accessed in the main thread.
+GTEST_API_ int g_init_gtest_count = 0;
+static bool GTestIsInitialized() { return g_init_gtest_count != 0; }
+
+// Iterates over a vector of TestCases, keeping a running sum of the
+// results of calling a given int-returning method on each.
+// Returns the sum.
+static int SumOverTestCaseList(const std::vector<TestCase*>& case_list,
+                               int (TestCase::*method)() const) {
+  // Accumulates the result of invoking the pointer-to-member 'method'
+  // on every test case in the list.
+  int sum = 0;
+  for (size_t i = 0; i < case_list.size(); i++) {
+    sum += (case_list[i]->*method)();
+  }
+  return sum;
+}
+
+// Returns true iff the test case passed.
+static bool TestCasePassed(const TestCase* test_case) {
+  return test_case->should_run() && test_case->Passed();
+}
+
+// Returns true iff the test case failed.
+static bool TestCaseFailed(const TestCase* test_case) {
+  return test_case->should_run() && test_case->Failed();
+}
+
+// Returns true iff test_case contains at least one test that should
+// run.
+static bool ShouldRunTestCase(const TestCase* test_case) {
+  return test_case->should_run();
+}
+
+// AssertHelper constructor.
+AssertHelper::AssertHelper(TestPartResult::Type type,
+                           const char* file,
+                           int line,
+                           const char* message)
+    : data_(new AssertHelperData(type, file, line, message)) {
+}
+
+AssertHelper::~AssertHelper() {
+  delete data_;
+}
+
+// Message assignment, for assertion streaming support.
+void AssertHelper::operator=(const Message& message) const {
+  UnitTest::GetInstance()->
+    AddTestPartResult(data_->type, data_->file, data_->line,
+                      AppendUserMessage(data_->message, message),
+                      UnitTest::GetInstance()->impl()
+                      ->CurrentOsStackTraceExceptTop(1)
+                      // Skips the stack frame for this function itself.
+                      );  // NOLINT
+}
+
+// Mutex for linked pointers.
+GTEST_API_ GTEST_DEFINE_STATIC_MUTEX_(g_linked_ptr_mutex);
+
+// Application pathname gotten in InitGoogleTest.
+std::string g_executable_path;
+
+// Returns the current application's name, removing directory path if that
+// is present.
+FilePath GetCurrentExecutableName() {
+  FilePath result;
+
+#if GTEST_OS_WINDOWS
+  // On Windows, strip a trailing ".exe" extension before removing the
+  // directory part of the path.
+  result.Set(FilePath(g_executable_path).RemoveExtension("exe"));
+#else
+  result.Set(FilePath(g_executable_path));
+#endif  // GTEST_OS_WINDOWS
+
+  // g_executable_path is recorded in InitGoogleTest (see the comment
+  // on its declaration above).
+  return result.RemoveDirectoryName();
+}
+
+// Functions for processing the gtest_output flag.
+
+// Returns the output format, or "" for normal printed output.
+std::string UnitTestOptions::GetOutputFormat() {
+  const char* const gtest_output_flag = GTEST_FLAG(output).c_str();
+  // NOTE(review): std::string::c_str() never returns NULL, so this
+  // check can never fire; kept as-is to preserve the patch.
+  if (gtest_output_flag == NULL) return std::string("");
+
+  // The format is everything before the first ':' (e.g. "xml" in
+  // "xml:out.xml"); with no ':' the whole flag is the format.
+  const char* const colon = strchr(gtest_output_flag, ':');
+  return (colon == NULL) ?
+      std::string(gtest_output_flag) :
+      std::string(gtest_output_flag, colon - gtest_output_flag);
+}
+
+// Returns the name of the requested output file, or the default if none
+// was explicitly specified.
+std::string UnitTestOptions::GetAbsolutePathToOutputFile() {
+  const char* const gtest_output_flag = GTEST_FLAG(output).c_str();
+  // NOTE(review): std::string::c_str() never returns NULL, so this
+  // guard is dead code; kept as-is.
+  if (gtest_output_flag == NULL)
+    return "";
+
+  const char* const colon = strchr(gtest_output_flag, ':');
+  if (colon == NULL)
+    // No file name given after the format: use the default output file
+    // (kDefaultOutputFile) in the original working directory.
+    return internal::FilePath::ConcatPaths(
+        internal::FilePath(
+            UnitTest::GetInstance()->original_working_dir()),
+        internal::FilePath(kDefaultOutputFile)).string();
+
+  internal::FilePath output_name(colon + 1);
+  if (!output_name.IsAbsolutePath())
+    // TODO(wan@google.com): on Windows \some\path is not an absolute
+    // path (as its meaning depends on the current drive), yet the
+    // following logic for turning it into an absolute path is wrong.
+    // Fix it.
+    output_name = internal::FilePath::ConcatPaths(
+        internal::FilePath(UnitTest::GetInstance()->original_working_dir()),
+        internal::FilePath(colon + 1));
+
+  if (!output_name.IsDirectory())
+    return output_name.string();
+
+  // A trailing path separator means a directory was specified: generate
+  // a unique file name inside it based on the executable's name.
+  internal::FilePath result(internal::FilePath::GenerateUniqueFileName(
+      output_name, internal::GetCurrentExecutableName(),
+      GetOutputFormat().c_str()));
+  return result.string();
+}
+
+// Returns true iff the wildcard pattern matches the string.  The
+// first ':' or '\0' character in pattern marks the end of it.
+//
+// This recursive algorithm isn't very efficient, but is clear and
+// works well enough for matching test names, which are short.
+// (The '*' case branches twice per character, so the worst case is
+// exponential in the number of '*'s -- fine for short test names.)
+bool UnitTestOptions::PatternMatchesString(const char *pattern,
+                                           const char *str) {
+  switch (*pattern) {
+    case '\0':
+    case ':':  // Either ':' or '\0' marks the end of the pattern.
+      return *str == '\0';
+    case '?':  // Matches any single character.
+      return *str != '\0' && PatternMatchesString(pattern + 1, str + 1);
+    case '*':  // Matches any string (possibly empty) of characters.
+      return (*str != '\0' && PatternMatchesString(pattern, str + 1)) ||
+          PatternMatchesString(pattern + 1, str);
+    default:  // Non-special character.  Matches itself.
+      return *pattern == *str &&
+          PatternMatchesString(pattern + 1, str + 1);
+  }
+}
+
+// Returns true iff 'name' matches at least one of the ':'-separated
+// glob patterns in 'filter'.
+bool UnitTestOptions::MatchesFilter(
+    const std::string& name, const char* filter) {
+  const char *cur_pattern = filter;
+  for (;;) {
+    if (PatternMatchesString(cur_pattern, name.c_str())) {
+      return true;
+    }
+
+    // Finds the next pattern in the filter.
+    cur_pattern = strchr(cur_pattern, ':');
+
+    // Returns if no more pattern can be found.
+    if (cur_pattern == NULL) {
+      return false;
+    }
+
+    // Skips the pattern separator (the ':' character).
+    cur_pattern++;
+  }
+}
+
+// Returns true iff the user-specified filter matches the test case
+// name and the test name.
+bool UnitTestOptions::FilterMatchesTest(const std::string &test_case_name,
+                                        const std::string &test_name) {
+  // NOTE(review): the .c_str() on test_name is redundant (operator+
+  // accepts std::string directly) but harmless.
+  const std::string& full_name = test_case_name + "." + test_name.c_str();
+
+  // Split --gtest_filter at '-', if there is one, to separate into
+  // positive filter and negative filter portions
+  const char* const p = GTEST_FLAG(filter).c_str();
+  const char* const dash = strchr(p, '-');
+  std::string positive;
+  std::string negative;
+  if (dash == NULL) {
+    positive = GTEST_FLAG(filter).c_str();  // Whole string is a positive filter
+    negative = "";
+  } else {
+    positive = std::string(p, dash);   // Everything up to the dash
+    negative = std::string(dash + 1);  // Everything after the dash
+    if (positive.empty()) {
+      // Treat '-test1' as the same as '*-test1'
+      positive = kUniversalFilter;
+    }
+  }
+
+  // A filter is a colon-separated list of patterns.  It matches a
+  // test if any pattern in it matches the test.
+  return (MatchesFilter(full_name, positive.c_str()) &&
+          !MatchesFilter(full_name, negative.c_str()));
+}
+
+#if GTEST_HAS_SEH
+// Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
+// given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
+// This function is useful as an __except condition.
+int UnitTestOptions::GTestShouldProcessSEH(DWORD exception_code) {
+  // Google Test should handle a SEH exception if:
+  //   1. the user wants it to, AND
+  //   2. this is not a breakpoint exception, AND
+  //   3. this is not a C++ exception (VC++ implements them via SEH,
+  //      apparently).
+  //
+  // SEH exception code for C++ exceptions.
+  // (see http://support.microsoft.com/kb/185294 for more information).
+  const DWORD kCxxExceptionCode = 0xe06d7363;
+
+  bool should_handle = true;
+
+  // Any one of the conditions below vetoes handling.
+  if (!GTEST_FLAG(catch_exceptions))
+    should_handle = false;
+  else if (exception_code == EXCEPTION_BREAKPOINT)
+    should_handle = false;
+  else if (exception_code == kCxxExceptionCode)
+    should_handle = false;
+
+  return should_handle ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH;
+}
+#endif  // GTEST_HAS_SEH
+
+}  // namespace internal
+
+// The c'tor sets this object as the test part result reporter used by
+// Google Test.  The 'result' parameter specifies where to report the
+// results. Intercepts only failures from the current thread.
+ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(
+    TestPartResultArray* result)
+    : intercept_mode_(INTERCEPT_ONLY_CURRENT_THREAD),
+      result_(result) {
+  Init();
+}
+
+// The c'tor sets this object as the test part result reporter used by
+// Google Test.  The 'result' parameter specifies where to report the
+// results.
+ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(
+    InterceptMode intercept_mode, TestPartResultArray* result)
+    : intercept_mode_(intercept_mode),
+      result_(result) {
+  Init();
+}
+
+// Registers this object as the active test part result reporter --
+// globally (all threads) or only for the current thread, depending on
+// intercept_mode_ -- and remembers the previous reporter so the
+// destructor can restore it.
+void ScopedFakeTestPartResultReporter::Init() {
+  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+  if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
+    old_reporter_ = impl->GetGlobalTestPartResultReporter();
+    impl->SetGlobalTestPartResultReporter(this);
+  } else {
+    old_reporter_ = impl->GetTestPartResultReporterForCurrentThread();
+    impl->SetTestPartResultReporterForCurrentThread(this);
+  }
+}
+
+// The d'tor restores the test part result reporter used by Google Test
+// before.
+ScopedFakeTestPartResultReporter::~ScopedFakeTestPartResultReporter() {
+  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+  if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
+    impl->SetGlobalTestPartResultReporter(old_reporter_);
+  } else {
+    impl->SetTestPartResultReporterForCurrentThread(old_reporter_);
+  }
+}
+
+// Increments the test part result count and remembers the result.
+// This method is from the TestPartResultReporterInterface interface.
+void ScopedFakeTestPartResultReporter::ReportTestPartResult(
+    const TestPartResult& result) {
+  result_->Append(result);
+}
+
+namespace internal {
+
+// Returns the type ID of ::testing::Test.  We should always call this
+// instead of GetTypeId< ::testing::Test>() to get the type ID of
+// testing::Test.  This is to work around a suspected linker bug when
+// using Google Test as a framework on Mac OS X.  The bug causes
+// GetTypeId< ::testing::Test>() to return different values depending
+// on whether the call is from the Google Test framework itself or
+// from user test code.  GetTestTypeId() is guaranteed to always
+// return the same value, as it always calls GetTypeId<>() from the
+// gtest.cc, which is within the Google Test framework.
+TypeId GetTestTypeId() {
+  return GetTypeId<Test>();
+}
+
+// The value of GetTestTypeId() as seen from within the Google Test
+// library.  This is solely for testing GetTestTypeId().
+extern const TypeId kTestTypeIdInGoogleTest = GetTestTypeId();
+
+// This predicate-formatter checks that 'results' contains a test part
+// failure of the given type and that the failure message contains the
+// given substring.
+AssertionResult HasOneFailure(const char* /* results_expr */,
+                              const char* /* type_expr */,
+                              const char* /* substr_expr */,
+                              const TestPartResultArray& results,
+                              TestPartResult::Type type,
+                              const string& substr) {
+  const std::string expected(type == TestPartResult::kFatalFailure ?
+                        "1 fatal failure" :
+                        "1 non-fatal failure");
+  Message msg;
+  if (results.size() != 1) {
+    msg << "Expected: " << expected << "\n"
+        << "  Actual: " << results.size() << " failures";
+    // NOTE(review): the int index presumes TestPartResultArray::size()
+    // returns int -- verify against its declaration.
+    for (int i = 0; i < results.size(); i++) {
+      msg << "\n" << results.GetTestPartResult(i);
+    }
+    return AssertionFailure() << msg;
+  }
+
+  // Exactly one result: check that its type matches ...
+  const TestPartResult& r = results.GetTestPartResult(0);
+  if (r.type() != type) {
+    return AssertionFailure() << "Expected: " << expected << "\n"
+                              << "  Actual:\n"
+                              << r;
+  }
+
+  // ... and that its message contains the expected substring.
+  if (strstr(r.message(), substr.c_str()) == NULL) {
+    return AssertionFailure() << "Expected: " << expected << " containing \""
+                              << substr << "\"\n"
+                              << "  Actual:\n"
+                              << r;
+  }
+
+  return AssertionSuccess();
+}
+
+// The constructor of SingleFailureChecker remembers where to look up
+// test part results, what type of failure we expect, and what
+// substring the failure message should contain.
+SingleFailureChecker:: SingleFailureChecker(
+    const TestPartResultArray* results,
+    TestPartResult::Type type,
+    const string& substr)
+    : results_(results),
+      type_(type),
+      substr_(substr) {}
+
+// The destructor of SingleFailureChecker verifies that the given
+// TestPartResultArray contains exactly one failure that has the given
+// type and contains the given substring.  If that's not the case, a
+// non-fatal failure will be generated.
+SingleFailureChecker::~SingleFailureChecker() {
+  EXPECT_PRED_FORMAT3(HasOneFailure, *results_, type_, substr_);
+}
+
+DefaultGlobalTestPartResultReporter::DefaultGlobalTestPartResultReporter(
+    UnitTestImpl* unit_test) : unit_test_(unit_test) {}
+
+void DefaultGlobalTestPartResultReporter::ReportTestPartResult(
+    const TestPartResult& result) {
+  unit_test_->current_test_result()->AddTestPartResult(result);
+  unit_test_->listeners()->repeater()->OnTestPartResult(result);
+}
+
+DefaultPerThreadTestPartResultReporter::DefaultPerThreadTestPartResultReporter(
+    UnitTestImpl* unit_test) : unit_test_(unit_test) {}
+
+void DefaultPerThreadTestPartResultReporter::ReportTestPartResult(
+    const TestPartResult& result) {
+  unit_test_->GetGlobalTestPartResultReporter()->ReportTestPartResult(result);
+}
+
+// Returns the global test part result reporter.
+TestPartResultReporterInterface*
+UnitTestImpl::GetGlobalTestPartResultReporter() {
+  internal::MutexLock lock(&global_test_part_result_reporter_mutex_);
+  return global_test_part_result_repoter_;
+}
+
+// Sets the global test part result reporter.
+void UnitTestImpl::SetGlobalTestPartResultReporter(
+    TestPartResultReporterInterface* reporter) {
+  internal::MutexLock lock(&global_test_part_result_reporter_mutex_);
+  global_test_part_result_repoter_ = reporter;
+}
+
+// Returns the test part result reporter for the current thread.
+TestPartResultReporterInterface*
+UnitTestImpl::GetTestPartResultReporterForCurrentThread() {
+  return per_thread_test_part_result_reporter_.get();
+}
+
+// Sets the test part result reporter for the current thread.
+void UnitTestImpl::SetTestPartResultReporterForCurrentThread(
+    TestPartResultReporterInterface* reporter) {
+  per_thread_test_part_result_reporter_.set(reporter);
+}
+
+// Gets the number of successful test cases.
+int UnitTestImpl::successful_test_case_count() const {
+  return CountIf(test_cases_, TestCasePassed);
+}
+
+// Gets the number of failed test cases.
+int UnitTestImpl::failed_test_case_count() const {
+  return CountIf(test_cases_, TestCaseFailed);
+}
+
+// Gets the number of all test cases.
+int UnitTestImpl::total_test_case_count() const {
+  return static_cast<int>(test_cases_.size());
+}
+
+// Gets the number of all test cases that contain at least one test
+// that should run.
+int UnitTestImpl::test_case_to_run_count() const {
+  return CountIf(test_cases_, ShouldRunTestCase);
+}
+
+// Gets the number of successful tests.
+int UnitTestImpl::successful_test_count() const {
+  return SumOverTestCaseList(test_cases_, &TestCase::successful_test_count);
+}
+
+// Gets the number of failed tests.
+int UnitTestImpl::failed_test_count() const {
+  return SumOverTestCaseList(test_cases_, &TestCase::failed_test_count);
+}
+
+// Gets the number of disabled tests that will be reported in the XML report.
+int UnitTestImpl::reportable_disabled_test_count() const {
+  return SumOverTestCaseList(test_cases_,
+                             &TestCase::reportable_disabled_test_count);
+}
+
+// Gets the number of disabled tests.
+int UnitTestImpl::disabled_test_count() const {
+  return SumOverTestCaseList(test_cases_, &TestCase::disabled_test_count);
+}
+
+// Gets the number of tests to be printed in the XML report.
+int UnitTestImpl::reportable_test_count() const {
+  return SumOverTestCaseList(test_cases_, &TestCase::reportable_test_count);
+}
+
+// Gets the number of all tests.
+int UnitTestImpl::total_test_count() const {
+  return SumOverTestCaseList(test_cases_, &TestCase::total_test_count);
+}
+
+// Gets the number of tests that should run.
+int UnitTestImpl::test_to_run_count() const {
+  return SumOverTestCaseList(test_cases_, &TestCase::test_to_run_count);
+}
+
+// Returns the current OS stack trace as an std::string.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag.  The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// CurrentOsStackTraceExceptTop(1), Foo() will be included in the
+// trace but Bar() and CurrentOsStackTraceExceptTop() won't.
+std::string UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) {
+  // Currently a stub: stack walking is not implemented here, so an
+  // empty trace is returned and skip_count is deliberately unused.
+  (void)skip_count;
+  return "";
+}
+
+// Returns the current time in milliseconds.
+TimeInMillis GetTimeInMillis() {
+#if GTEST_OS_WINDOWS_MOBILE || defined(__BORLANDC__)
+  // Difference between 1970-01-01 and 1601-01-01 in milliseconds.
+  // http://analogous.blogspot.com/2005/04/epoch.html
+  const TimeInMillis kJavaEpochToWinFileTimeDelta =
+    static_cast<TimeInMillis>(116444736UL) * 100000UL;
+  const DWORD kTenthMicrosInMilliSecond = 10000;
+
+  SYSTEMTIME now_systime;
+  FILETIME now_filetime;
+  ULARGE_INTEGER now_int64;
+  // TODO(kenton@google.com): Shouldn't this just use
+  //   GetSystemTimeAsFileTime()?
+  GetSystemTime(&now_systime);
+  if (SystemTimeToFileTime(&now_systime, &now_filetime)) {
+    now_int64

<TRUNCATED>

[5/7] marmotta git commit: move experimental C++ LevelDB backend into Apache Marmotta main, and name the new module "ostrich" as an analogy to "kiwi"

Posted by ss...@apache.org.
http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/service/sparql.proto
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/service/sparql.proto b/libraries/ostrich/backend/service/sparql.proto
new file mode 100644
index 0000000..892539b
--- /dev/null
+++ b/libraries/ostrich/backend/service/sparql.proto
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+syntax = "proto3";
+
+package marmotta.sparql.proto;
+
+option java_package = "org.apache.marmotta.ostrich.client.proto";
+
+import "model.proto";
+
+// SPARQL request consisting of a single query string.
+message SparqlRequest {
+    string query = 1;
+}
+
+// SPARQL response row, containing a set of bindings.
+message SparqlResponse {
+    // A single variable-to-value binding within one result row.
+    message Binding {
+        string variable = 1;
+        marmotta.rdf.proto.Value value = 2;
+    }
+
+    // All bindings of one result row.
+    repeated Binding binding = 1;
+}
+
+// Interface describing services that allow to evaluate sparql queries.
+service SparqlService {
+    // Execute a SPARQL 1.1 tuple query and stream back the results.
+    rpc TupleQuery(SparqlRequest) returns (stream SparqlResponse);
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/sharding/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/sharding/CMakeLists.txt b/libraries/ostrich/backend/sharding/CMakeLists.txt
new file mode 100644
index 0000000..9afb0ec
--- /dev/null
+++ b/libraries/ostrich/backend/sharding/CMakeLists.txt
@@ -0,0 +1,10 @@
+# Pull in headers from the backend source root plus the generated
+# protobuf/gRPC sources for the model and service definitions.
+include_directories(
+        .. ${CMAKE_CURRENT_BINARY_DIR}/..
+        ${CMAKE_CURRENT_BINARY_DIR}/../model ${CMAKE_CURRENT_BINARY_DIR}/../service)
+
+# The sharding proxy server executable.
+add_executable(marmotta_sharding sharding.cc sharding.h server.cc)
+target_link_libraries(marmotta_sharding
+        marmotta_util marmotta_model marmotta_service
+        ${LevelDB_LIBRARY} ${GFLAGS_LIBRARY} ${GLOG_LIBRARY}
+        ${CMAKE_THREAD_LIBS_INIT} ${PROTOBUF_LIBRARIES} ${GRPC_LIBRARIES} ${Tcmalloc_LIBRARIES})
+

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/sharding/server.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/sharding/server.cc b/libraries/ostrich/backend/sharding/server.cc
new file mode 100644
index 0000000..cf7aa6e
--- /dev/null
+++ b/libraries/ostrich/backend/sharding/server.cc
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <iostream>
+
+#include <gflags/gflags.h>
+#include <glog/logging.h>
+
+#include "util/split.h"
+#include "sharding/sharding.h"
+
+using grpc::Status;
+using grpc::Server;
+using grpc::ServerBuilder;
+
+
+DEFINE_string(host, "0.0.0.0", "address/name of server to access.");
+DEFINE_string(port, "10000", "port of server to access.");
+DEFINE_string(backends, "",
+              "comma-separated list of host:port pairs of backends to use");
+
+std::unique_ptr<Server> server;
+
+// Signal handler: gracefully shuts down the gRPC server, if started.
+// NOTE(review): neither Server::Shutdown() nor LOG() is documented as
+// async-signal-safe; calling them from a signal handler should be
+// verified.
+void stopServer(int signal) {
+    if (server.get() != nullptr) {
+        LOG(INFO) << "Persistence Server shutting down";
+        server->Shutdown();
+    }
+}
+
+// Entry point: parses flags, builds the sharding service over the
+// configured backends, and serves until SIGINT/SIGTERM.
+int main(int argc, char** argv) {
+    // Initialize Google's logging library.
+    google::InitGoogleLogging(argv[0]);
+    google::ParseCommandLineFlags(&argc, &argv, true);
+
+    // Build the sharding service from the comma-separated backend list
+    // given via --backends.
+    marmotta::sharding::ShardingService service(
+            marmotta::util::split(FLAGS_backends, ','));
+
+    ServerBuilder builder;
+    builder.AddListeningPort(FLAGS_host + ":" + FLAGS_port, grpc::InsecureServerCredentials());
+    builder.RegisterService(&service);
+
+    server = builder.BuildAndStart();
+    LOG(INFO) << "Sharding Server listening on " << FLAGS_host << ":" << FLAGS_port << std::endl;
+
+    // Install handlers so SIGINT/SIGTERM trigger a clean Shutdown().
+    // NOTE(review): signal() requires <csignal>/<signal.h>, which is
+    // not visibly included in this file -- presumably pulled in
+    // transitively. Verify.
+    signal(SIGINT, stopServer);
+    signal(SIGTERM, stopServer);
+
+    // Blocks until Shutdown() is called from the signal handler.
+    server->Wait();
+
+    return 0;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/sharding/sharding.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/sharding/sharding.cc b/libraries/ostrich/backend/sharding/sharding.cc
new file mode 100644
index 0000000..0a865c5
--- /dev/null
+++ b/libraries/ostrich/backend/sharding/sharding.cc
@@ -0,0 +1,335 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <cstdlib>
+#include <thread>
+#include <unordered_set>
+#include <glog/logging.h>
+
+#include <grpc++/channel.h>
+#include <grpc++/client_context.h>
+#include <grpc++/create_channel.h>
+#include <grpc++/security/credentials.h>
+#include <grpc++/support/sync_stream.h>
+
+#include "sharding/sharding.h"
+#include "model/rdf_model.h"
+#include "model/rdf_operators.h"
+
+using grpc::Channel;
+using grpc::ClientContext;
+using grpc::ClientReader;
+using grpc::ClientReaderWriter;
+using grpc::ClientWriter;
+using grpc::Status;
+using grpc::ServerContext;
+using grpc::ServerReader;
+using grpc::ServerWriter;
+using marmotta::rdf::proto::Namespace;
+using marmotta::rdf::proto::Resource;
+using marmotta::rdf::proto::Statement;
+using marmotta::service::proto::ContextRequest;
+using marmotta::service::proto::SailService;
+using marmotta::service::proto::UpdateRequest;
+using marmotta::service::proto::UpdateResponse;
+using google::protobuf::Empty;
+using google::protobuf::Int64Value;
+
+namespace marmotta {
+namespace sharding {
+
+// A templated fanout function, forwarding the same request to all backends and collecting
+// Int64Value responses by summing them up.
+//
+// Each backend is queried on its own thread. Partial results are written to
+// disjoint slots of pre-sized vectors, so no locking is needed. Returns the
+// first non-OK backend status, or OK if all backends succeeded.
+template<typename Request,
+        Status (SailService::Stub::*ClientMethod)(ClientContext*, const Request&, Int64Value*)>
+Status Fanout(const Request& request, ShardingService::ChannelList &backends, Int64Value *result) {
+    auto start = std::chrono::steady_clock::now();
+    std::vector<std::thread> threads;
+    std::vector<Status> statuses(backends.size());
+    // Per-backend partial sums. The original accumulated into a single
+    // int64_t shared by all threads without synchronization — a data race.
+    std::vector<int64_t> partials(backends.size(), 0);
+
+    for (int i=0; i<backends.size(); i++) {
+        threads.push_back(std::thread([i, &backends, &statuses, &partials, &request]() {
+            ClientContext localctx;
+            Int64Value response;
+            auto stub = svc::SailService::NewStub(backends[i]);
+            statuses[i] = ((*stub).*ClientMethod)(&localctx, request, &response);
+            partials[i] = response.value();
+        }));
+    }
+
+    // need to wait until all are completed now.
+    for (auto& t : threads) {
+        t.join();
+    }
+
+    // Safe to sum single-threaded after the join.
+    int64_t r = 0;
+    for (int64_t p : partials) {
+        r += p;
+    }
+    result->set_value(r);
+
+    for (const auto& s : statuses) {
+        if (!s.ok())
+            return s;
+    }
+
+    DLOG(INFO) << "Fanout operation done (time="
+               << std::chrono::duration <double, std::milli> (
+            std::chrono::steady_clock::now() - start).count()
+               << "ms).";
+
+    return Status::OK;
+};
+
+ShardingService::ShardingService(std::vector<std::string> backends) : backends(backends) {
+    // Open one gRPC channel per configured backend up front; client stubs
+    // are later created on demand from these shared channels (see makeStub).
+    for (const std::string& addr : this->backends) {
+        LOG(INFO) << "Establishing channel to " << addr;
+        channels.push_back(grpc::CreateChannel(addr, grpc::InsecureChannelCredentials()));
+    }
+}
+
+grpc::Status ShardingService::AddNamespaces(
+        ServerContext *context, ServerReader<Namespace> *reader, Int64Value *result) {
+
+    std::vector<ClientContext> contexts(backends.size());
+    std::vector<Int64Value> stats(backends.size());
+
+    StubList stubs;
+    WriterList <Namespace> writers;
+
+    // Open one streaming AddNamespaces call per backend; namespaces are
+    // replicated to every backend so lookups can be served by any of them.
+    for (int i=0; i<backends.size(); i++) {
+        stubs.push_back(makeStub(i));
+        writers.push_back(stubs.back()->AddNamespaces(&contexts[i], &stats[i]));
+    }
+
+    // Iterate over all incoming namespaces and forward each to all backends.
+    Namespace ns;
+    while (reader->Read(&ns)) {
+        DLOG(INFO) << "Adding namespace " << ns.DebugString();
+        for (auto& w : writers) {
+            w->Write(ns);
+        }
+    }
+
+    // Close all streams and propagate the first backend failure; the
+    // original discarded the Finish() status, silently swallowing errors.
+    Status err = Status::OK;
+    for (auto& w : writers) {
+        w->WritesDone();
+        Status s = w->Finish();
+        if (!s.ok() && err.ok()) {
+            err = s;
+        }
+    }
+
+    // Every backend received the same namespaces, so one backend's count
+    // is the overall count.
+    result->set_value(stats[0].value());
+
+    return err;
+}
+
+
+Status ShardingService::GetNamespace(
+        ServerContext *context, const Namespace *pattern, Namespace *result) {
+    // Namespaces are replicated to all backends, so any one of them can
+    // answer; pick one at random for simple load spreading.
+    int bucket = rand() % backends.size();
+    auto stub = makeStub(bucket);
+
+    ClientContext ctx;
+    return stub->GetNamespace(&ctx, *pattern, result);
+}
+
+Status ShardingService::GetNamespaces(
+        ServerContext *context, const Empty *ignored, ServerWriter<Namespace> *result) {
+    // Namespaces live on every backend; stream the answer of one randomly
+    // chosen backend straight through to the caller.
+    int bucket = rand() % backends.size();
+    auto stub = makeStub(bucket);
+
+    ClientContext ctx;
+    auto reader = stub->GetNamespaces(&ctx, *ignored);
+
+    Namespace ns;
+    while (reader->Read(&ns)) {
+        result->Write(ns);
+    }
+
+    // Surface the backend's final status to the caller.
+    return reader->Finish();
+}
+
+Status ShardingService::AddStatements(
+        ServerContext *context, ServerReader<Statement> *reader, Int64Value *result) {
+    std::vector<ClientContext> contexts(backends.size());
+    std::vector<Int64Value> responses(backends.size());
+
+    // Open one streaming AddStatements call per backend.
+    StubList stubs;
+    WriterList<Statement> writers;
+    for (int i=0; i<backends.size(); i++) {
+        stubs.push_back(makeStub(i));
+        writers.push_back(Writer<Statement>(
+                stubs.back()->AddStatements(&contexts[i], &responses[i])));
+    }
+
+    // Shard statements by a hash of the statement message, so the same
+    // statement always lands on the same backend.
+    std::hash<Statement> stmt_hash;
+
+    Statement stmt;
+    while (reader->Read(&stmt)) {
+        size_t bucket = stmt_hash(stmt) % backends.size();
+
+        DLOG(INFO) << "Shard " << bucket << ": Adding statement " << stmt.DebugString();
+        writers[bucket]->Write(stmt);
+    }
+
+    // Close all streams and propagate the first backend failure; the
+    // original discarded the Finish() status, silently swallowing errors.
+    Status err = Status::OK;
+    for (auto& w : writers) {
+        w->WritesDone();
+        Status s = w->Finish();
+        if (!s.ok() && err.ok()) {
+            err = s;
+        }
+    }
+
+    // Total number added is the sum over all shards.
+    for (auto& r : responses) {
+        result->set_value(result->value() + r.value());
+    }
+
+    return err;
+}
+
+// Retrieve statements matching the pattern by querying all shards in
+// parallel and multiplexing their results onto the single response stream.
+Status ShardingService::GetStatements(
+        ServerContext *context, const Statement *pattern, ServerWriter<Statement> *result) {
+    auto start = std::chrono::steady_clock::now();
+    DLOG(INFO) << "Get statements matching pattern " << pattern->DebugString();
+
+    std::vector<std::thread> threads;
+    // Serialises Write() calls on the shared response stream.
+    std::mutex mutex;
+
+    for (int i=0; i<backends.size(); i++) {
+        threads.push_back(std::thread([i, this, &mutex, result, pattern]() {
+            DLOG(INFO) << "Shard " << i << ": Getting statements.";
+            ClientContext localctx;
+            auto stub = makeStub(i);
+            auto reader = stub->GetStatements(&localctx, *pattern);
+
+            int64_t count = 0;
+            Statement stmt;
+            // Stop reading from this shard once the client stops accepting
+            // writes (Write() returning false).
+            bool run = true;
+            while (run && reader->Read(&stmt)) {
+                std::lock_guard<std::mutex> guard(mutex);
+                run = result->Write(stmt);
+                count++;
+            }
+            // NOTE(review): reader->Finish() is never called here, so the
+            // backend's final status is dropped — confirm this is intended.
+            DLOG(INFO) << "Shard " << i << ": Getting statements finished (" << count << " results)";
+        }));
+    }
+
+    // Wait for all shard readers before completing the server stream.
+    for (auto& t : threads) {
+        t.join();
+    }
+
+    DLOG(INFO) << "Get statements done (time="
+               << std::chrono::duration <double, std::milli> (
+            std::chrono::steady_clock::now() - start).count()
+               << "ms).";
+
+    return Status::OK;
+}
+
+Status ShardingService::RemoveStatements(
+        ServerContext *context, const Statement *pattern, Int64Value *result) {
+    // Matching statements may live on any shard, so the removal request is
+    // broadcast to every backend and the removal counts are summed.
+    DLOG(INFO) << "Fanout: Remove statements matching pattern " << pattern->DebugString();
+    return Fanout<Statement, &SailService::Stub::RemoveStatements>(*pattern, channels, result);
+}
+
+
+// Process a mixed update stream: statement additions are sharded by hash,
+// all other update requests (e.g. namespace changes) are broadcast.
+Status ShardingService::Update(
+        ServerContext *context, ServerReader<UpdateRequest> *reader, UpdateResponse *result) {
+    std::vector<ClientContext> contexts(backends.size());
+    std::vector<UpdateResponse> responses(backends.size());
+
+    // Open one streaming Update call per backend.
+    StubList stubs;
+    WriterList <UpdateRequest> writers;
+
+    for (int i=0; i<backends.size(); i++) {
+        stubs.push_back(makeStub(i));
+        writers.push_back(stubs.back()->Update(&contexts[i], &responses[i]));
+    }
+
+    std::hash<Statement> stmt_hash;
+    // NOTE(review): ns_hash and buf below are never used in this function —
+    // candidates for removal.
+    std::hash<Namespace> ns_hash;
+
+    UpdateRequest req;
+    std::string buf;
+    while (reader->Read(&req)) {
+        if (req.has_stmt_added()) {
+            // Statement additions go to exactly one shard, chosen by hash,
+            // mirroring the bucketing in AddStatements().
+            size_t bucket = stmt_hash(req.stmt_added()) % backends.size();
+
+            DLOG(INFO) << "Shard " << bucket << ": Add statement request " << req.DebugString();
+            writers[bucket]->Write(req);
+        } else {
+            // Everything else (namespace updates, removals) is broadcast.
+            DLOG(INFO) << "Fanout update request " << req.DebugString();
+            for (auto& w : writers) {
+                w->Write(req);
+            }
+        }
+    }
+    // NOTE(review): the Finish() status of each stream is discarded here, so
+    // backend failures are silently ignored — confirm whether intended.
+    for (auto& w : writers) {
+        w->WritesDone();
+        w->Finish();
+    }
+
+    // Aggregate per-backend counters into the combined response.
+    for (auto& r : responses) {
+        result->set_added_namespaces(result->added_namespaces() + r.added_namespaces());
+        result->set_removed_namespaces(result->removed_namespaces() + r.removed_namespaces());
+        result->set_added_statements(result->added_statements() + r.added_statements());
+        result->set_removed_statements(result->removed_statements() + r.removed_statements());
+    }
+
+
+    return Status::OK;
+}
+
+Status ShardingService::Clear(
+        ServerContext *context, const ContextRequest *contexts, Int64Value *result) {
+    // Clearing affects all shards, so broadcast the request and sum counts.
+    DLOG(INFO) << "Fanout: Clear contexts matching pattern " << contexts->DebugString();
+    return Fanout<ContextRequest, &SailService::Stub::Clear>(*contexts, channels, result);
+}
+
+Status ShardingService::Size(
+        ServerContext *context, const ContextRequest *contexts, Int64Value *result) {
+    // The combined size is the sum of the per-shard sizes.
+    DLOG(INFO) << "Fanout: Computing size of contexts matching pattern " << contexts->DebugString();
+    return Fanout<ContextRequest, &SailService::Stub::Size>(*contexts, channels, result);
+}
+
+// Create a fresh client stub for backend i, reusing the channel that was
+// opened in the constructor (channels are shared; stubs are cheap to make).
+std::unique_ptr<SailService::Stub> ShardingService::makeStub(int i) {
+    return SailService::NewStub(channels[i]);
+}
+
+// Retrieve the union of all contexts from all backends, deduplicated via an
+// unordered_set, then stream the merged set back to the caller.
+Status ShardingService::GetContexts(
+        ServerContext *context, const Empty *ignored, ServerWriter<Resource> *result) {
+    std::unordered_set<Resource> contexts;
+    std::vector<std::thread> threads;
+    // Serialises concurrent inserts into the shared set.
+    std::mutex mutex;
+
+    for (int i=0; i<backends.size(); i++) {
+        threads.push_back(std::thread([i, &mutex, &contexts, this](){
+            ClientContext ctx;
+            auto stub = makeStub(i);
+            auto reader = stub->GetContexts(&ctx, Empty());
+
+            Resource r;
+            while (reader->Read(&r)) {
+                std::lock_guard<std::mutex> guard(mutex);
+                contexts.insert(r);
+            }
+            reader->Finish();
+        }));
+    }
+
+    // BUG FIX: the original never joined the worker threads, so the result
+    // set was read (and destroyed on return) while backends were still
+    // inserting into it.
+    for (auto& t : threads) {
+        t.join();
+    }
+
+    for (const auto& c : contexts) {
+        result->Write(c);
+    }
+    return Status::OK;
+}
+}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/sharding/sharding.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/sharding/sharding.h b/libraries/ostrich/backend/sharding/sharding.h
new file mode 100644
index 0000000..012bbf0
--- /dev/null
+++ b/libraries/ostrich/backend/sharding/sharding.h
@@ -0,0 +1,174 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Implementation of a proxy service doing hash-based sharding of statements
+ * for storage and retrieval. The shards are passed as vector of host:port
+ * pairs to the constructor.
+ */
+#ifndef MARMOTTA_SHARDING_H
+#define MARMOTTA_SHARDING_H
+
+#include <vector>
+#include <string>
+
+#include <grpc/grpc.h>
+#include <grpc++/server.h>
+#include <grpc++/server_builder.h>
+#include <grpc++/server_context.h>
+#include <grpc++/security/server_credentials.h>
+
+#include <google/protobuf/wrappers.pb.h>
+
+#include "service/sail.pb.h"
+#include "service/sail.grpc.pb.h"
+#include "model/model.pb.h"
+
+
+namespace marmotta {
+namespace sharding {
+
+namespace svc = marmotta::service::proto;
+
+/**
+ * Implementation of a proxy service doing hash-based sharding of statements
+ * for storage and retrieval. The shards are passed as vector of host:port
+ * pairs to the constructor.
+ */
+class ShardingService : public svc::SailService::Service {
+ public:
+
+    /**
+     * Instantiate new sharding service, connecting to the backends provided
+     * as argument (vector of host:port pairs). One gRPC channel per backend
+     * is opened at construction time and kept for the service's lifetime.
+     */
+    ShardingService(std::vector<std::string> backends);
+
+    /**
+     * Add namespaces. Since namespaces are potentially needed in all backends,
+     * they will be added to all.
+     */
+    grpc::Status AddNamespaces(grpc::ServerContext* context,
+                               grpc::ServerReader<rdf::proto::Namespace>* reader,
+                               google::protobuf::Int64Value* result) override;
+
+
+    /**
+     * Get the namespace matching the pattern using a random server.
+     */
+    grpc::Status GetNamespace(grpc::ServerContext* context,
+                              const rdf::proto::Namespace* pattern,
+                              rdf::proto::Namespace* result) override;
+
+    /**
+     * Get all namespaces matching the pattern using a random server.
+     */
+    grpc::Status GetNamespaces(grpc::ServerContext* context,
+                               const google::protobuf::Empty* ignored,
+                               grpc::ServerWriter<rdf::proto::Namespace>* result) override;
+
+    /**
+     * Add a sequence of statements. Computes a hash over the serialized
+     * proto message modulo the number of backends to determine which backend
+     * to write to.
+     */
+    grpc::Status AddStatements(grpc::ServerContext* context,
+                               grpc::ServerReader<rdf::proto::Statement>* reader,
+                               google::protobuf::Int64Value* result) override;
+
+    /**
+     * Retrieve statements matching a certain pattern. Queries all backends in
+     * parallel and multiplexes the results.
+     */
+    grpc::Status GetStatements(grpc::ServerContext* context,
+                               const rdf::proto::Statement* pattern,
+                               grpc::ServerWriter<rdf::proto::Statement>* result) override;
+
+    /**
+     * Remove statements matching a certain pattern. Forwards the request to
+     * all backends in parallel.
+     */
+    grpc::Status RemoveStatements(grpc::ServerContext* context,
+                                  const rdf::proto::Statement* pattern,
+                                  google::protobuf::Int64Value* result) override;
+
+    /**
+     * Process a sequence of updates. For statement updates, computes a hash over the
+     * serialized proto message modulo the number of backends to determine which backend
+     * to write to. For namespace updates, writes to all backends.
+     */
+    grpc::Status Update(grpc::ServerContext* context,
+                        grpc::ServerReader<service::proto::UpdateRequest>* reader,
+                        service::proto::UpdateResponse* result) override;
+
+    /**
+     * Retrieve contexts from all backends.
+     */
+    grpc::Status GetContexts(grpc::ServerContext* context,
+                             const google::protobuf::Empty* ignored,
+                             grpc::ServerWriter<rdf::proto::Resource>* result) override;
+
+    /**
+     * Clear all statements matching the given context request. Forwards the
+     * request to all backends in parallel.
+     */
+    grpc::Status Clear(grpc::ServerContext* context,
+                       const svc::ContextRequest* contexts,
+                       google::protobuf::Int64Value* result) override;
+
+    /**
+     * Get the size of the combined repository. Forwards the request to all
+     * backends in parallel and adds the results.
+     */
+    grpc::Status Size(grpc::ServerContext* context,
+                      const svc::ContextRequest* contexts,
+                      google::protobuf::Int64Value* result) override;
+
+
+    // Convenience aliases for the stub/channel/writer collections used by
+    // the implementation (also referenced by the templated Fanout helper).
+    using StubType = std::unique_ptr<svc::SailService::Stub>;
+    using StubList = std::vector<StubType>;
+
+    using ChannelType = std::shared_ptr<grpc::Channel>;
+    using ChannelList = std::vector<ChannelType>;
+
+    template <class T>
+    using Writer = std::unique_ptr<grpc::ClientWriter<T>>;
+
+    template <class T>
+    using WriterList = std::vector<Writer<T>>;
+
+ private:
+    // Host:port addresses of the backends, as passed to the constructor.
+    std::vector<std::string> backends;
+
+    // Keep a list of channels open, initialised on construction.
+    ChannelList channels;
+
+    // Hash function over the binary representation of a statement message.
+    // NOTE(review): appears unused — the .cc file creates local
+    // std::hash<Statement> instances instead; confirm and consider removing.
+    std::hash<std::string> hash_fn;
+
+    // Make a stub for the backend with the given index.
+    StubType makeStub(int backend);
+};
+
+
+}  // namespace sharding
+}  // namespace marmotta
+
+#endif //MARMOTTA_SHARDING_H

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/sparql/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/sparql/CMakeLists.txt b/libraries/ostrich/backend/sparql/CMakeLists.txt
new file mode 100644
index 0000000..9bb00ef
--- /dev/null
+++ b/libraries/ostrich/backend/sparql/CMakeLists.txt
@@ -0,0 +1,7 @@
+include_directories(.. ${CMAKE_CURRENT_BINARY_DIR}/.. ${RAPTOR_INCLUDE_DIR}/raptor2)
+
+add_library(marmotta_sparql
+        rasqal_model.cc rasqal_model.h rasqal_adapter.cc rasqal_adapter.h)
+target_link_libraries(marmotta_sparql marmotta_model ${CMAKE_THREAD_LIBS_INIT}
+        ${PROTOBUF_LIBRARIES} ${GRPC_LIBRARIES}
+        ${RASQAL_LIBRARIES} ${RAPTOR_LIBRARY} ${GLOG_LIBRARY})
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/sparql/rasqal_adapter.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/sparql/rasqal_adapter.cc b/libraries/ostrich/backend/sparql/rasqal_adapter.cc
new file mode 100644
index 0000000..7fc3cad
--- /dev/null
+++ b/libraries/ostrich/backend/sparql/rasqal_adapter.cc
@@ -0,0 +1,299 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <functional>
+
+#include <raptor2/raptor2.h>
+#include <rasqal/rasqal.h>
+#include <glog/logging.h>
+#include <chrono>
+
+#include "sparql/rasqal_adapter.h"
+#include "sparql/rasqal_model.h"
+
+namespace marmotta {
+namespace sparql {
+
+namespace {
+
+#ifndef NDEBUG
+// Render the four binding slots (subject, predicate, object, origin) as a
+// compact debug string, using "_" for slots without a bound variable.
+std::string formatVariables(rasqal_variable *bindings[4]) {
+    std::string result = "(";
+    for (int idx = 0; idx < 4; idx++) {
+        if (bindings[idx] == nullptr) {
+            result += "_ ";
+        } else {
+            result += (const char*)bindings[idx]->name;
+            result += " ";
+        }
+    }
+    result += ")";
+    return result;
+}
+
+// Render a result row (variable name -> RDF value) for debug logging.
+std::string formatBindings(const std::map<std::string, rdf::Value>& bindings) {
+    std::string result = "(";
+    for (const auto& entry : bindings) {
+        result += entry.first + "=" + entry.second.as_turtle() + " ";
+    }
+    result += ")";
+    return result;
+}
+#endif
+
+// Forward Raptor/Rasqal log messages to glog so SPARQL errors appear in the
+// normal server log (installed via rasqal_world_set_log_handler).
+void log_handler(void *user_data, raptor_log_message *message) {
+    LOG(ERROR) << "SPARQL Error(" << message->code << "): " << message->text;
+}
+
+// Bind the current statement to the variables configured in the triple match.
+// Slots 0..3 of `bindings` correspond to subject, predicate, object and
+// origin (context). Returns a bitmask of the parts actually bound.
+rasqal_triple_parts bind_match(
+        struct rasqal_triples_match_s *rtm, void *user_data,
+        rasqal_variable *bindings[4], rasqal_triple_parts parts) {
+    // rtm->user_data holds the StatementIterator installed by
+    // init_triples_match(); *it is the current statement.
+    StatementIterator *it = (StatementIterator *) rtm->user_data;
+
+    // Accumulates which triple parts were bound.
+    int r = 0;
+
+#ifndef NDEBUG
+    DLOG(INFO) << "Binding variables " << formatVariables(bindings) << " for statement " << (*it)->as_turtle();
+#endif
+    if ((parts & RASQAL_TRIPLE_SUBJECT) != 0) {
+        rasqal_variable_set_value(bindings[0], rasqal::AsLiteral(rtm->world, (*it)->getSubject()));
+        r |= RASQAL_TRIPLE_SUBJECT;
+    }
+    if ((parts & RASQAL_TRIPLE_PREDICATE) != 0) {
+        rasqal_variable_set_value(bindings[1], rasqal::AsLiteral(rtm->world, (*it)->getPredicate()));
+        r |= RASQAL_TRIPLE_PREDICATE;
+    }
+    if ((parts & RASQAL_TRIPLE_OBJECT) != 0) {
+        rasqal_variable_set_value(bindings[2], rasqal::AsLiteral(rtm->world, (*it)->getObject()));
+        r |= RASQAL_TRIPLE_OBJECT;
+    }
+    if ((parts & RASQAL_TRIPLE_ORIGIN) != 0) {
+        rasqal_variable_set_value(bindings[3], rasqal::AsLiteral(rtm->world, (*it)->getContext()));
+        r |= RASQAL_TRIPLE_ORIGIN;
+    }
+
+    return (rasqal_triple_parts) r;
+}
+
+// Increment the iterator contained in the triple match user data, advancing
+// to the next statement from the backing TripleSource.
+void next_match(struct rasqal_triples_match_s *rtm, void *user_data) {
+    DLOG(INFO) << "Next result";
+    StatementIterator *it = (StatementIterator *) rtm->user_data;
+    ++(*it);
+}
+
+// Return non-zero in case the iterator has no next element (Rasqal uses this
+// to detect the end of the result sequence).
+int is_end(struct rasqal_triples_match_s *rtm, void *user_data) {
+    StatementIterator *it = (StatementIterator *) rtm->user_data;
+    return !it->hasNext();
+}
+
+// Delete iterator and make sure its destructors are called in the C++ way.
+// Ownership of the iterator was transferred to rtm->user_data in
+// init_triples_match(); clearing the pointer guards against double delete.
+void finish(struct rasqal_triples_match_s *rtm, void *user_data) {
+    DLOG(INFO) << "Finish result iteration.";
+    StatementIterator *it = (StatementIterator *) rtm->user_data;
+    delete it;
+    rtm->user_data = nullptr;
+}
+
+// Init a Rasqal triples match using the iterator returned by GetStatements().
+// Each triple component that is a variable gets recorded in m->bindings and,
+// when unbound, acts as a wildcard (nullptr) in the GetStatements() lookup.
+int init_triples_match(
+        rasqal_triples_match *rtm, struct rasqal_triples_source_s *rts,
+        void *user_data, rasqal_triple_meta *m, rasqal_triple *t) {
+    DLOG(INFO) << "Get statements (exact=" << rtm->is_exact << ", finished=" << rtm->finished << ")";
+
+    // user_data is the malloc'd cell set up in new_triples_source(), holding
+    // a pointer to the owning SparqlService.
+    SparqlService *self = (SparqlService *) *(void**)user_data;
+
+    // nullptr components are treated as wildcards by GetStatements().
+    std::unique_ptr<rdf::Resource> s = nullptr;
+    std::unique_ptr<rdf::URI> p = nullptr;
+    std::unique_ptr<rdf::Value> o = nullptr;
+    std::unique_ptr<rdf::Resource> c = nullptr;
+
+    rasqal_variable* var;
+    if ((var=rasqal_literal_as_variable(t->subject))) {
+        m->bindings[0] = var;
+        // A variable that already has a value constrains the lookup.
+        if (var->value) {
+            s.reset(new rdf::Resource(rasqal::ConvertResource(var->value)));
+        }
+    } else {
+        s.reset(new rdf::Resource(rasqal::ConvertResource(t->subject)));
+    }
+
+    if ((var=rasqal_literal_as_variable(t->predicate))) {
+        m->bindings[1] = var;
+        if (var->value) {
+            p.reset(new rdf::URI(rasqal::ConvertURI(var->value)));
+        }
+    } else {
+        p.reset(new rdf::URI(rasqal::ConvertURI(t->predicate)));
+    }
+
+    if ((var=rasqal_literal_as_variable(t->object))) {
+        m->bindings[2] = var;
+        if (var->value) {
+            o.reset(new rdf::Value(rasqal::ConvertValue(var->value)));
+        }
+    } else {
+        o.reset(new rdf::Value(rasqal::ConvertValue(t->object)));
+    }
+
+    // The origin (context/graph) component is optional.
+    if(t->origin) {
+        if ((var=rasqal_literal_as_variable(t->origin))) {
+            m->bindings[3] = var;
+            if (var->value) {
+                c.reset(new rdf::Resource(rasqal::ConvertResource(var->value)));
+            }
+        } else {
+            c.reset(new rdf::Resource(rasqal::ConvertResource(t->origin)));
+        }
+    }
+
+    // Store C++ iterator in user_data and take ownership; it is deleted in
+    // the finish() callback below.
+    auto it = self->Source().GetStatements(s.get(), p.get(), o.get(), c.get());
+    rtm->user_data = it.release();
+
+    rtm->bind_match = bind_match;
+    rtm->next_match = next_match;
+    rtm->is_end = is_end;
+    rtm->finish = finish;
+
+    return 0;
+}
+
+// Check for triple presence, using the SparqlService::HasStatement method.
+// The origin (context) is only consulted when the RASQAL_TRIPLE_ORIGIN flag
+// is set; otherwise the context is treated as a wildcard (nullptr).
+int triple_present(
+        struct rasqal_triples_source_s *rts, void *user_data, rasqal_triple *t) {
+    DLOG(INFO) << "Check triple";
+
+    auto s = rasqal::ConvertResource(t->subject);
+    auto p = rasqal::ConvertURI(t->predicate);
+    auto o = rasqal::ConvertValue(t->object);
+
+    // user_data is the malloc'd cell holding the SparqlService pointer.
+    SparqlService *self = (SparqlService *) *(void**)user_data;
+    if ((t->flags & RASQAL_TRIPLE_ORIGIN) != 0) {
+        auto c = rasqal::ConvertResource(t->origin);
+
+        return self->Source().HasStatement(&s, &p, &o, &c);
+    } else {
+        return self->Source().HasStatement(&s, &p, &o, nullptr);
+    }
+}
+
+// Called by Rasqal when the triples source is released.
+// NOTE(review): new_triples_source() malloc()s a cell for rts->user_data;
+// nothing here free()s it, which looks like a small leak per query —
+// verify which pointer Rasqal passes to this callback before freeing.
+void free_triples_source(void *user_data) {
+    DLOG(INFO) << "Free triples source";
+}
+
+// Init a Rasqal triple source, wrapping the Marmotta TripleSource (factory_user_data).
+// Installs the callbacks above and stashes the SparqlService pointer in a
+// heap-allocated cell so the callbacks can retrieve it via rts->user_data.
+int new_triples_source(rasqal_query* query, void *factory_user_data, void *user_data, rasqal_triples_source* rts) {
+    DLOG(INFO) << "Init triples source";
+
+    rts->version = 1;
+    rts->init_triples_match = init_triples_match;
+    rts->triple_present = triple_present;
+    rts->free_triples_source = free_triples_source;
+    // NOTE(review): malloc result is not checked for nullptr.
+    rts->user_data = (void**)malloc(sizeof(void*));
+    *((void**)rts->user_data) = factory_user_data;
+
+    return 0;
+}
+
+// Version-2 factory entry point with an error handler; the handler is not
+// needed here, so this simply delegates to new_triples_source().
+int init_triples_source(
+        rasqal_query *query, void *factory_user_data, void *user_data,
+        rasqal_triples_source *rts, rasqal_triples_error_handler handler) {
+    return new_triples_source(query, factory_user_data, user_data, rts);
+}
+
+// Init a Rasqal triple factory, registering both the v1 and v2 source
+// constructors (passed to rasqal_set_triples_source_factory below).
+int init_factory(rasqal_triples_source_factory *factory) {
+    DLOG(INFO) << "Init query factory";
+    factory->version = 1;
+    factory->new_triples_source = new_triples_source;
+    factory->init_triples_source = init_triples_source;
+    return 0;
+}
+}  // namespace
+
+
+// Construct the SPARQL service around a TripleSource, taking ownership of it.
+SparqlService::SparqlService(std::unique_ptr<TripleSource> source)
+        : source(std::move(source)) {
+    // Initialise Rasqal world.
+    world = rasqal_new_world();
+    rasqal_world_open(world);
+
+    // Redirect logging output to glog.
+    rasqal_world_set_log_handler(world, nullptr, log_handler);
+
+    // Register our triple source with Rasqal, providing the relevant wrappers;
+    // `this` becomes the factory_user_data seen by the C callbacks.
+    rasqal_set_triples_source_factory(world, &init_factory, this);
+}
+
+// Tear down the Rasqal world created in the constructor; the owned
+// TripleSource is released by the unique_ptr member.
+SparqlService::~SparqlService() {
+    rasqal_free_world(world);
+}
+
+// Execute a SPARQL tuple (SELECT) query, invoking row_handler for each
+// result row; iteration stops early when row_handler returns false.
+// Throws SparqlException when preparation or execution fails.
+void SparqlService::TupleQuery(const std::string& query, std::function<bool(const RowType&)> row_handler) {
+    auto start = std::chrono::steady_clock::now();
+    LOG(INFO) << "Starting SPARQL tuple query.";
+
+    auto q = rasqal_new_query(world, "sparql11-query", nullptr);
+    // Dummy base URI; queries here are not expected to use relative IRIs.
+    auto base = raptor_new_uri(rasqal_world_get_raptor(world), (const unsigned char*)"http://example.com");
+    if (rasqal_query_prepare(q, (const unsigned char*)query.c_str(), base) != 0) {
+        // Free resources before throwing on the failure path.
+        raptor_free_uri(base);
+        rasqal_free_query(q);
+        throw SparqlException("Query preparation failed", query);
+    }
+
+    bool next = true;
+    auto r = rasqal_query_execute(q);
+    if (r == nullptr) {
+        raptor_free_uri(base);
+        rasqal_free_query(q);
+        throw SparqlException("Query execution failed", query);
+    }
+
+    int rowcount = 0;
+    while (next && rasqal_query_results_finished(r) == 0) {
+        // Materialise the current row as a name -> value map.
+        RowType row;
+        for (int i=0; i<rasqal_query_results_get_bindings_count(r); i++) {
+            row[(const char*)rasqal_query_results_get_binding_name(r,i)] =
+                    rasqal::ConvertValue(rasqal_query_results_get_binding_value(r,i));
+        }
+#ifndef NDEBUG
+        DLOG(INFO) << "Row " << rowcount << ": " << formatBindings(row);
+#endif
+
+        // Handler controls whether iteration continues.
+        next = row_handler(row);
+        rasqal_query_results_next(r);
+
+        rowcount++;
+    }
+
+    rasqal_free_query_results(r);
+    rasqal_free_query(q);
+    raptor_free_uri(base);
+
+    LOG(INFO) << "SPARQL query finished (time=" << std::chrono::duration <double, std::milli> (
+            std::chrono::steady_clock::now() - start).count() << "ms).";
+
+}
+
+}  // namespace sparql
+}  // namespace marmotta
+

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/sparql/rasqal_adapter.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/sparql/rasqal_adapter.h b/libraries/ostrich/backend/sparql/rasqal_adapter.h
new file mode 100644
index 0000000..48ce127
--- /dev/null
+++ b/libraries/ostrich/backend/sparql/rasqal_adapter.h
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MARMOTTA_RASQAL_ADAPTER_H
+#define MARMOTTA_RASQAL_ADAPTER_H
+
+#include <functional>
+#include <map>
+#include <memory>
+#include <string>
+
+#include <rasqal/rasqal.h>
+
+#include "model/rdf_model.h"
+#include "util/iterator.h"
+
+namespace marmotta {
+namespace sparql {
+
+using StatementIterator = util::CloseableIterator<rdf::Statement>;
+
+/**
+ * An abstract superclass for more easily interfacing from the C++ Marmotta model
+ * with C-based Rasqal.
+ */
+class TripleSource {
+ public:
+
+    /**
+     * Virtual destructor. Implementations are deleted through base-class
+     * pointers (SparqlService holds a std::unique_ptr<TripleSource>), so the
+     * destructor must be virtual to avoid undefined behaviour.
+     */
+    virtual ~TripleSource() = default;
+
+    /**
+     * Check for presence of a complete statement.
+     *
+     * Parameters with nullptr value are interpreted as wildcards.
+     */
+    virtual bool HasStatement(
+            const rdf::Resource* s, const rdf::URI* p, const rdf::Value* o,
+            const rdf::Resource* c) = 0;
+
+    /**
+     * Return an iterator over statements matching the given subject, predicate,
+     * object and context. The caller takes ownership of the pointer.
+     *
+     * Parameters with nullptr value are interpreted as wildcards.
+     */
+    virtual std::unique_ptr<StatementIterator> GetStatements(
+            const rdf::Resource* s, const rdf::URI* p, const rdf::Value* o,
+            const rdf::Resource* c) = 0;
+};
+
+/**
+ * Exception thrown when a SPARQL query cannot be prepared or executed.
+ * Carries both a human-readable message and the offending query string.
+ */
+class SparqlException : public std::exception {
+ public:
+
+    SparqlException(const std::string &message, const std::string &query) : message(message), query(query) { }
+
+    // Human-readable error message (valid as long as this exception lives).
+    const char *what() const noexcept override {
+        return message.c_str();
+    }
+
+    // The query that triggered the error. Previously the query was stored but
+    // not accessible to callers, making it useless for diagnostics.
+    const std::string &getQuery() const noexcept {
+        return query;
+    }
+
+ private:
+    std::string message;
+    std::string query;
+};
+
+/**
+ * Class SparqlService provides a SPARQL wrapper around a triple source using
+ * Rasqal.
+ */
+class SparqlService {
+ public:
+    // One result row of a tuple query: variable name -> bound value.
+    using RowType = std::map<std::string, rdf::Value>;
+
+    // Takes ownership of the triple source; also sets up the rasqal world
+    // used for query parsing/execution.
+    SparqlService(std::unique_ptr<TripleSource> source);
+
+    /**
+     * Free any C-style resources, particularly the rasqal world.
+     */
+    ~SparqlService();
+
+    /**
+     * Execute a SPARQL SELECT query, invoking row_handler once per result
+     * row; the handler returns false to stop iteration early. Throws
+     * SparqlException when query preparation or execution fails.
+     */
+    void TupleQuery(const std::string& query, std::function<bool(const RowType&)> row_handler);
+
+    /**
+     * Return a reference to the triple source managed by this service.
+     */
+    TripleSource& Source() {
+        return *source;
+    }
+
+ private:
+    std::unique_ptr<TripleSource> source;
+
+    rasqal_world* world;
+};
+
+}  // namespace sparql
+}  // namespace marmotta
+
+
+#endif //MARMOTTA_RASQAL_ADAPTER_H

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/sparql/rasqal_model.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/sparql/rasqal_model.cc b/libraries/ostrich/backend/sparql/rasqal_model.cc
new file mode 100644
index 0000000..346b831
--- /dev/null
+++ b/libraries/ostrich/backend/sparql/rasqal_model.cc
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <raptor2/raptor2.h>
+#include <rasqal/rasqal.h>
+#include <glog/logging.h>
+#include "rasqal_model.h"
+
+namespace marmotta {
+namespace sparql {
+namespace rasqal {
+
+// Helper macros. Some Rasqal functions copy the input string themselves, others don't.
+#define STR(s) (const unsigned char*)s.c_str()
+#define CPSTR(s) (const unsigned char*)strdup(s.c_str())
+
+// Convert a rasqal literal into a Marmotta Resource (URI or blank node).
+// Returns an empty Resource for unsupported node types.
+rdf::Resource ConvertResource(rasqal_literal *node) {
+    switch (node->type) {
+        case RASQAL_LITERAL_URI:
+            return rdf::URI(std::string((const char*)raptor_uri_as_string(node->value.uri)));
+        case RASQAL_LITERAL_BLANK:
+            return rdf::BNode(std::string((const char*)node->string, node->string_len));
+        default:
+            // This is an error condition, so log at ERROR severity (was INFO).
+            LOG(ERROR) << "Error: unsupported resource type " << node->type;
+            return rdf::Resource();
+    }
+}
+
+
+// Convert a rasqal literal into a Marmotta Value, mapping typed literals to
+// DatatypeLiteral with the corresponding XSD datatype URI. Returns an empty
+// Value for unsupported node types.
+rdf::Value ConvertValue(rasqal_literal *node) {
+    // Literal content as written; URI/typed cases read the union fields instead.
+    std::string label((const char*)node->string, node->string_len);
+    switch (node->type) {
+        case RASQAL_LITERAL_URI:
+            return rdf::URI((const char*)raptor_uri_as_string(node->value.uri));
+        case RASQAL_LITERAL_BLANK:
+            return rdf::BNode(label);
+        case RASQAL_LITERAL_STRING:
+            if (node->language) {
+                return rdf::StringLiteral(label, node->language);
+            }
+            return rdf::StringLiteral(label);
+        case RASQAL_LITERAL_XSD_STRING:
+            return rdf::DatatypeLiteral(
+                    label, rdf::URI((const char*)raptor_uri_as_string(node->datatype)));
+        case RASQAL_LITERAL_BOOLEAN:
+            return rdf::DatatypeLiteral(
+                    node->value.integer==0?"false":"true",
+                    rdf::URI((const char*)raptor_uri_as_string(node->datatype)));
+        case RASQAL_LITERAL_INTEGER:
+            return rdf::DatatypeLiteral(
+                    std::to_string(node->value.integer),
+                    rdf::URI((const char*)raptor_uri_as_string(node->datatype)));
+        case RASQAL_LITERAL_FLOAT:
+        case RASQAL_LITERAL_DOUBLE:
+            return rdf::DatatypeLiteral(
+                    std::to_string(node->value.floating),
+                    rdf::URI((const char*)raptor_uri_as_string(node->datatype)));
+        case RASQAL_LITERAL_DECIMAL: {
+            // rasqal allocates the string; free it after building the literal.
+            char* s = rasqal_xsd_decimal_as_string(node->value.decimal);
+            rdf::Value r = rdf::DatatypeLiteral(
+                    s, rdf::URI((const char*)raptor_uri_as_string(node->datatype)));
+            free(s);
+            return r;  // NRVO/implicit move; no explicit std::move needed
+        }
+        case RASQAL_LITERAL_DATETIME: {
+            char* s = rasqal_xsd_datetime_to_string(node->value.datetime);
+            rdf::Value r = rdf::DatatypeLiteral(
+                    s, rdf::URI((const char*)raptor_uri_as_string(node->datatype)));
+            free(s);
+            return r;
+        }
+        case RASQAL_LITERAL_DATE: {
+            char* s = rasqal_xsd_date_to_string(node->value.date);
+            rdf::Value r = rdf::DatatypeLiteral(
+                    s, rdf::URI((const char*)raptor_uri_as_string(node->datatype)));
+            free(s);
+            return r;
+        }
+        default:
+            // Error condition: log at ERROR severity (was INFO).
+            LOG(ERROR) << "Error: unsupported node type " << node->type;
+            return rdf::Value();
+    }
+}
+
+
+// Convert a rasqal literal into a Marmotta URI. Only URI literals can be
+// converted; anything else yields an empty URI.
+rdf::URI ConvertURI(rasqal_literal *node) {
+    if (node->type == RASQAL_LITERAL_URI) {
+        return rdf::URI((const char*)raptor_uri_as_string(node->value.uri));
+    }
+    return rdf::URI();
+}
+
+
+// Convert a rasqal triple into a Marmotta Statement. SPOG triples (quads)
+// carry an origin/context resource, plain triples do not.
+rdf::Statement ConvertStatement(rasqal_triple *triple) {
+    auto subject   = ConvertResource(triple->subject);
+    auto predicate = ConvertURI(triple->predicate);
+    auto object    = ConvertValue(triple->object);
+
+    if (triple->flags == RASQAL_TRIPLE_SPOG) {
+        return rdf::Statement(subject, predicate, object,
+                              ConvertResource(triple->origin));
+    }
+    return rdf::Statement(subject, predicate, object);
+}
+
+// Convert a Marmotta string literal Value into a rasqal string literal.
+rasqal_literal *AsStringLiteral(rasqal_world* world, const rdf::Value &v) {
+    rdf::StringLiteral l(v.getMessage().literal().stringliteral());
+
+    // Content and language are strdup'ed copies (see the CPSTR macro note
+    // above) rather than pointers into the temporary StringLiteral, since
+    // rasqal_new_string_literal does not copy these buffers itself.
+    return rasqal_new_string_literal(
+            world,
+            CPSTR(l.getContent()),
+            strdup(l.getLanguage().c_str()),
+            nullptr,
+            nullptr);
+}
+
+// Convert a Marmotta datatype literal Value into a rasqal typed literal.
+rasqal_literal *AsDatatypeLiteral(rasqal_world* world, const rdf::Value &v) {
+    raptor_world* raptorWorld = rasqal_world_get_raptor(world);
+    rdf::DatatypeLiteral l(v.getMessage().literal().dataliteral());
+
+    // Content is a strdup'ed copy (CPSTR macro note above) and the datatype
+    // is a freshly allocated raptor URI; no language tag is set.
+    return rasqal_new_string_literal(
+            world,
+            CPSTR(l.getContent()),
+            nullptr,
+            raptor_new_uri(raptorWorld, STR(l.getDatatype().stringValue())),
+            nullptr);
+}
+
+// Convert a Marmotta Resource (URI or blank node) into a rasqal literal.
+// Returns nullptr for resource kinds rasqal cannot represent.
+rasqal_literal *AsLiteral(rasqal_world* world, const rdf::Resource &r) {
+    raptor_world* raptorWorld = rasqal_world_get_raptor(world);
+    if (r.type == rdf::Resource::URI) {
+        return rasqal_new_uri_literal(
+                world,
+                raptor_new_uri(raptorWorld, STR(r.stringValue())));
+    }
+    if (r.type == rdf::Resource::BNODE) {
+        return rasqal_new_simple_literal(
+                world, RASQAL_LITERAL_BLANK, CPSTR(r.stringValue()));
+    }
+    return nullptr;
+}
+
+// Convert a Marmotta Value into a rasqal literal, delegating string and
+// datatype literals to the dedicated helpers above. Returns nullptr for
+// value kinds rasqal cannot represent.
+rasqal_literal *AsLiteral(rasqal_world* world, const rdf::Value &v) {
+    raptor_world* raptorWorld = rasqal_world_get_raptor(world);
+    if (v.type == rdf::Value::URI) {
+        return rasqal_new_uri_literal(
+                world, raptor_new_uri(raptorWorld, STR(v.stringValue())));
+    }
+    if (v.type == rdf::Value::BNODE) {
+        return rasqal_new_simple_literal(
+                world, RASQAL_LITERAL_BLANK, CPSTR(v.stringValue()));
+    }
+    if (v.type == rdf::Value::STRING_LITERAL) {
+        return AsStringLiteral(world, v);
+    }
+    if (v.type == rdf::Value::DATATYPE_LITERAL) {
+        return AsDatatypeLiteral(world, v);
+    }
+    return nullptr;
+}
+
+// Convert a Marmotta URI into a rasqal URI literal; rasqal takes ownership
+// of the newly allocated raptor URI.
+rasqal_literal *AsLiteral(rasqal_world* world, const rdf::URI &u) {
+    raptor_world* raptorWorld = rasqal_world_get_raptor(world);
+    return rasqal_new_uri_literal(
+            world, raptor_new_uri(raptorWorld, STR(u.stringValue())));
+}
+}  // namespace rasqal
+}  // namespace sparql
+}  // namespace marmotta
+

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/sparql/rasqal_model.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/sparql/rasqal_model.h b/libraries/ostrich/backend/sparql/rasqal_model.h
new file mode 100644
index 0000000..549b3c4
--- /dev/null
+++ b/libraries/ostrich/backend/sparql/rasqal_model.h
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MARMOTTA_RASQAL_MODEL_H
+#define MARMOTTA_RASQAL_MODEL_H
+
+#include <memory>
+#include <rasqal/rasqal.h>
+
+#include "model/rdf_model.h"
+
+namespace marmotta {
+namespace sparql {
+namespace rasqal {
+
+/*
+ * Convert a rasqal literal into a Marmotta Resource. Returns empty in case
+ * the node cannot be converted.
+ */
+rdf::Resource ConvertResource(rasqal_literal* node);
+
+/*
+ * Convert a rasqal literal into a Marmotta Value. Returns empty in case
+ * the node cannot be converted.
+ */
+rdf::Value ConvertValue(rasqal_literal* node);
+
+/*
+ * Convert a rasqal literal into a Marmotta URI. Returns empty in case
+ * the node cannot be converted.
+ */
+rdf::URI ConvertURI(rasqal_literal* node);
+
+/*
+ * Convert a rasqal triple into a Marmotta Statement. Returns empty in case
+ * the node cannot be converted.
+ */
+rdf::Statement ConvertStatement(rasqal_triple* triple);
+
+/*
+ * Convert a Marmotta Resource into a rasqal literal.
+ */
+rasqal_literal* AsLiteral(rasqal_world* world, const rdf::Resource& r);
+
+/*
+ * Convert a Marmotta Value into a rasqal literal.
+ */
+rasqal_literal* AsLiteral(rasqal_world* world, const rdf::Value& v);
+
+/*
+ * Convert a Marmotta URI into a rasqal literal.
+ */
+rasqal_literal* AsLiteral(rasqal_world* world, const rdf::URI& u);
+
+
+}  // namespace rasqal
+}  // namespace sparql
+}  // namespace marmotta
+
+#endif //MARMOTTA_RASQAL_MODEL_H

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/test/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/test/CMakeLists.txt b/libraries/ostrich/backend/test/CMakeLists.txt
new file mode 100644
index 0000000..841b982
--- /dev/null
+++ b/libraries/ostrich/backend/test/CMakeLists.txt
@@ -0,0 +1,12 @@
+include_directories(.. ${CMAKE_CURRENT_BINARY_DIR}/.. ${RAPTOR_INCLUDE_DIR}/raptor2)
+
+add_library(gtest STATIC gtest.h gtest-all.cc)
+
+add_executable(model_tests StatementTest.cc main.cc)
+target_link_libraries(model_tests gtest marmotta_model ${GLOG_LIBRARY})
+
+add_executable(sparql_tests SparqlTest.cc main.cc)
+target_link_libraries(sparql_tests gtest marmotta_model marmotta_sparql ${GLOG_LIBRARY})
+
+add_test(NAME ModelTest
+         COMMAND model_tests)
+
+# Register the SPARQL test binary with CTest too; previously it was built
+# but never executed by `make test`.
+add_test(NAME SparqlTest
+         COMMAND sparql_tests)

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/test/SparqlTest.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/test/SparqlTest.cc b/libraries/ostrich/backend/test/SparqlTest.cc
new file mode 100644
index 0000000..7e56eba
--- /dev/null
+++ b/libraries/ostrich/backend/test/SparqlTest.cc
@@ -0,0 +1,266 @@
+//
+// Created by wastl on 09.12.15.
+//
+#include <glog/logging.h>
+#include "gtest.h"
+#include "sparql/rasqal_adapter.h"
+#include "model/rdf_operators.h"
+
+namespace marmotta {
+namespace sparql {
+
+namespace {
+// Iterator over a fixed, copied vector of statements, used by the mock
+// triple source below.
+class MockStatementIterator : public StatementIterator {
+ public:
+    MockStatementIterator(std::vector<rdf::Statement> statements)
+            : statements(statements), index(0) {
+    }
+
+    StatementIterator& operator++() override {
+        index++;
+        return *this;
+    };
+
+    rdf::Statement& operator*() override {
+        return statements[index];
+    };
+
+    rdf::Statement* operator->() override {
+        return &statements[index];
+    };
+
+    bool hasNext() override {
+        return index < statements.size();
+    };
+
+ private:
+    std::vector<rdf::Statement> statements;
+    // Use the vector's size type instead of int to avoid the signed/unsigned
+    // comparison in hasNext().
+    std::vector<rdf::Statement>::size_type index;
+};
+
+// In-memory TripleSource over a fixed statement vector; nullptr pattern
+// fields act as wildcards.
+class MockTripleSource : public TripleSource {
+
+ public:
+    MockTripleSource(std::vector<rdf::Statement> statements) : statements(statements) {
+
+    }
+
+    // Returns true iff at least one stored statement matches the pattern.
+    // BUG FIX: the previous implementation returned false as soon as the
+    // first statement failed to match and could never return true at all.
+    bool HasStatement(const rdf::Resource *s, const rdf::URI *p, const rdf::Value *o, const rdf::Resource *c) override {
+        for (const auto& stmt : statements) {
+            if (Matches(stmt, s, p, o, c)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    // Returns an iterator over all statements matching the pattern.
+    std::unique_ptr<StatementIterator> GetStatements(const rdf::Resource *s, const rdf::URI *p,
+                                                             const rdf::Value *o, const rdf::Resource *c) override {
+        std::vector<rdf::Statement> results;
+        for (const auto& stmt : statements) {
+            if (Matches(stmt, s, p, o, c)) {
+                results.push_back(stmt);
+            }
+        }
+        return std::unique_ptr<StatementIterator>(new MockStatementIterator(results));
+    }
+
+ private:
+    // True iff stmt matches the (possibly wildcard) pattern fields; shared by
+    // HasStatement and GetStatements, which previously duplicated this logic.
+    static bool Matches(const rdf::Statement& stmt,
+                        const rdf::Resource *s, const rdf::URI *p,
+                        const rdf::Value *o, const rdf::Resource *c) {
+        if (s != nullptr && *s != stmt.getSubject()) {
+            return false;
+        }
+        if (p != nullptr && *p != stmt.getPredicate()) {
+            return false;
+        }
+        if (o != nullptr && *o != stmt.getObject()) {
+            return false;
+        }
+        if (c != nullptr && *c != stmt.getContext()) {
+            return false;
+        }
+        return true;
+    }
+
+    std::vector<rdf::Statement> statements;
+};
+}  // namespace
+
+
+// A single ?s ?p ?o pattern over one triple binds all three variables once.
+TEST(SPARQLTest, Simple) {
+    SparqlService svc(std::unique_ptr<TripleSource>(new MockTripleSource(
+            {
+                    rdf::Statement(rdf::URI("http://example.com/s1"), rdf::URI("http://example.com/p1"), rdf::URI("http://example.com/o1"))
+            }
+    )));
+
+    int count = 0;
+    rdf::Value s, p, o;
+    svc.TupleQuery("SELECT * WHERE {?s ?p ?o}", [&](const SparqlService::RowType& row) {
+        count++;
+        s = row.at("s");
+        p = row.at("p");
+        o = row.at("o");
+
+        return true;
+    });
+
+    EXPECT_EQ(1, count);
+    EXPECT_EQ("http://example.com/s1", s.stringValue());
+    EXPECT_EQ("http://example.com/p1", p.stringValue());
+    EXPECT_EQ("http://example.com/o1", o.stringValue());
+}
+
+// A fixed subject URI selects only the statement with that subject.
+TEST(SPARQLTest, SubjectPattern) {
+    SparqlService svc(std::unique_ptr<TripleSource>(new MockTripleSource(
+            {
+                    rdf::Statement(rdf::URI("http://example.com/s1"), rdf::URI("http://example.com/p1"), rdf::URI("http://example.com/o1")),
+                    rdf::Statement(rdf::URI("http://example.com/s2"), rdf::URI("http://example.com/p2"), rdf::URI("http://example.com/o2"))
+            }
+    )));
+
+    int count = 0;
+    rdf::Value p, o;
+    svc.TupleQuery("SELECT * WHERE {<http://example.com/s1> ?p ?o}", [&](const SparqlService::RowType& row) {
+        count++;
+        p = row.at("p");
+        o = row.at("o");
+
+        return true;
+    });
+
+    EXPECT_EQ(1, count);
+    EXPECT_EQ("http://example.com/p1", p.stringValue());
+    EXPECT_EQ("http://example.com/o1", o.stringValue());
+}
+
+// A fixed predicate URI selects only the statement with that predicate.
+TEST(SPARQLTest, PredicatePattern) {
+    SparqlService svc(std::unique_ptr<TripleSource>(new MockTripleSource(
+            {
+                    rdf::Statement(rdf::URI("http://example.com/s1"), rdf::URI("http://example.com/p1"), rdf::URI("http://example.com/o1")),
+                    rdf::Statement(rdf::URI("http://example.com/s2"), rdf::URI("http://example.com/p2"), rdf::URI("http://example.com/o2"))
+            }
+    )));
+
+    int count = 0;
+    rdf::Value s, o;
+    svc.TupleQuery("SELECT * WHERE {?s <http://example.com/p1> ?o}", [&](const SparqlService::RowType& row) {
+        count++;
+        s = row.at("s");
+        o = row.at("o");
+
+        return true;
+    });
+
+    EXPECT_EQ(1, count);
+    EXPECT_EQ("http://example.com/s1", s.stringValue());
+    EXPECT_EQ("http://example.com/o1", o.stringValue());
+}
+
+// A fixed object URI selects only the statement with that object.
+TEST(SPARQLTest, ObjectPattern) {
+    SparqlService svc(std::unique_ptr<TripleSource>(new MockTripleSource(
+            {
+                    rdf::Statement(rdf::URI("http://example.com/s1"), rdf::URI("http://example.com/p1"), rdf::URI("http://example.com/o1")),
+                    rdf::Statement(rdf::URI("http://example.com/s2"), rdf::URI("http://example.com/p2"), rdf::URI("http://example.com/o2"))
+            }
+    )));
+
+    int count = 0;
+    rdf::Value s, p;
+    svc.TupleQuery("SELECT * WHERE {?s ?p <http://example.com/o1>}", [&](const SparqlService::RowType& row) {
+        count++;
+        s = row.at("s");
+        p = row.at("p");
+
+        return true;
+    });
+
+    EXPECT_EQ(1, count);
+    EXPECT_EQ("http://example.com/p1", p.stringValue());
+    EXPECT_EQ("http://example.com/s1", s.stringValue());
+}
+
+// Blank-node subjects round-trip through the query engine.
+TEST(SPARQLTest, BNode) {
+    SparqlService svc(std::unique_ptr<TripleSource>(new MockTripleSource(
+            {
+                    rdf::Statement(rdf::BNode("n1"), rdf::URI("http://example.com/p1"), rdf::URI("http://example.com/o1")),
+                    rdf::Statement(rdf::BNode("n2"), rdf::URI("http://example.com/p2"), rdf::URI("http://example.com/o2"))
+            }
+    )));
+
+    int count = 0;
+    rdf::Value s, p;
+    svc.TupleQuery("SELECT * WHERE {?s ?p <http://example.com/o1>}", [&](const SparqlService::RowType& row) {
+        count++;
+        s = row.at("s");
+        p = row.at("p");
+
+        return true;
+    });
+
+    EXPECT_EQ(1, count);
+    EXPECT_EQ("http://example.com/p1", p.stringValue());
+    EXPECT_EQ("n1", s.stringValue());
+}
+
+// A FILTER clause restricts the result to the matching binding.
+TEST(SPARQLTest, Filter) {
+    SparqlService svc(std::unique_ptr<TripleSource>(new MockTripleSource(
+            {
+                    rdf::Statement(rdf::URI("http://example.com/s1"), rdf::URI("http://example.com/p1"), rdf::URI("http://example.com/o1")),
+                    rdf::Statement(rdf::URI("http://example.com/s2"), rdf::URI("http://example.com/p2"), rdf::URI("http://example.com/o2"))
+            }
+    )));
+
+    int count = 0;
+    rdf::Value s, p, o;
+    svc.TupleQuery("SELECT * WHERE {?s ?p ?o . FILTER(?o = <http://example.com/o1>)}", [&](const SparqlService::RowType& row) {
+        count++;
+        s = row.at("s");
+        p = row.at("p");
+        o = row.at("o");
+
+        return true;
+    });
+
+    EXPECT_EQ(1, count);
+    EXPECT_EQ("http://example.com/p1", p.stringValue());
+    EXPECT_EQ("http://example.com/s1", s.stringValue());
+    EXPECT_EQ("http://example.com/o1", o.stringValue());
+}
+
+// Two patterns sharing ?o1 join the two statements into a single result row.
+TEST(SPARQLTest, Join) {
+    SparqlService svc(std::unique_ptr<TripleSource>(new MockTripleSource(
+            {
+                    rdf::Statement(rdf::URI("http://example.com/s1"), rdf::URI("http://example.com/p1"), rdf::URI("http://example.com/o1")),
+                    rdf::Statement(rdf::URI("http://example.com/o1"), rdf::URI("http://example.com/p2"), rdf::URI("http://example.com/o2"))
+            }
+    )));
+
+    int count = 0;
+    rdf::Value s, o;
+    svc.TupleQuery("SELECT * WHERE {?s ?p1 ?o1 . ?o1 ?p2 ?o }", [&](const SparqlService::RowType& row) {
+        count++;
+        s = row.at("s");
+        o = row.at("o");
+
+        return true;
+    });
+
+    EXPECT_EQ(1, count);
+    EXPECT_EQ("http://example.com/s1", s.stringValue());
+    EXPECT_EQ("http://example.com/o2", o.stringValue());
+}
+
+
+}  // namespace sparql
+}  // namespace marmotta
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/test/StatementTest.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/test/StatementTest.cc b/libraries/ostrich/backend/test/StatementTest.cc
new file mode 100644
index 0000000..56fb11e
--- /dev/null
+++ b/libraries/ostrich/backend/test/StatementTest.cc
@@ -0,0 +1,135 @@
+//
+// Created by wastl on 18.04.15.
+//
+
+#include "gtest.h"
+#include "model/rdf_model.h"
+#include "model/rdf_operators.h"
+
+namespace marmotta {
+
+// URI construction from C string and std::string.
+TEST(URITest, Construct) {
+    rdf::URI uri1("http://www.example.com/U1");
+    rdf::URI uri2(std::string("http://www.example.com/U2"));
+
+    EXPECT_EQ(uri1.getUri(), "http://www.example.com/U1");
+    EXPECT_EQ(uri2.getUri(), "http://www.example.com/U2");
+}
+
+// URI equality is by URI string.
+TEST(URITest, Equality) {
+    rdf::URI uri1("http://www.example.com/U1");
+    rdf::URI uri2("http://www.example.com/U1");
+    rdf::URI uri3("http://www.example.com/U3");
+
+    EXPECT_EQ(uri1, uri2);
+    EXPECT_NE(uri1, uri3);
+}
+
+// The underlying proto messages compare consistently with the wrappers.
+TEST(URITest, ProtoEquality) {
+    rdf::URI uri1("http://www.example.com/U1");
+    rdf::URI uri2("http://www.example.com/U1");
+    rdf::URI uri3("http://www.example.com/U3");
+
+    EXPECT_EQ(uri1.getMessage(), uri2.getMessage());
+    EXPECT_NE(uri1.getMessage(), uri3.getMessage());
+}
+
+// BNode construction from C string and std::string.
+TEST(BNodeTest, Construct) {
+    rdf::BNode bNode1("n1");
+    rdf::BNode bNode2(std::string("n2"));
+
+    EXPECT_EQ(bNode1.getId(), "n1");
+    EXPECT_EQ(bNode2.getId(), "n2");
+}
+
+// BNode equality is by node id.
+TEST(BNodeTest, Equality) {
+    rdf::BNode bNode1("n1");
+    rdf::BNode bNode2("n1");
+    rdf::BNode bNode3("n3");
+
+    EXPECT_EQ(bNode1, bNode2);
+    EXPECT_NE(bNode1, bNode3);
+}
+
+// Proto messages of BNodes compare consistently with the wrappers.
+TEST(BNodeTest, ProtoEquality) {
+    rdf::BNode bNode1("n1");
+    rdf::BNode bNode2("n1");
+    rdf::BNode bNode3("n3");
+
+    EXPECT_EQ(bNode1.getMessage(), bNode2.getMessage());
+    EXPECT_NE(bNode1.getMessage(), bNode3.getMessage());
+}
+
+// StringLiteral construction with and without a language tag.
+TEST(StringLiteralTest, Construct) {
+    rdf::StringLiteral l1("Hello, World!");
+    rdf::StringLiteral l2("Hello, World!", "en");
+    rdf::StringLiteral l3(std::string("Hello, World!"));
+
+    EXPECT_EQ(l1.getContent(), "Hello, World!");
+    EXPECT_EQ(l1.getLanguage(), "");
+
+    EXPECT_EQ(l2.getContent(), "Hello, World!");
+    EXPECT_EQ(l2.getLanguage(), "en");
+
+    EXPECT_EQ(l3.getContent(), "Hello, World!");
+    EXPECT_EQ(l3.getLanguage(), "");
+}
+
+// StringLiteral equality takes both content and language into account.
+TEST(StringLiteralTest, Equality) {
+    rdf::StringLiteral l1("Hello, World!");
+    rdf::StringLiteral l2("Hello, World!");
+    rdf::StringLiteral l3("Hello, World!", "en");
+    rdf::StringLiteral l4("The quick brown fox jumps over the lazy dog.");
+
+    EXPECT_EQ(l1, l2);
+    EXPECT_NE(l1, l3);
+    EXPECT_NE(l1, l4);
+}
+
+// Proto messages of StringLiterals compare consistently with the wrappers.
+TEST(StringLiteralTest, ProtoEquality) {
+    rdf::StringLiteral l1("Hello, World!");
+    rdf::StringLiteral l2("Hello, World!");
+    rdf::StringLiteral l3("Hello, World!", "en");
+    rdf::StringLiteral l4("The quick brown fox jumps over the lazy dog.");
+
+    EXPECT_EQ(l1.getMessage(), l2.getMessage());
+    EXPECT_NE(l1.getMessage(), l3.getMessage());
+    EXPECT_NE(l1.getMessage(), l4.getMessage());
+}
+
+// Value wraps URI, BNode and StringLiteral; stringValue() exposes each.
+TEST(ValueTest, Construct) {
+    rdf::Value v1(rdf::URI("http://www.example.com/U1"));
+    rdf::Value v2(rdf::BNode("n1"));
+    rdf::Value v3(rdf::StringLiteral("Hello, World!"));
+    
+    EXPECT_EQ(v1.stringValue(), "http://www.example.com/U1");
+    EXPECT_EQ(v2.stringValue(), "n1");
+    EXPECT_EQ(v3.stringValue(), "Hello, World!");
+}
+
+// Value equality respects the wrapped node kind and content.
+TEST(ValueTest, Equality) {
+    rdf::Value v1(rdf::URI("http://www.example.com/U1"));
+    rdf::Value v2(rdf::URI("http://www.example.com/U1"));
+    rdf::Value v3(rdf::BNode("n3"));
+
+    EXPECT_EQ(v1, v2);
+    EXPECT_NE(v1, v3);
+}
+
+// Proto messages of Values compare consistently with the wrappers.
+TEST(ValueTest, ProtoEquality) {
+    rdf::Value v1(rdf::URI("http://www.example.com/U1"));
+    rdf::Value v2(rdf::URI("http://www.example.com/U1"));
+    rdf::Value v3(rdf::BNode("n3"));
+
+    EXPECT_EQ(v1.getMessage(), v2.getMessage());
+    EXPECT_NE(v1.getMessage(), v3.getMessage());
+}
+
+// Statement accessors compare directly against node wrappers.
+TEST(StatementTest, Construct) {
+    rdf::Statement s(rdf::URI("http://www.example.com/S1"), rdf::URI("http://www.example.com/P1"), "Hello World!");
+
+    EXPECT_EQ(s.getSubject(), "http://www.example.com/S1");
+    EXPECT_EQ(s.getPredicate(), "http://www.example.com/P1");
+    EXPECT_EQ(s.getObject(), "Hello World!");
+}
+}
\ No newline at end of file


[2/7] marmotta git commit: move experimental C++ LevelDB backend into Apache Marmotta main, and named the new module "ostrich" as an analogy to "kiwi"

Posted by ss...@apache.org.
http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/test/main.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/test/main.cc b/libraries/ostrich/backend/test/main.cc
new file mode 100644
index 0000000..40755a6
--- /dev/null
+++ b/libraries/ostrich/backend/test/main.cc
@@ -0,0 +1,11 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+// Author: Sebastian Schaffert <sc...@google.com>
+#include <glog/logging.h>
+#include "gtest.h"
+
+// run all tests in the current binary
+int main(int argc, char **argv) {
+    // Initialise glog first so logging is configured before any test runs;
+    // InitGoogleTest strips gtest command-line flags from argc/argv.
+    ::google::InitGoogleLogging(argv[0]);
+    ::testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/util/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/util/CMakeLists.txt b/libraries/ostrich/backend/util/CMakeLists.txt
new file mode 100644
index 0000000..f573c3c
--- /dev/null
+++ b/libraries/ostrich/backend/util/CMakeLists.txt
@@ -0,0 +1,3 @@
+include_directories(.. ${CMAKE_CURRENT_BINARY_DIR}/..)
+
+# Utility helpers shared across the backend: murmur3 hashing, string
+# splitting, and the closeable-iterator abstractions.
+add_library(marmotta_util murmur3.cc murmur3.h split.cc split.h iterator.h)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/util/iterator.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/util/iterator.h b/libraries/ostrich/backend/util/iterator.h
new file mode 100644
index 0000000..cb95d8b
--- /dev/null
+++ b/libraries/ostrich/backend/util/iterator.h
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//
+// A collection of iterators used by different components.
+//
+
+#ifndef MARMOTTA_ITERATOR_H
+#define MARMOTTA_ITERATOR_H
+
+#include <stdexcept>  // std::out_of_range thrown by the iterators below
+
+namespace marmotta {
+namespace util {
+
+/**
+ * A common iterator class for iterators binding resources.
+ */
+template<typename T>
+class CloseableIterator {
+ public:
+
+    /**
+     * Close the iterator, freeing any wrapped resources
+     */
+    virtual ~CloseableIterator() {}
+
+    /**
+     * Increment iterator to next element.
+    */
+    virtual CloseableIterator<T>& operator++() = 0;
+
+    /**
+     * Dereference iterator, returning a reference to the current element.
+     * Callers should check hasNext() first; the implementations in this
+     * header throw std::out_of_range when exhausted.
+     */
+    virtual T& operator*() = 0;
+
+    /**
+     * Dereference iterator, returning a pointer to the current element.
+     */
+    virtual T* operator->() = 0;
+
+    /**
+     * Return true in case the iterator has more elements.
+     */
+    virtual bool hasNext() = 0;
+
+};
+
+/**
+ * An empty iterator.
+ */
+template<typename T>
+class EmptyIterator : public CloseableIterator<T> {
+ public:
+    // Iterator over the empty sequence: hasNext() is always false and any
+    // dereference throws std::out_of_range.
+    EmptyIterator() { }
+
+    CloseableIterator<T>& operator++() override { return *this; }
+
+    T& operator*() override { throw std::out_of_range("No more elements"); }
+
+    T* operator->() override { throw std::out_of_range("No more elements"); }
+
+    bool hasNext() override { return false; }
+};
+
+
+
+/**
+ * An iterator wrapping a single element.
+ */
+template<typename T>
+class SingletonIterator : public CloseableIterator<T> {
+ public:
+    // Wraps a single element, copied into the iterator. Takes the value by
+    // const reference so temporaries can be wrapped too (the previous
+    // non-const reference parameter rejected rvalues).
+    SingletonIterator(const T& value) : value(value), incremented(false) { }
+
+    CloseableIterator<T> &operator++() override {
+        incremented = true;
+        return *this;
+    };
+
+    // Valid only before the first increment; throws afterwards.
+    T &operator*() override {
+        if (!incremented)
+            return value;
+        else
+            throw std::out_of_range("No more elements");
+    };
+
+    T *operator->() override {
+        if (!incremented)
+            return &value;
+        else
+            throw std::out_of_range("No more elements");
+    };
+
+    bool hasNext() override {
+        return !incremented;
+    };
+
+ private:
+    T value;
+    bool incremented;
+
+};
+
+}
+}
+
+
+#endif //MARMOTTA_ITERATOR_H

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/util/murmur3.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/util/murmur3.cc b/libraries/ostrich/backend/util/murmur3.cc
new file mode 100644
index 0000000..27bd6c1
--- /dev/null
+++ b/libraries/ostrich/backend/util/murmur3.cc
@@ -0,0 +1,313 @@
+//-----------------------------------------------------------------------------
+// MurmurHash3 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+// Note - The x86 and x64 versions do _not_ produce the same results, as the
+// algorithms are optimized for their respective platforms. You can still
+// compile and run any of them on any platform, but your performance with the
+// non-native version will be less than optimal.
+
+#include "murmur3.h"
+
+#define FORCE_INLINE inline __attribute__((always_inline))
+
// Rotate x left by r bits (32-bit variant). The shift counts are masked to
// 0..31 so that r == 0 (or r == 32) is well-defined: the original
// `x >> (32 - r)` form shifts by 32 for r == 0, which is undefined behaviour
// in C++. For the rotation amounts actually used by MurmurHash3 (1..31) the
// result is bit-identical, and compilers still recognize the rotate idiom.
inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
    return (x << (r & 31)) | (x >> ((32 - r) & 31));
}
+
// Rotate x left by r bits (64-bit variant). As with rotl32, the shift counts
// are masked to 0..63 so that r == 0 (or r == 64) does not invoke undefined
// behaviour via a 64-bit shift; results are identical for r in 1..63.
inline uint64_t rotl64 ( uint64_t x, int8_t r )
{
    return (x << (r & 63)) | (x >> ((64 - r) & 63));
}
+
+#define ROTL32(x,y)     rotl32(x,y)
+#define ROTL64(x,y)     rotl64(x,y)
+
+#define BIG_CONSTANT(x) (x##LLU)
+
+//-----------------------------------------------------------------------------
+// Block read - if your platform needs to do endian-swapping or can only
+// handle aligned reads, do the conversion here
+
+FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i )
+{
+    return p[i];
+}
+
+FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i )
+{
+    return p[i];
+}
+
+//-----------------------------------------------------------------------------
+// Finalization mix - force all bits of a hash block to avalanche
+
+FORCE_INLINE uint32_t fmix32 ( uint32_t h )
+{
+    h ^= h >> 16;
+    h *= 0x85ebca6b;
+    h ^= h >> 13;
+    h *= 0xc2b2ae35;
+    h ^= h >> 16;
+
+    return h;
+}
+
+//----------
+
+FORCE_INLINE uint64_t fmix64 ( uint64_t k )
+{
+    k ^= k >> 33;
+    k *= BIG_CONSTANT(0xff51afd7ed558ccd);
+    k ^= k >> 33;
+    k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
+    k ^= k >> 33;
+
+    return k;
+}
+
+//-----------------------------------------------------------------------------
+
// Compute the 32-bit x86 flavour of MurmurHash3 over `len` bytes at `key`,
// seeded with `seed`, and write the resulting uint32_t to `out`.
// Reference implementation — the exact statement order (including the
// intentional switch fall-throughs in the tail) defines the hash value.
void MurmurHash3_x86_32 ( const void * key, int len,
                          uint32_t seed, void * out )
{
    const uint8_t * data = (const uint8_t*)key;
    const int nblocks = len / 4;

    uint32_t h1 = seed;

    const uint32_t c1 = 0xcc9e2d51;
    const uint32_t c2 = 0x1b873593;

    //----------
    // body

    // blocks points one-past-the-last full block; the loop below indexes it
    // with negative i, walking forward from the start of the data.
    const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);

    for(int i = -nblocks; i; i++)
    {
        uint32_t k1 = getblock32(blocks,i);

        k1 *= c1;
        k1 = ROTL32(k1,15);
        k1 *= c2;

        h1 ^= k1;
        h1 = ROTL32(h1,13);
        h1 = h1*5+0xe6546b64;
    }

    //----------
    // tail

    // Mix in the 0-3 bytes left over after the 4-byte blocks.
    const uint8_t * tail = (const uint8_t*)(data + nblocks*4);

    uint32_t k1 = 0;

    switch(len & 3)
    {
        case 3: k1 ^= tail[2] << 16;  // intentional fall through
        case 2: k1 ^= tail[1] << 8;   // intentional fall through
        case 1: k1 ^= tail[0];
            k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
    };

    //----------
    // finalization

    // Fold in the total length, then avalanche all bits.
    h1 ^= len;

    h1 = fmix32(h1);

    *(uint32_t*)out = h1;
}
+
+//-----------------------------------------------------------------------------
+
// Compute the 128-bit x86 flavour of MurmurHash3 over `len` bytes at `key`,
// seeded with `seed`, writing four uint32_t words (16 bytes) to `out`.
// Note: this variant does NOT produce the same result as the x64 variant.
// Reference implementation — the exact statement order (including the
// intentional switch fall-throughs in the tail) defines the hash value.
void MurmurHash3_x86_128 ( const void * key, const int len,
                           uint32_t seed, void * out )
{
    const uint8_t * data = (const uint8_t*)key;
    const int nblocks = len / 16;

    // Four parallel 32-bit lanes, all seeded identically.
    uint32_t h1 = seed;
    uint32_t h2 = seed;
    uint32_t h3 = seed;
    uint32_t h4 = seed;

    const uint32_t c1 = 0x239b961b;
    const uint32_t c2 = 0xab0e9789;
    const uint32_t c3 = 0x38b34ae5;
    const uint32_t c4 = 0xa1e38b93;

    //----------
    // body

    // blocks points one-past-the-last full 16-byte block; the loop indexes
    // it with negative i, walking forward from the start of the data.
    const uint32_t * blocks = (const uint32_t *)(data + nblocks*16);

    for(int i = -nblocks; i; i++)
    {
        uint32_t k1 = getblock32(blocks,i*4+0);
        uint32_t k2 = getblock32(blocks,i*4+1);
        uint32_t k3 = getblock32(blocks,i*4+2);
        uint32_t k4 = getblock32(blocks,i*4+3);

        k1 *= c1; k1  = ROTL32(k1,15); k1 *= c2; h1 ^= k1;

        h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;

        k2 *= c2; k2  = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

        h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;

        k3 *= c3; k3  = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

        h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;

        k4 *= c4; k4  = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

        h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
    }

    //----------
    // tail

    // Mix in the 0-15 bytes left over after the 16-byte blocks. Every case
    // falls through to the next one by design.
    const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

    uint32_t k1 = 0;
    uint32_t k2 = 0;
    uint32_t k3 = 0;
    uint32_t k4 = 0;

    switch(len & 15)
    {
        case 15: k4 ^= tail[14] << 16;  // intentional fall through
        case 14: k4 ^= tail[13] << 8;   // intentional fall through
        case 13: k4 ^= tail[12] << 0;
            k4 *= c4; k4  = ROTL32(k4,18); k4 *= c1; h4 ^= k4;
            // intentional fall through

        case 12: k3 ^= tail[11] << 24;  // intentional fall through
        case 11: k3 ^= tail[10] << 16;  // intentional fall through
        case 10: k3 ^= tail[ 9] << 8;   // intentional fall through
        case  9: k3 ^= tail[ 8] << 0;
            k3 *= c3; k3  = ROTL32(k3,17); k3 *= c4; h3 ^= k3;
            // intentional fall through

        case  8: k2 ^= tail[ 7] << 24;  // intentional fall through
        case  7: k2 ^= tail[ 6] << 16;  // intentional fall through
        case  6: k2 ^= tail[ 5] << 8;   // intentional fall through
        case  5: k2 ^= tail[ 4] << 0;
            k2 *= c2; k2  = ROTL32(k2,16); k2 *= c3; h2 ^= k2;
            // intentional fall through

        case  4: k1 ^= tail[ 3] << 24;  // intentional fall through
        case  3: k1 ^= tail[ 2] << 16;  // intentional fall through
        case  2: k1 ^= tail[ 1] << 8;   // intentional fall through
        case  1: k1 ^= tail[ 0] << 0;
            k1 *= c1; k1  = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
    };

    //----------
    // finalization

    // Fold in the total length, cross-mix the four lanes, avalanche each
    // lane, then cross-mix once more.
    h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len;

    h1 += h2; h1 += h3; h1 += h4;
    h2 += h1; h3 += h1; h4 += h1;

    h1 = fmix32(h1);
    h2 = fmix32(h2);
    h3 = fmix32(h3);
    h4 = fmix32(h4);

    h1 += h2; h1 += h3; h1 += h4;
    h2 += h1; h3 += h1; h4 += h1;

    ((uint32_t*)out)[0] = h1;
    ((uint32_t*)out)[1] = h2;
    ((uint32_t*)out)[2] = h3;
    ((uint32_t*)out)[3] = h4;
}
+
+//-----------------------------------------------------------------------------
+
// Compute the 128-bit x64 flavour of MurmurHash3 over `len` bytes at `key`,
// seeded with `seed`, writing two uint64_t words (16 bytes) to `out`.
// Note: this variant does NOT produce the same result as the x86 variant.
// Reference implementation — the exact statement order (including the
// intentional switch fall-throughs in the tail) defines the hash value.
void MurmurHash3_x64_128 ( const void * key, const int len,
                           const uint32_t seed, void * out )
{
    const uint8_t * data = (const uint8_t*)key;
    const int nblocks = len / 16;

    // Two parallel 64-bit lanes, both seeded identically.
    uint64_t h1 = seed;
    uint64_t h2 = seed;

    const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
    const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

    //----------
    // body

    // Unlike the x86 variants, this one indexes forward from the start.
    const uint64_t * blocks = (const uint64_t *)(data);

    for(int i = 0; i < nblocks; i++)
    {
        uint64_t k1 = getblock64(blocks,i*2+0);
        uint64_t k2 = getblock64(blocks,i*2+1);

        k1 *= c1; k1  = ROTL64(k1,31); k1 *= c2; h1 ^= k1;

        h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;

        k2 *= c2; k2  = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

        h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
    }

    //----------
    // tail

    // Mix in the 0-15 bytes left over after the 16-byte blocks. Every case
    // falls through to the next one by design.
    const uint8_t * tail = (const uint8_t*)(data + nblocks*16);

    uint64_t k1 = 0;
    uint64_t k2 = 0;

    switch(len & 15)
    {
        case 15: k2 ^= ((uint64_t)tail[14]) << 48;  // intentional fall through
        case 14: k2 ^= ((uint64_t)tail[13]) << 40;  // intentional fall through
        case 13: k2 ^= ((uint64_t)tail[12]) << 32;  // intentional fall through
        case 12: k2 ^= ((uint64_t)tail[11]) << 24;  // intentional fall through
        case 11: k2 ^= ((uint64_t)tail[10]) << 16;  // intentional fall through
        case 10: k2 ^= ((uint64_t)tail[ 9]) << 8;   // intentional fall through
        case  9: k2 ^= ((uint64_t)tail[ 8]) << 0;
            k2 *= c2; k2  = ROTL64(k2,33); k2 *= c1; h2 ^= k2;
            // intentional fall through

        case  8: k1 ^= ((uint64_t)tail[ 7]) << 56;  // intentional fall through
        case  7: k1 ^= ((uint64_t)tail[ 6]) << 48;  // intentional fall through
        case  6: k1 ^= ((uint64_t)tail[ 5]) << 40;  // intentional fall through
        case  5: k1 ^= ((uint64_t)tail[ 4]) << 32;  // intentional fall through
        case  4: k1 ^= ((uint64_t)tail[ 3]) << 24;  // intentional fall through
        case  3: k1 ^= ((uint64_t)tail[ 2]) << 16;  // intentional fall through
        case  2: k1 ^= ((uint64_t)tail[ 1]) << 8;   // intentional fall through
        case  1: k1 ^= ((uint64_t)tail[ 0]) << 0;
            k1 *= c1; k1  = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
    };

    //----------
    // finalization

    // Fold in the total length, cross-mix the two lanes, avalanche each
    // lane, then cross-mix once more.
    h1 ^= len; h2 ^= len;

    h1 += h2;
    h2 += h1;

    h1 = fmix64(h1);
    h2 = fmix64(h2);

    h1 += h2;
    h2 += h1;

    ((uint64_t*)out)[0] = h1;
    ((uint64_t*)out)[1] = h2;
}
+
+//-----------------------------------------------------------------------------
+

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/util/murmur3.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/util/murmur3.h b/libraries/ostrich/backend/util/murmur3.h
new file mode 100644
index 0000000..69019a9
--- /dev/null
+++ b/libraries/ostrich/backend/util/murmur3.h
@@ -0,0 +1,18 @@
+//
+// Implementation of Murmur3 hashing using the C++ reference implementation.
+// See https://code.google.com/p/smhasher/wiki/MurmurHash
+//
+
+#ifndef MARMOTTA_MURMUR3_H
+#define MARMOTTA_MURMUR3_H
+
+#include <stdint.h>
+
+void MurmurHash3_x86_32  ( const void * key, int len, uint32_t seed, void * out );
+
+void MurmurHash3_x86_128 ( const void * key, int len, uint32_t seed, void * out );
+
+void MurmurHash3_x64_128 ( const void * key, int len, uint32_t seed, void * out );
+
+
+#endif //MARMOTTA_MURMUR3_H

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/util/split.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/util/split.cc b/libraries/ostrich/backend/util/split.cc
new file mode 100644
index 0000000..aa8ce7d
--- /dev/null
+++ b/libraries/ostrich/backend/util/split.cc
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "split.h"
+
+namespace marmotta {
+namespace util {
+
// Split string s at every occurrence of delim, appending each piece to
// elems. Empty pieces between consecutive delimiters are kept; a trailing
// delimiter does not produce a trailing empty piece (std::getline stops at
// end of input). Returns the (modified) elems vector for chaining.
std::vector<std::string> &split(const std::string &s, char delim, std::vector<std::string> &elems) {
    std::stringstream tokenizer(s);
    for (std::string part; std::getline(tokenizer, part, delim); ) {
        elems.push_back(part);
    }
    return elems;
}
+
+
+std::vector<std::string> split(const std::string &s, char delim) {
+    std::vector<std::string> elems;
+    split(s, delim, elems);
+    return elems;
+}
+
+}
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/util/split.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/util/split.h b/libraries/ostrich/backend/util/split.h
new file mode 100644
index 0000000..49f796e
--- /dev/null
+++ b/libraries/ostrich/backend/util/split.h
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MARMOTTA_SPLIT_H
+#define MARMOTTA_SPLIT_H
+
+#include <string>
+#include <sstream>
+#include <vector>
+
+namespace marmotta {
+namespace util {
+
+// Split a string at a certain delimiter and add the parts to the vector elems.
+std::vector<std::string> &split(const std::string &s, char delim,
+                                std::vector<std::string> &elems);
+
+// Split a string, returning a new vector containing the parts.
+std::vector<std::string> split(const std::string &s, char delim);
+
+}
+}
+
+#endif //MARMOTTA_SPLIT_H

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/client/pom.xml
----------------------------------------------------------------------
diff --git a/libraries/ostrich/client/pom.xml b/libraries/ostrich/client/pom.xml
new file mode 100644
index 0000000..b0a39b8
--- /dev/null
+++ b/libraries/ostrich/client/pom.xml
@@ -0,0 +1,234 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.marmotta</groupId>
+        <artifactId>ostrich-parent</artifactId>
+        <version>3.4.0-SNAPSHOT</version>
+        <relativePath>../</relativePath>
+    </parent>
+
+    <artifactId>ostrich-client</artifactId>
+    <packaging>jar</packaging>
+
+    <name>Ostrich Triplestore: Persistence Client</name>
+    <description>Sesame Sail wrapper around C++ Marmotta Services</description>
+
+    <pluginRepositories>
+        <pluginRepository>
+            <releases>
+                <updatePolicy>never</updatePolicy>
+            </releases>
+            <snapshots>
+                <enabled>false</enabled>
+            </snapshots>
+            <id>central</id>
+            <name>Central Repository</name>
+            <url>https://repo.maven.apache.org/maven2</url>
+        </pluginRepository>
+        <pluginRepository>
+            <id>protoc-plugin</id>
+            <url>https://dl.bintray.com/sergei-ivanov/maven/</url>
+        </pluginRepository>
+    </pluginRepositories>
+    <build>
+        <extensions>
+            <extension>
+                <groupId>kr.motd.maven</groupId>
+                <artifactId>os-maven-plugin</artifactId>
+                <version>1.3.0.Final</version>
+            </extension>
+        </extensions>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>test-jar</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>com.google.protobuf.tools</groupId>
+                <artifactId>maven-protoc-plugin</artifactId>
+                <version>0.4.2</version>
+                <configuration>
+                    <!--
+                      The version of protoc must match protobuf-java. If you don't depend on
+                      protobuf-java directly, you will be transitively depending on the
+                      protobuf-java version that grpc depends on.
+                    -->
+                    <protocArtifact>com.google.protobuf:protoc:3.0.0-beta-1:exe:${os.detected.classifier}</protocArtifact>
+                    <pluginId>grpc-java</pluginId>
+                    <pluginArtifact>io.grpc:protoc-gen-grpc-java:0.9.0:exe:${os.detected.classifier}</pluginArtifact>
+                </configuration>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>compile</goal>
+                            <goal>compile-custom</goal>
+                        </goals>
+                        <configuration>
+                            <protoSourceRoot>${basedir}/../backend/service</protoSourceRoot>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.marmotta</groupId>
+            <artifactId>ostrich-model</artifactId>
+            <version>3.4.0-SNAPSHOT</version>
+        </dependency>
+
+        <!-- Logging -->
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>log4j-over-slf4j</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>jul-to-slf4j</artifactId>
+        </dependency>
+
+
+        <!-- gRPC -->
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-all</artifactId>
+            <version>0.10.0-SNAPSHOT</version>
+        </dependency>
+
+        <!-- Sesame dependencies -->
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-model</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-sail-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-sail-inferencer</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-queryalgebra-model</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-queryalgebra-evaluation</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>joda-time</groupId>
+            <artifactId>joda-time</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.marmotta</groupId>
+            <artifactId>marmotta-commons</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.marmotta</groupId>
+            <artifactId>marmotta-model-vocabs</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>commons-io</groupId>
+            <artifactId>commons-io</artifactId>
+        </dependency>
+
+
+        <!-- Testing -->
+        <dependency>
+            <artifactId>junit</artifactId>
+            <groupId>junit</groupId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <artifactId>hamcrest-core</artifactId>
+            <groupId>org.hamcrest</groupId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <artifactId>hamcrest-library</artifactId>
+            <groupId>org.hamcrest</groupId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-core</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-classic</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-rio-api</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-rio-rdfxml</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-repository-sail</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-store-testsuite</artifactId>
+            <scope>test</scope>
+        </dependency>
+        
+        <dependency>
+            <groupId>com.google.code.tempus-fugit</groupId>
+            <artifactId>tempus-fugit</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+
+    </dependencies>
+    
+</project>

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/client/src/main/java/org/apache/marmotta/ostrich/sail/ClosableResponseStream.java
----------------------------------------------------------------------
diff --git a/libraries/ostrich/client/src/main/java/org/apache/marmotta/ostrich/sail/ClosableResponseStream.java b/libraries/ostrich/client/src/main/java/org/apache/marmotta/ostrich/sail/ClosableResponseStream.java
new file mode 100644
index 0000000..35d8ada
--- /dev/null
+++ b/libraries/ostrich/client/src/main/java/org/apache/marmotta/ostrich/sail/ClosableResponseStream.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.marmotta.ostrich.sail;
+
+import com.google.common.base.Preconditions;
+import info.aduna.iteration.CloseableIteration;
+import io.grpc.ClientCall;
+import io.grpc.Metadata;
+import io.grpc.MethodDescriptor;
+import io.grpc.Status;
+import io.grpc.stub.AbstractStub;
+import org.openrdf.sail.SailException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.NoSuchElementException;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+
+/**
+ * A modified version of ClientCalls.BlockingResponseStream that allows closing the stream early.
+ *
+ * @author Sebastian Schaffert (sschaffert@apache.org)
+ */
+public class ClosableResponseStream<Svc extends AbstractStub<Svc>, ReqT, T> implements CloseableIteration<T, SailException> {
+
+    private static Logger log = LoggerFactory.getLogger(ClosableResponseStream.class);
+
+    // Due to flow control, only needs to hold up to 2 items: 1 for value, 1 for close.
+    private final BlockingQueue<Object> buffer = new ArrayBlockingQueue<Object>(2);
+    private final ClientCall.Listener<T> listener = new QueuingListener();
+    private final ClientCall<ReqT, T> call;
+    // Only accessed when iterating.
+    private Object last;
+
+    ClosableResponseStream(AbstractStub<Svc> stub, MethodDescriptor<ReqT, T> method, ReqT req) throws SailException {
+        call = stub.getChannel().newCall(method, stub.getCallOptions());
+
+        call.start(listener(), new Metadata());
+        call.request(1);
+        try {
+            call.sendMessage(req);
+            call.halfClose();
+        } catch (Throwable t) {
+            call.cancel();
+            throw new SailException(t);
+        }
+    }
+
+    ClientCall.Listener<T> listener() {
+        return listener;
+    }
+
+    /**
+     * Closes this iteration, freeing any resources that it is holding. If the
+     * iteration has already been closed then invoking this method has no effect.
+     */
+    @Override
+    public void close() throws SailException {
+        call.cancel();
+    }
+
+    /**
+     * Returns <tt>true</tt> if the iteration has more elements. (In other
+     * words, returns <tt>true</tt> if {@link #next} would return an element
+     * rather than throwing a <tt>NoSuchElementException</tt>.)
+     *
+     * @return <tt>true</tt> if the iteration has more elements.
+     * @throws SailException
+     */
+    @Override
+    public boolean hasNext() throws SailException {
+        try {
+            // Will block here indefinitely waiting for content. RPC timeouts defend against permanent
+            // hangs here as the call will become closed.
+            last = (last == null) ? buffer.take() : last;
+        } catch (InterruptedException ie) {
+            Thread.interrupted();
+            throw new SailException(ie);
+        }
+        if (last instanceof Status) {
+            throw new SailException(((Status) last).asRuntimeException());
+        }
+        return last != this;
+    }
+
+    /**
+     * Returns the next element in the iteration.
+     *
+     * @return the next element in the iteration.
+     * @throws NoSuchElementException if the iteration has no more elements or if it has been closed.
+     */
+    @Override
+    public T next() throws SailException {
+        if (!hasNext()) {
+            throw new NoSuchElementException();
+        }
+        try {
+            call.request(1);
+            @SuppressWarnings("unchecked")
+            T tmp = (T) last;
+            return tmp;
+        } finally {
+            last = null;
+        }
+    }
+
+    /**
+     * Removes from the underlying collection the last element returned by the
+     * iteration (optional operation). This method can be called only once per
+     * call to next.
+     *
+     * @throws UnsupportedOperationException if the remove operation is not supported by this Iteration.
+     * @throws IllegalStateException         If the Iteration has been closed, or if <tt>next()</tt> has not
+     *                                       yet been called, or <tt>remove()</tt> has already been called
+     *                                       after the last call to <tt>next()</tt>.
+     */
+    @Override
+    public void remove() throws SailException {
+
+    }
+
+    private class QueuingListener extends ClientCall.Listener<T> {
+        private boolean done = false;
+
+        @Override
+        public void onHeaders(Metadata headers) {
+        }
+
+        @Override
+        public void onMessage(T value) {
+            Preconditions.checkState(!done, "ClientCall already closed");
+            buffer.add(value);
+        }
+
+        @Override
+        public void onClose(Status status, Metadata trailers) {
+            Preconditions.checkState(!done, "ClientCall already closed");
+            if (status.isOk()) {
+                buffer.add(ClosableResponseStream.this);
+            } else {
+                buffer.add(status);
+            }
+            done = true;
+        }
+
+    }
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/client/src/main/java/org/apache/marmotta/ostrich/sail/OstrichSail.java
----------------------------------------------------------------------
diff --git a/libraries/ostrich/client/src/main/java/org/apache/marmotta/ostrich/sail/OstrichSail.java b/libraries/ostrich/client/src/main/java/org/apache/marmotta/ostrich/sail/OstrichSail.java
new file mode 100644
index 0000000..84d8c2e
--- /dev/null
+++ b/libraries/ostrich/client/src/main/java/org/apache/marmotta/ostrich/sail/OstrichSail.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.marmotta.ostrich.sail;
+
+import org.openrdf.model.ValueFactory;
+import org.openrdf.sail.NotifyingSailConnection;
+import org.openrdf.sail.Sail;
+import org.openrdf.sail.SailException;
+import org.openrdf.sail.helpers.NotifyingSailBase;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Add file description here!
+ *
+ * @author Sebastian Schaffert (sschaffert@apache.org)
+ */
+public class OstrichSail extends NotifyingSailBase implements Sail {
+    private static Logger log = LoggerFactory.getLogger(OstrichSail.class);
+
+    private OstrichValueFactory valueFactory = new OstrichValueFactory();
+
+    private String host;
+    private int port;
+
+    public OstrichSail(String host, int port) {
+        this.host = host;
+        this.port = port;
+    }
+
+    /**
+     * Do store-specific operations to initialize the store. The default
+     * implementation of this method does nothing.
+     */
+    @Override
+    protected void initializeInternal() throws SailException {
+        log.info("Initialising CMarmotta Sail (host={}, port={})", host, port);
+    }
+
+    @Override
+    protected NotifyingSailConnection getConnectionInternal() throws SailException {
+        return new OstrichSailConnection(this, host, port);
+    }
+
+    /**
+     * Do store-specific operations to ensure proper shutdown of the store.
+     */
+    @Override
+    protected void shutDownInternal() throws SailException {
+
+    }
+
+    /**
+     * Checks whether this Sail object is writable, i.e. if the data contained in
+     * this Sail object can be changed.
+     */
+    @Override
+    public boolean isWritable() throws SailException {
+        return true;
+    }
+
+    /**
+     * Gets a ValueFactory object that can be used to create URI-, blank node-,
+     * literal- and statement objects.
+     *
+     * @return a ValueFactory object for this Sail object.
+     */
+    @Override
+    public ValueFactory getValueFactory() {
+        return valueFactory;
+    }
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/client/src/main/java/org/apache/marmotta/ostrich/sail/OstrichSailConnection.java
----------------------------------------------------------------------
diff --git a/libraries/ostrich/client/src/main/java/org/apache/marmotta/ostrich/sail/OstrichSailConnection.java b/libraries/ostrich/client/src/main/java/org/apache/marmotta/ostrich/sail/OstrichSailConnection.java
new file mode 100644
index 0000000..bb80665
--- /dev/null
+++ b/libraries/ostrich/client/src/main/java/org/apache/marmotta/ostrich/sail/OstrichSailConnection.java
@@ -0,0 +1,529 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.marmotta.ostrich.sail;
+
+import com.google.common.util.concurrent.SettableFuture;
+import com.google.protobuf.Empty;
+import com.google.protobuf.Int64Value;
+import info.aduna.iteration.*;
+import io.grpc.ManagedChannel;
+import io.grpc.ManagedChannelBuilder;
+import io.grpc.Status;
+import io.grpc.stub.StreamObserver;
+import org.apache.marmotta.ostrich.client.proto.Sail;
+import org.apache.marmotta.ostrich.client.proto.SailServiceGrpc;
+import org.apache.marmotta.ostrich.client.proto.Sparql;
+import org.apache.marmotta.ostrich.client.proto.SparqlServiceGrpc;
+import org.apache.marmotta.ostrich.model.*;
+import org.apache.marmotta.ostrich.model.proto.Model;
+import org.openrdf.model.*;
+import org.openrdf.query.BindingSet;
+import org.openrdf.query.Dataset;
+import org.openrdf.query.QueryEvaluationException;
+import org.openrdf.query.QueryInterruptedException;
+import org.openrdf.query.algebra.QueryRoot;
+import org.openrdf.query.algebra.StatementPattern;
+import org.openrdf.query.algebra.TupleExpr;
+import org.openrdf.query.algebra.Var;
+import org.openrdf.query.algebra.evaluation.EvaluationStrategy;
+import org.openrdf.query.algebra.evaluation.TripleSource;
+import org.openrdf.query.algebra.evaluation.impl.*;
+import org.openrdf.query.impl.EmptyBindingSet;
+import org.openrdf.query.impl.MapBindingSet;
+import org.openrdf.sail.SailException;
+import org.openrdf.sail.helpers.NotifyingSailConnectionBase;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.channels.ClosedByInterruptException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A Sesame SailConnection implemented as a gRPC client for the Ostrich
+ * backend. Write operations are buffered on a streaming update call and
+ * committed lazily; every read operation first commits a pending update
+ * stream (see commitForQuery) so reads observe earlier writes.
+ *
+ * @author Sebastian Schaffert (sschaffert@apache.org)
+ */
+public class OstrichSailConnection extends NotifyingSailConnectionBase {
+
+    private static Logger log = LoggerFactory.getLogger(OstrichSailConnection.class);
+
+    // Channel shared by all stubs of this connection; closed in closeInternal().
+    private final ManagedChannel channel;
+    // Blocking stub for simple request/response calls (size, contexts, namespaces).
+    private final SailServiceGrpc.SailServiceBlockingStub stub;
+    // Async stubs for streaming statement results and the update stream.
+    private final SailServiceGrpc.SailServiceStub sailServiceStub;
+    private final SparqlServiceGrpc.SparqlServiceStub sparqlServiceStub;
+
+    // Completion future for the server's acknowledgement of an update stream.
+    private SettableFuture<Void> finishFuture;
+    private StreamObserver<Sail.UpdateResponse> updateResponseObserver;
+    // Request stream of the currently open update; null when no update is pending.
+    private StreamObserver<Sail.UpdateRequest> updateRequestObserver;
+
+    /**
+     * Create a connection with its own gRPC channel to the given backend.
+     *
+     * @param parent the Sail this connection belongs to
+     * @param host   backend hostname
+     * @param port   backend port
+     */
+    public OstrichSailConnection(OstrichSail parent, String host, int port) {
+        super(parent);
+        // Plaintext (unencrypted) channel; presumably the backend runs on a
+        // trusted network — TODO confirm whether TLS should be configurable.
+        channel = ManagedChannelBuilder.forAddress(host, port)
+                .usePlaintext(true)
+                .build();
+        stub = SailServiceGrpc.newBlockingStub(channel);
+        sailServiceStub = SailServiceGrpc.newStub(channel);
+        sparqlServiceStub = SparqlServiceGrpc.newStub(channel);
+
+        // Observer for server responses on the update stream: logs commit
+        // statistics and resolves/fails finishFuture, which commitInternal()
+        // blocks on.
+        updateResponseObserver = new StreamObserver<Sail.UpdateResponse>() {
+            @Override
+            public void onNext(Sail.UpdateResponse updateResponse) {
+                log.info(
+                        "Committed transaction (added statements={}, removed statements={}, added namespaces={}, removed namespaces={})",
+                        updateResponse.getAddedStatements(), updateResponse.getRemovedStatements(),
+                        updateResponse.getAddedNamespaces(), updateResponse.getRemovedNamespaces());
+            }
+
+            @Override
+            public void onError(Throwable throwable) {
+                finishFuture.setException(throwable);
+            }
+
+            @Override
+            public void onCompleted() {
+                finishFuture.set(null);
+            }
+        };
+    }
+
+    /**
+     * Buffer one added statement per requested context onto the update stream.
+     * With no explicit context, a single statement with a null context is sent.
+     */
+    @Override
+    protected void addStatementInternal(Resource subj, URI pred, Value obj, Resource... contexts) throws SailException {
+        log.info("Adding statements.");
+        ensureTransaction();
+
+        // Collapse the previously duplicated branches: an empty context array
+        // is equivalent to a single null context.
+        Resource[] targets = contexts.length > 0 ? contexts : new Resource[] { null };
+        for (Resource ctx : targets) {
+            ProtoStatement stmt = new ProtoStatement(subj, pred, obj, ctx);
+            Sail.UpdateRequest u = Sail.UpdateRequest.newBuilder().setStmtAdded(stmt.getMessage()).build();
+            updateRequestObserver.onNext(u);
+        }
+    }
+
+    /**
+     * Commit any pending updates and shut down this connection's gRPC channel.
+     *
+     * @throws SailException if the commit fails or the shutdown is interrupted
+     */
+    @Override
+    protected void closeInternal() throws SailException {
+        log.info("Closing connection.");
+        commit();
+
+        try {
+            channel.shutdown().awaitTermination(5, TimeUnit.SECONDS);
+        } catch (InterruptedException e) {
+            // Bug fix: the original created the SailException but never threw it,
+            // silently swallowing the interruption. Restore the interrupt flag
+            // before propagating.
+            Thread.currentThread().interrupt();
+            throw new SailException("Shutdown interrupted", e);
+        }
+    }
+
+    /**
+     * Evaluate a tuple expression locally, using this connection's
+     * getStatements() as the triple source (via CMarmottaTripleSource).
+     * The optimizer chain mirrors Sesame's standard evaluation pipeline.
+     */
+    @Override
+    protected CloseableIteration<? extends BindingSet, QueryEvaluationException> evaluateInternal(TupleExpr tupleExpr, Dataset dataset, BindingSet bindings, boolean includeInferred) throws SailException {
+        // Clone the tuple expression to allow for more aggressive optimizations
+        tupleExpr = tupleExpr.clone();
+
+        if (!(tupleExpr instanceof QueryRoot)) {
+            // Add a dummy root node to the tuple expressions to allow the
+            // optimizers to modify the actual root node
+            tupleExpr = new QueryRoot(tupleExpr);
+        }
+
+        try {
+            CMarmottaTripleSource tripleSource = new CMarmottaTripleSource(this,includeInferred);
+            EvaluationStrategy strategy = new EvaluationStrategyImpl(tripleSource, dataset);
+
+            // The order of these optimizers matters; they are applied in the
+            // same sequence as Sesame's default SailConnection evaluation.
+            new BindingAssigner().optimize(tupleExpr, dataset, bindings);
+            new ConstantOptimizer(strategy).optimize(tupleExpr, dataset, bindings);
+            new CompareOptimizer().optimize(tupleExpr, dataset, bindings);
+            new ConjunctiveConstraintSplitter().optimize(tupleExpr, dataset, bindings);
+            new DisjunctiveConstraintOptimizer().optimize(tupleExpr, dataset, bindings);
+            new SameTermFilterOptimizer().optimize(tupleExpr, dataset, bindings);
+            new QueryModelNormalizer().optimize(tupleExpr, dataset, bindings);
+            new QueryJoinOptimizer(new InternalEvaluationStatistics()).optimize(tupleExpr, dataset, bindings);
+            new IterativeEvaluationOptimizer().optimize(tupleExpr, dataset, bindings);
+            new FilterOptimizer().optimize(tupleExpr, dataset, bindings);
+            new OrderLimitOptimizer().optimize(tupleExpr, dataset, bindings);
+
+            // NOTE: the caller-provided bindings were consumed by BindingAssigner
+            // above; evaluation itself starts from an empty binding set.
+            return strategy.evaluate(tupleExpr, EmptyBindingSet.getInstance());
+
+        } catch (QueryEvaluationException e) {
+            throw new SailException(e);
+        }
+    }
+
+
+    /**
+     * Send a SPARQL query to a backend supporting direct SPARQL evaluation.
+     * Any pending update stream is committed first so the query observes
+     * earlier writes.
+     *
+     * @param query the SPARQL query string evaluated remotely
+     * @return a lazy iteration over the result binding sets
+     * @throws SailException if committing the pending updates fails
+     */
+    public CloseableIteration<? extends BindingSet, QueryEvaluationException> directTupleQuery(String query) throws SailException {
+        log.info("Committing transaction before querying ...");
+        commitForQuery();
+
+        Sparql.SparqlRequest request = Sparql.SparqlRequest.newBuilder().setQuery(query).build();
+
+        return new ExceptionConvertingIteration<BindingSet, QueryEvaluationException>(
+                new ConvertingIteration<Sparql.SparqlResponse, BindingSet, SailException>(
+                        new ClosableResponseStream<>(sparqlServiceStub, SparqlServiceGrpc.METHOD_TUPLE_QUERY, request)) {
+                    @Override
+                    protected BindingSet convert(Sparql.SparqlResponse sourceObject) throws SailException {
+                        MapBindingSet result = new MapBindingSet();
+                        for (Sparql.SparqlResponse.Binding b : sourceObject.getBindingList()) {
+                            // Map the protobuf value onto the matching Sesame value type.
+                            // Bug fix: the original switches had no break statements, so a
+                            // URI binding fell through to the BNODE case (overwriting v with
+                            // an empty bnode) and RESOURCE fell through into LITERAL.
+                            Value v = null;
+                            switch (b.getValue().getValuesCase()) {
+                                case RESOURCE:
+                                    switch(b.getValue().getResource().getResourcesCase()) {
+                                        case URI:
+                                            v = new ProtoURI(b.getValue().getResource().getUri());
+                                            break;
+                                        case BNODE:
+                                            v = new ProtoBNode(b.getValue().getResource().getBnode());
+                                            break;
+                                    }
+                                    break;
+                                case LITERAL:
+                                    switch(b.getValue().getLiteral().getLiteralsCase()) {
+                                        case STRINGLITERAL:
+                                            v = new ProtoStringLiteral(b.getValue().getLiteral().getStringliteral());
+                                            break;
+                                        case DATALITERAL:
+                                            v = new ProtoDatatypeLiteral(b.getValue().getLiteral().getDataliteral());
+                                            break;
+                                    }
+                                    break;
+                            }
+                            result.addBinding(b.getVariable(), v);
+                        }
+                        return result;
+                    }
+                }) {
+            @Override
+            protected QueryEvaluationException convert(Exception e) {
+                return new QueryEvaluationException(e);
+            }
+        };
+    }
+
+    /**
+     * Fetch the identifiers of all named contexts from the backend,
+     * committing any pending updates first.
+     */
+    @Override
+    protected CloseableIteration<? extends Resource, SailException> getContextIDsInternal() throws SailException {
+        log.info("Committing transaction before querying ...");
+        commitForQuery();
+
+        return wrapResourceIterator(stub.getContexts(Empty.getDefaultInstance()));
+    }
+
+    /**
+     * Stream statements matching the given pattern (null fields are wildcards).
+     * When several contexts are requested, one backend query is issued per
+     * context (lazily, via DelayedIteration) and the results are concatenated.
+     * NOTE: includeInferred is currently not forwarded to the backend.
+     */
+    @Override
+    protected CloseableIteration<? extends Statement, SailException> getStatementsInternal(Resource subj, URI pred, Value obj, boolean includeInferred, Resource... contexts) throws SailException {
+        log.info("Committing transaction before querying ...");
+        commitForQuery();
+
+        if (contexts.length > 0) {
+            ArrayList<CloseableIteration<? extends Statement, SailException>> iterators = new ArrayList<>(contexts.length);
+            for (Resource ctx : contexts) {
+                final ProtoStatement pattern = new ProtoStatement(subj, pred, obj, ctx);
+                iterators.add(new DelayedIteration<Statement, SailException>() {
+                    @Override
+                    protected Iteration<? extends Statement, ? extends SailException> createIteration() throws SailException {
+                        return wrapStatementIterator(new ClosableResponseStream<>(sailServiceStub, SailServiceGrpc.METHOD_GET_STATEMENTS, pattern.getMessage()));
+                    }
+                });
+            }
+            return new UnionIteration<>(iterators);
+        }
+
+        // No context restriction: a single wildcard-context query.
+        ProtoStatement pattern = new ProtoStatement(subj, pred, obj, null);
+
+        return wrapStatementIterator(new ClosableResponseStream<>(sailServiceStub, SailServiceGrpc.METHOD_GET_STATEMENTS, pattern.getMessage()));
+    }
+
+    /**
+     * Ask the backend for the number of statements, optionally restricted to
+     * the given contexts. Pending updates are committed first.
+     */
+    @Override
+    protected long sizeInternal(Resource... contexts) throws SailException {
+        log.info("Committing transaction before querying ...");
+        commitForQuery();
+
+        Sail.ContextRequest.Builder builder = Sail.ContextRequest.newBuilder();
+        for (Resource ctx : contexts) {
+            // Contexts that are neither URI nor BNode are silently skipped.
+            if (ctx instanceof URI) {
+                builder.addContextBuilder().getUriBuilder().setUri(ctx.stringValue());
+            } else if(ctx instanceof BNode) {
+                builder.addContextBuilder().getBnodeBuilder().setId(ctx.stringValue());
+            }
+        }
+
+        Int64Value v = stub.size(builder.build());
+        return v.getValue();
+    }
+
+    @Override
+    protected void startTransactionInternal() throws SailException {
+        // Intentionally empty: the update stream is opened lazily by
+        // ensureTransaction() on the first write operation.
+    }
+
+    /**
+     * Lazily open the streaming update call if none is in progress, creating
+     * a fresh completion future for commitInternal() to wait on.
+     */
+    protected void ensureTransaction() {
+        if (updateRequestObserver == null) {
+            finishFuture = SettableFuture.create();
+            updateRequestObserver = sailServiceStub.update(updateResponseObserver);
+        }
+    }
+
+    /**
+     * Flush a pending update stream before a read so the query observes all
+     * buffered writes, then re-open the (no-op) transaction state.
+     */
+    protected void commitForQuery() throws SailException {
+        if (isActive()) {
+            commitInternal();
+            startTransactionInternal();
+        }
+    }
+
+    @Override
+    protected void commitInternal() throws SailException {
+        if (updateRequestObserver != null) {
+            log.info("Start transaction commit");
+            updateRequestObserver.onCompleted();
+            try {
+                finishFuture.get();
+            } catch (InterruptedException | ExecutionException e) {
+                throw new SailException("Error while writing to server", e);
+            }
+            updateRequestObserver = null;
+            log.info("Transaction committed.");
+        }
+    }
+
+    /**
+     * Abort the pending update stream, discarding all buffered changes.
+     */
+    @Override
+    protected void rollbackInternal() throws SailException {
+        // Bug fix: guard against a rollback without any preceding write — the
+        // update stream is opened lazily, so the observer may still be null.
+        if (updateRequestObserver != null) {
+            updateRequestObserver.onError(new Exception("transaction rollback"));
+            updateRequestObserver = null;
+        }
+    }
+
+    /**
+     * Buffer removal requests for all statements matching the given pattern,
+     * one request per requested context (null fields act as wildcards).
+     */
+    @Override
+    protected void removeStatementsInternal(Resource subj, URI pred, Value obj, Resource... contexts) throws SailException {
+        log.info("Removing statements.");
+        commitForQuery();
+        ensureTransaction();
+
+        // An empty context array is equivalent to a single null context.
+        Resource[] targets = contexts.length > 0 ? contexts : new Resource[] { null };
+        for (Resource ctx : targets) {
+            ProtoStatement pattern = new ProtoStatement(subj, pred, obj, ctx);
+            updateRequestObserver.onNext(
+                    Sail.UpdateRequest.newBuilder().setStmtRemoved(pattern.getMessage()).build());
+        }
+    }
+
+    /**
+     * Remove all statements, optionally restricted to the given contexts.
+     * Clearing is expressed to the backend as removing the all-wildcard pattern.
+     */
+    @Override
+    protected void clearInternal(Resource... contexts) throws SailException {
+        log.info("Clearing statements.");
+        commitForQuery();
+        ensureTransaction();
+
+        // An empty context array is equivalent to a single null context.
+        Resource[] targets = contexts.length > 0 ? contexts : new Resource[] { null };
+        for (Resource ctx : targets) {
+            ProtoStatement wildcard = new ProtoStatement(null, null, null, ctx);
+            updateRequestObserver.onNext(
+                    Sail.UpdateRequest.newBuilder().setStmtRemoved(wildcard.getMessage()).build());
+        }
+    }
+
+    /**
+     * Fetch all namespace declarations from the backend, committing any
+     * pending updates first.
+     */
+    @Override
+    protected CloseableIteration<? extends Namespace, SailException> getNamespacesInternal() throws SailException {
+        log.info("Getting namespaces.");
+        commitForQuery();
+
+        Empty pattern = Empty.getDefaultInstance();
+        return wrapNamespaceIterator(stub.getNamespaces(pattern));
+    }
+
+    /**
+     * Look up the namespace URI registered for a prefix.
+     *
+     * @return the namespace URI, or null if the prefix is unknown
+     */
+    @Override
+    protected String getNamespaceInternal(String prefix) throws SailException {
+        log.info("Committing transaction before querying ...");
+        commitForQuery();
+
+        Model.Namespace pattern = Model.Namespace.newBuilder().setPrefix(prefix).build();
+        try {
+            return stub.getNamespace(pattern).getUri();
+        } catch (io.grpc.StatusRuntimeException ex) {
+            // The server signals an unknown prefix with gRPC NOT_FOUND; map it
+            // to the Sesame convention of returning null.
+            if (ex.getStatus().getCode() == Status.Code.NOT_FOUND) {
+                return null;
+            }
+            throw new SailException(ex);
+        }
+    }
+
+    /**
+     * Buffer a namespace registration on the update stream.
+     * NOTE(review): unlike the other mutators this does not call
+     * commitForQuery() first — presumably intentional, but worth confirming.
+     */
+    @Override
+    protected void setNamespaceInternal(String prefix, String name) throws SailException {
+        log.info("Setting namespace {} = {}.", prefix, name);
+        ensureTransaction();
+
+        ProtoNamespace ns = new ProtoNamespace(prefix, name);
+        Sail.UpdateRequest u = Sail.UpdateRequest.newBuilder().setNsAdded(ns.getMessage()).build();
+        updateRequestObserver.onNext(u);
+
+    }
+
+    /**
+     * Buffer removal of the namespace registered for the given prefix.
+     */
+    @Override
+    protected void removeNamespaceInternal(String prefix) throws SailException {
+        log.info("Removing namespace {}.", prefix);
+        commitForQuery();
+        ensureTransaction();
+
+        Sail.UpdateRequest.Builder builder = Sail.UpdateRequest.newBuilder();
+        builder.getNsRemovedBuilder().setPrefix(prefix);
+        updateRequestObserver.onNext(builder.build());
+    }
+
+    /**
+     * Remove all namespace declarations.
+     */
+    @Override
+    protected void clearNamespacesInternal() throws SailException {
+        log.info("Clearing namespaces.");
+        commitForQuery();
+        ensureTransaction();
+
+        Sail.UpdateRequest.Builder builder = Sail.UpdateRequest.newBuilder();
+        // Sending the default (empty-prefix) namespace presumably tells the
+        // server to remove all namespaces — TODO confirm against the protocol.
+        builder.setNsRemoved(Model.Namespace.getDefaultInstance());
+        updateRequestObserver.onNext(builder.build());
+    }
+
+    // Adapt a stream of protobuf statements into Sesame Statement objects.
+    private static CloseableIteration<Statement, SailException> wrapStatementIterator(CloseableIteration<Model.Statement, SailException> it) {
+        return new ConvertingIteration<Model.Statement, Statement, SailException>(it) {
+            @Override
+            protected Statement convert(Model.Statement sourceObject) throws SailException {
+                return new ProtoStatement(sourceObject);
+            }
+        };
+    }
+
+
+    // Adapt a plain iterator of protobuf statements into Sesame Statement objects.
+    private static CloseableIteration<Statement, SailException> wrapStatementIterator(Iterator<Model.Statement> it) {
+        return new ConvertingIteration<Model.Statement, Statement, SailException>(
+                new IteratorIteration<Model.Statement, SailException>(it)) {
+            @Override
+            protected Statement convert(Model.Statement sourceObject) throws SailException {
+                return new ProtoStatement(sourceObject);
+            }
+        };
+    }
+
+    // Adapt an iterator of protobuf namespaces into Sesame Namespace objects.
+    private static CloseableIteration<Namespace, SailException> wrapNamespaceIterator(Iterator<Model.Namespace> it) {
+        return new ConvertingIteration<Model.Namespace, Namespace, SailException>(
+                new IteratorIteration<Model.Namespace, SailException>(it)) {
+            @Override
+            protected Namespace convert(Model.Namespace sourceObject) throws SailException {
+                return new ProtoNamespace(sourceObject);
+            }
+        };
+    }
+
+    // Adapt an iterator of protobuf resources into Sesame Resource objects.
+    private static CloseableIteration<Resource, SailException> wrapResourceIterator(Iterator<Model.Resource> it) {
+        return new ConvertingIteration<Model.Resource, Resource, SailException>(
+                new IteratorIteration<Model.Resource, SailException>(it)) {
+            @Override
+            protected Resource convert(Model.Resource sourceObject) throws SailException {
+                switch (sourceObject.getResourcesCase()) {
+                    case URI:
+                        return new ProtoURI(sourceObject.getUri());
+                    case BNODE:
+                        return new ProtoBNode(sourceObject.getBnode());
+                }
+                // An unset resource maps to null; callers should be prepared
+                // for null elements in the iteration.
+                return null;
+            }
+        };
+    }
+
+    /**
+     * Cardinality estimator used by the QueryJoinOptimizer in
+     * evaluateInternal(). Currently delegates entirely to Sesame's default
+     * heuristics; the subclass exists as an extension point.
+     */
+    protected static class InternalEvaluationStatistics extends EvaluationStatistics {
+
+        public InternalEvaluationStatistics() {
+        }
+
+        @Override
+        protected CardinalityCalculator createCardinalityCalculator() {
+            return new InternalCardinalityCalculator();
+        }
+
+        protected class InternalCardinalityCalculator extends CardinalityCalculator {
+
+            @Override
+            protected double getCardinality(StatementPattern sp) {
+                // Delegates to the default estimate; override here to plug in
+                // backend-provided statistics.
+                return super.getCardinality(sp);
+            }
+
+            protected Value getConstantValue(Var var) {
+                return (var != null) ? var.getValue() : null;
+            }
+        }
+    }
+
+    /**
+     * A helper class using a CMarmottaSailConnection as triple source for SPARQL queries.
+     */
+    private static class CMarmottaTripleSource implements TripleSource {
+
+        // Whether inferred statements should be included in lookups.
+        private boolean inferred;
+        private OstrichSailConnection connection;
+
+        private CMarmottaTripleSource(OstrichSailConnection connection, boolean inferred) {
+            this.inferred   = inferred;
+            this.connection = connection;
+        }
+
+        @Override
+        public CloseableIteration<? extends Statement, QueryEvaluationException> getStatements(Resource subj, URI pred, Value obj, Resource... contexts) throws QueryEvaluationException {
+            try {
+                return new ExceptionConvertingIteration<Statement, QueryEvaluationException>(
+                        connection.getStatements(subj, pred, obj, inferred, contexts)
+                ) {
+                    @Override
+                    protected QueryEvaluationException convert(Exception e) {
+                        // Map checked exceptions to QueryEvaluationException; rethrow
+                        // runtime exceptions unchanged; anything else is a caller bug.
+                        if (e instanceof ClosedByInterruptException) {
+                            return new QueryInterruptedException(e);
+                        }
+                        else if (e instanceof IOException) {
+                            return new QueryEvaluationException(e);
+                        }
+                        else if (e instanceof SailException) {
+                            return new QueryEvaluationException(e);
+                        }
+                        else if (e instanceof RuntimeException) {
+                            throw (RuntimeException)e;
+                        }
+                        else if (e == null) {
+                            throw new IllegalArgumentException("e must not be null");
+                        }
+                        else {
+                            throw new IllegalArgumentException("Unexpected exception type: " + e.getClass(),e);
+                        }
+                    }
+                };
+            } catch (SailException ex) {
+                throw new QueryEvaluationException(ex);
+            }
+        }
+
+        @Override
+        public ValueFactory getValueFactory() {
+            // A fresh factory per call; OstrichValueFactory carries no shared state.
+            return new OstrichValueFactory();
+        }
+    }
+
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/client/src/main/java/org/apache/marmotta/ostrich/sail/OstrichValueFactory.java
----------------------------------------------------------------------
diff --git a/libraries/ostrich/client/src/main/java/org/apache/marmotta/ostrich/sail/OstrichValueFactory.java b/libraries/ostrich/client/src/main/java/org/apache/marmotta/ostrich/sail/OstrichValueFactory.java
new file mode 100644
index 0000000..054492d
--- /dev/null
+++ b/libraries/ostrich/client/src/main/java/org/apache/marmotta/ostrich/sail/OstrichValueFactory.java
@@ -0,0 +1,260 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.marmotta.ostrich.sail;
+
+import org.apache.marmotta.commons.sesame.model.LiteralCommons;
+import org.apache.marmotta.commons.util.DateUtils;
+import org.apache.marmotta.ostrich.model.*;
+import org.openrdf.model.*;
+
+import javax.xml.datatype.XMLGregorianCalendar;
+import java.util.Date;
+import java.util.Random;
+
+/**
+ * ValueFactory implementation creating the protobuf-backed value classes
+ * (ProtoURI, ProtoBNode, ProtoStringLiteral, ProtoDatatypeLiteral,
+ * ProtoStatement) used by the Ostrich Sail client.
+ *
+ * @author Sebastian Schaffert (sschaffert@apache.org)
+ */
+public class OstrichValueFactory implements ValueFactory {
+
+    // Source of randomness for anonymous blank node identifiers.
+    private Random anonIdGenerator;
+
+    public OstrichValueFactory() {
+        this.anonIdGenerator = new Random();
+    }
+
+    /**
+     * Creates a new bNode.
+     *
+     * @return An object representing the bNode.
+     */
+    @Override
+    public BNode createBNode() {
+        // ID = current millis (hex) + a random 0..999 suffix (hex); uniqueness
+        // is only probabilistic — collisions are possible within one millisecond.
+        return new ProtoBNode(Long.toHexString(System.currentTimeMillis())+Integer.toHexString(anonIdGenerator.nextInt(1000)));
+    }
+
+    /**
+     * Creates a new URI from the supplied string-representation.
+     *
+     * @param uri A string-representation of a URI.
+     * @return An object representing the URI.
+     */
+    @Override
+    public URI createURI(String uri) {
+        return new ProtoURI(uri);
+    }
+
+    /**
+     * Creates a new URI from the supplied namespace and local name. Calling this
+     * method is funtionally equivalent to calling {@link #createURI(String)
+     * createURI(namespace+localName)}, but allows the ValueFactory to reuse
+     * supplied namespace and local name strings whenever possible. Note that the
+     * values returned by {@link URI#getNamespace()} and
+     * {@link URI#getLocalName()} are not necessarily the same as the values that
+     * are supplied to this method.
+     *
+     * @param namespace The URI's namespace.
+     * @param localName The URI's local name.
+     * @throws IllegalArgumentException If the supplied namespace and localname do not resolve to a legal
+     *                                  (absolute) URI.
+     */
+    @Override
+    public URI createURI(String namespace, String localName) {
+        return new ProtoURI(namespace+localName);
+    }
+
+    /**
+     * Creates a new blank node with the given node identifier.
+     *
+     * @param nodeID The blank node identifier.
+     * @return An object representing the blank node.
+     */
+    @Override
+    public BNode createBNode(String nodeID) {
+        return new ProtoBNode(nodeID);
+    }
+
+    /**
+     * Creates a new literal with the supplied label.
+     *
+     * @param label The literal's label.
+     */
+    @Override
+    public Literal createLiteral(String label) {
+        return new ProtoStringLiteral(label);
+    }
+
+    /**
+     * Creates a new literal with the supplied label and language attribute.
+     *
+     * @param label    The literal's label.
+     * @param language The literal's language attribute, or <tt>null</tt> if the literal
+     *                 has no language tag.
+     */
+    @Override
+    public Literal createLiteral(String label, String language) {
+        return new ProtoStringLiteral(label, language);
+    }
+
+    /**
+     * Creates a new literal with the supplied label and datatype.
+     *
+     * @param label    The literal's label.
+     * @param datatype The literal's datatype, or <tt>null</tt> if the literal doesn't
+     *                 have a datatype.
+     */
+    @Override
+    public Literal createLiteral(String label, URI datatype) {
+        return new ProtoDatatypeLiteral(label, datatype);
+    }
+
+    /**
+     * Creates a new <tt>xsd:boolean</tt>-typed literal representing the
+     * specified value.
+     *
+     * @param value The value for the literal.
+     * @return An <tt>xsd:boolean</tt>-typed literal for the specified value.
+     */
+    @Override
+    public Literal createLiteral(boolean value) {
+        return new ProtoDatatypeLiteral(Boolean.toString(value), LiteralCommons.getXSDType(Boolean.class));
+    }
+
+    /**
+     * Creates a new <tt>xsd:byte</tt>-typed literal representing the specified
+     * value.
+     *
+     * @param value The value for the literal.
+     * @return An <tt>xsd:byte</tt>-typed literal for the specified value.
+     */
+    @Override
+    public Literal createLiteral(byte value) {
+        return new ProtoDatatypeLiteral(Byte.toString(value), LiteralCommons.getXSDType(Byte.class));
+    }
+
+    /**
+     * Creates a new <tt>xsd:short</tt>-typed literal representing the specified
+     * value.
+     *
+     * @param value The value for the literal.
+     * @return An <tt>xsd:short</tt>-typed literal for the specified value.
+     */
+    @Override
+    public Literal createLiteral(short value) {
+        return new ProtoDatatypeLiteral(Short.toString(value), LiteralCommons.getXSDType(Short.class));
+    }
+
+    /**
+     * Creates a new <tt>xsd:int</tt>-typed literal representing the specified
+     * value.
+     *
+     * @param value The value for the literal.
+     * @return An <tt>xsd:int</tt>-typed literal for the specified value.
+     */
+    @Override
+    public Literal createLiteral(int value) {
+        return new ProtoDatatypeLiteral(Integer.toString(value), LiteralCommons.getXSDType(Integer.class));
+    }
+
+    /**
+     * Creates a new <tt>xsd:long</tt>-typed literal representing the specified
+     * value.
+     *
+     * @param value The value for the literal.
+     * @return An <tt>xsd:long</tt>-typed literal for the specified value.
+     */
+    @Override
+    public Literal createLiteral(long value) {
+        return new ProtoDatatypeLiteral(Long.toString(value), LiteralCommons.getXSDType(Long.class));
+    }
+
+    /**
+     * Creates a new <tt>xsd:float</tt>-typed literal representing the specified
+     * value.
+     *
+     * @param value The value for the literal.
+     * @return An <tt>xsd:float</tt>-typed literal for the specified value.
+     */
+    @Override
+    public Literal createLiteral(float value) {
+        return new ProtoDatatypeLiteral(Float.toString(value), LiteralCommons.getXSDType(Float.class));
+    }
+
+    /**
+     * Creates a new <tt>xsd:double</tt>-typed literal representing the specified
+     * value.
+     *
+     * @param value The value for the literal.
+     * @return An <tt>xsd:double</tt>-typed literal for the specified value.
+     */
+    @Override
+    public Literal createLiteral(double value) {
+        return new ProtoDatatypeLiteral(Double.toString(value), LiteralCommons.getXSDType(Double.class));
+    }
+
+    /**
+     * Creates a new literal representing the specified calendar that is typed
+     * using the appropriate XML Schema date/time datatype.
+     *
+     * @param calendar The value for the literal.
+     * @return An typed literal for the specified calendar.
+     */
+    @Override
+    public Literal createLiteral(XMLGregorianCalendar calendar) {
+        // NOTE(review): always uses the XSD type for Date regardless of the
+        // calendar's precision (date vs. dateTime) — verify this is intended.
+        return new ProtoDatatypeLiteral(calendar.toXMLFormat(), LiteralCommons.getXSDType(Date.class));
+    }
+
+    /**
+     * Creates a new literal representing the specified date that is typed using
+     * the appropriate XML Schema date/time datatype.
+     *
+     * @param date The value for the literal.
+     * @since 2.7.0
+     */
+    @Override
+    public Literal createLiteral(Date date) {
+        return new ProtoDatatypeLiteral(DateUtils.getXMLCalendar(date).toXMLFormat(), LiteralCommons.getXSDType(Date.class));
+    }
+
+    /**
+     * Creates a new statement with the supplied subject, predicate and object.
+     *
+     * @param subject   The statement's subject.
+     * @param predicate The statement's predicate.
+     * @param object    The statement's object.
+     * @return The created statement.
+     */
+    @Override
+    public Statement createStatement(Resource subject, URI predicate, Value object) {
+        return new ProtoStatement(subject, predicate, object, null);
+    }
+
+    /**
+     * Creates a new statement with the supplied subject, predicate and object
+     * and associated context.
+     *
+     * @param subject   The statement's subject.
+     * @param predicate The statement's predicate.
+     * @param object    The statement's object.
+     * @param context   The statement's context.
+     * @return The created statement.
+     */
+    @Override
+    public Statement createStatement(Resource subject, URI predicate, Value object, Resource context) {
+        return new ProtoStatement(subject, predicate, object, context);
+    }
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/client/src/test/java/org/apache/marmotta/ostrich/sail/test/CMarmottaSailTest.java
----------------------------------------------------------------------
diff --git a/libraries/ostrich/client/src/test/java/org/apache/marmotta/ostrich/sail/test/CMarmottaSailTest.java b/libraries/ostrich/client/src/test/java/org/apache/marmotta/ostrich/sail/test/CMarmottaSailTest.java
new file mode 100644
index 0000000..f60cc4e
--- /dev/null
+++ b/libraries/ostrich/client/src/test/java/org/apache/marmotta/ostrich/sail/test/CMarmottaSailTest.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.marmotta.ostrich.sail.test;
+
+import org.apache.marmotta.ostrich.sail.OstrichSail;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.openrdf.sail.RDFStoreTest;
+import org.openrdf.sail.Sail;
+import org.openrdf.sail.SailConnection;
+import org.openrdf.sail.SailException;
+
+/**
+ * Runs the standard Sesame {@link RDFStoreTest} suite against an Ostrich
+ * backend expected to be listening on localhost:10000. A single Sail instance
+ * is shared by the whole suite; its shutDown() is overridden to clear the
+ * remote store instead of closing the connection, so the suite can reuse one
+ * server-side repository across tests.
+ *
+ * @author Sebastian Schaffert (sschaffert@apache.org)
+ */
+public class CMarmottaSailTest extends RDFStoreTest {
+
+    // Shared across all tests in the suite; created once in setupClass().
+    private static Sail repository;
+
+    @BeforeClass
+    public static void setupClass() throws SailException {
+        repository = new OstrichSail("localhost", 10000) {
+            @Override
+            public void shutDown() throws SailException {
+                // Clear repository on shutdown, but otherwise reuse it.
+                // NOTE(review): super.shutDown() is intentionally never called,
+                // so the underlying channel stays open for the next test.
+                SailConnection con = getConnection();
+                con.begin();
+                try {
+                    con.clear();
+                    con.clearNamespaces();
+                } finally {
+                    // commit/close even if clear() threw, so the connection is released
+                    con.commit();
+                    con.close();
+                }
+            }
+        };
+        repository.initialize();
+    }
+
+    @AfterClass
+    public static void teardownClass() throws SailException {
+        // NOTE(review): this invokes the overridden shutDown() above, i.e. it
+        // clears the remote store but does not release the Sail's resources --
+        // TODO confirm this is intended for suite teardown.
+        repository.shutDown();
+    }
+
+
+    /**
+     * Gets an instance of the Sail that should be tested. The returned
+     * repository should already have been initialized.
+     *
+     * @return an initialized Sail.
+     * @throws SailException If the initialization of the repository failed.
+     */
+    @Override
+    protected Sail createSail() throws SailException {
+        return repository;
+    }
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/client/src/test/java/org/apache/marmotta/ostrich/sail/test/TestSailConnection.java
----------------------------------------------------------------------
diff --git a/libraries/ostrich/client/src/test/java/org/apache/marmotta/ostrich/sail/test/TestSailConnection.java b/libraries/ostrich/client/src/test/java/org/apache/marmotta/ostrich/sail/test/TestSailConnection.java
new file mode 100644
index 0000000..b904b3c
--- /dev/null
+++ b/libraries/ostrich/client/src/test/java/org/apache/marmotta/ostrich/sail/test/TestSailConnection.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.marmotta.ostrich.sail.test;
+
+import org.apache.marmotta.ostrich.sail.OstrichSail;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.openrdf.model.Statement;
+import org.openrdf.model.URI;
+import org.openrdf.repository.Repository;
+import org.openrdf.repository.RepositoryConnection;
+import org.openrdf.repository.RepositoryException;
+import org.openrdf.repository.RepositoryResult;
+import org.openrdf.repository.sail.SailRepository;
+import org.openrdf.rio.RDFFormat;
+import org.openrdf.rio.RDFHandlerException;
+import org.openrdf.rio.RDFWriter;
+import org.openrdf.rio.Rio;
+
+/**
+ * Manual integration test that queries an Ostrich backend expected to be
+ * running on localhost:10000, printing all statements for a fixed subject URI
+ * to stdout as Turtle.
+ *
+ * @author Sebastian Schaffert (sschaffert@apache.org)
+ */
+public class TestSailConnection {
+
+    // Shared SailRepository wrapping the remote Ostrich sail; opened once per class.
+    private static Repository repository;
+
+    @BeforeClass
+    public static void setup() throws RepositoryException {
+        repository = new SailRepository(new OstrichSail("localhost", 10000));
+        repository.initialize();
+    }
+
+    @AfterClass
+    public static void teardown() throws RepositoryException {
+        repository.shutDown();
+    }
+
+    /**
+     * Streams all statements with subject &lt;http://umbel.org/umbel/rc/Zyban&gt;
+     * (including inferred ones) to stdout in Turtle format inside a single
+     * read transaction.
+     */
+    @Test
+    public void testQuery() throws RepositoryException, RDFHandlerException {
+        RDFWriter writer = Rio.createWriter(RDFFormat.TURTLE, System.out);
+        URI s = repository.getValueFactory().createURI("http://umbel.org/umbel/rc/Zyban");
+
+        RepositoryConnection con = repository.getConnection();
+        try {
+            con.begin();
+            writer.startRDF();
+
+            // last argument 'true' includes inferred statements
+            RepositoryResult<Statement> it = con.getStatements(s, null, null, true);
+            while (it.hasNext()) {
+                writer.handleStatement(it.next());
+            }
+
+            writer.endRDF();
+
+            con.commit();
+        } catch(RepositoryException ex) {
+            // NOTE(review): the exception is swallowed after rollback, so this
+            // test still passes when the query fails -- consider rethrowing.
+            con.rollback();
+        } finally {
+            con.close();
+        }
+    }
+}

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/model/pom.xml
----------------------------------------------------------------------
diff --git a/libraries/ostrich/model/pom.xml b/libraries/ostrich/model/pom.xml
new file mode 100644
index 0000000..893346c
--- /dev/null
+++ b/libraries/ostrich/model/pom.xml
@@ -0,0 +1,219 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.marmotta</groupId>
+        <artifactId>cmarmotta-parent</artifactId>
+        <version>3.4.0-SNAPSHOT</version>
+        <relativePath>../</relativePath>
+    </parent>
+
+    <artifactId>ostrich-model</artifactId>
+    <packaging>jar</packaging>
+
+    <name>Ostrich Triplestore: Model</name>
+    <description>Sesame Model wrapper around C++ Marmotta Proto Model</description>
+
+    <pluginRepositories>
+        <pluginRepository>
+            <releases>
+                <updatePolicy>never</updatePolicy>
+            </releases>
+            <snapshots>
+                <enabled>false</enabled>
+            </snapshots>
+            <id>central</id>
+            <name>Central Repository</name>
+            <url>https://repo.maven.apache.org/maven2</url>
+        </pluginRepository>
+        <pluginRepository>
+            <id>protoc-plugin</id>
+            <url>https://dl.bintray.com/sergei-ivanov/maven/</url>
+        </pluginRepository>
+    </pluginRepositories>
+    <build>
+        <extensions>
+            <extension>
+                <groupId>kr.motd.maven</groupId>
+                <artifactId>os-maven-plugin</artifactId>
+                <version>1.3.0.Final</version>
+            </extension>
+        </extensions>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>test-jar</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>com.google.protobuf.tools</groupId>
+                <artifactId>maven-protoc-plugin</artifactId>
+                <version>0.4.2</version>
+                <configuration>
+                    <!--
+                      The version of protoc must match protobuf-java. If you don't depend on
+                      protobuf-java directly, you will be transitively depending on the
+                      protobuf-java version that grpc depends on.
+                    -->
+                    <protocArtifact>com.google.protobuf:protoc:3.0.0-beta-1:exe:${os.detected.classifier}</protocArtifact>
+                    <protoSourceRoot>${basedir}/../backend/model</protoSourceRoot>
+                </configuration>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>compile</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+    <dependencies>
+        <!-- Logging -->
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>log4j-over-slf4j</artifactId>
+        </dependency>
+
+
+        <!-- Protobuf -->
+        <dependency>
+            <groupId>com.google.protobuf</groupId>
+            <artifactId>protobuf-java</artifactId>
+            <version>3.0.0-beta-1</version>
+        </dependency>
+
+        <!-- Sesame dependencies -->
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-model</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-sail-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-sail-inferencer</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-queryalgebra-model</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-queryalgebra-evaluation</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>joda-time</groupId>
+            <artifactId>joda-time</artifactId>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.marmotta</groupId>
+            <artifactId>marmotta-commons</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.marmotta</groupId>
+            <artifactId>marmotta-model-vocabs</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>commons-io</groupId>
+            <artifactId>commons-io</artifactId>
+        </dependency>
+
+
+        <!-- Testing -->
+        <dependency>
+            <artifactId>junit</artifactId>
+            <groupId>junit</groupId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <artifactId>hamcrest-core</artifactId>
+            <groupId>org.hamcrest</groupId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <artifactId>hamcrest-library</artifactId>
+            <groupId>org.hamcrest</groupId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-core</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>ch.qos.logback</groupId>
+            <artifactId>logback-classic</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-rio-api</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-rio-rdfxml</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-repository-sail</artifactId>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.openrdf.sesame</groupId>
+            <artifactId>sesame-store-testsuite</artifactId>
+            <scope>test</scope>
+        </dependency>
+        
+        <dependency>
+            <groupId>com.google.code.tempus-fugit</groupId>
+            <artifactId>tempus-fugit</artifactId>
+            <scope>test</scope>
+        </dependency>
+
+
+    </dependencies>
+    
+</project>

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoBNode.java
----------------------------------------------------------------------
diff --git a/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoBNode.java b/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoBNode.java
new file mode 100644
index 0000000..3b6a750
--- /dev/null
+++ b/libraries/ostrich/model/src/main/java/org/apache/marmotta/ostrich/model/ProtoBNode.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.marmotta.ostrich.model;
+
+import org.apache.marmotta.ostrich.model.proto.Model;
+import org.openrdf.model.BNode;
+import org.openrdf.model.Literal;
+import org.openrdf.model.URI;
+
+/**
+ * An implementation of a Sesame BNode backed by a protocol buffer.
+ * The node's identifier is stored in an immutable {@link Model.BNode}
+ * protobuf message, allowing it to be sent over the wire without conversion.
+ *
+ * @author Sebastian Schaffert (sschaffert@apache.org)
+ */
+public class ProtoBNode implements BNode {
+
+    // Immutable protobuf message holding the blank node id.
+    private Model.BNode message;
+
+    /**
+     * Create a blank node with the given identifier, building a new
+     * protobuf message around it.
+     */
+    public ProtoBNode(String id) {
+        message = Model.BNode.newBuilder().setId(id).build();
+    }
+
+    /**
+     * Wrap an existing protobuf blank node message without copying it.
+     */
+    public ProtoBNode(Model.BNode message) {
+        this.message = message;
+    }
+
+    /**
+     * Return the underlying protobuf message (e.g. for serialization).
+     */
+    public Model.BNode getMessage() {
+        return message;
+    }
+
+    /**
+     * retrieves this blank node's identifier.
+     *
+     * @return A blank node identifier.
+     */
+    @Override
+    public String getID() {
+        return message.getId();
+    }
+
+    /**
+     * Returns the String-value of a <tt>Value</tt> object. This returns either
+     * a {@link Literal}'s label, a {@link URI}'s URI or a {@link BNode}'s ID.
+     */
+    @Override
+    public String stringValue() {
+        return message.getId();
+    }
+
+    /**
+     * Equality is based solely on the blank node identifier, so instances of
+     * any {@link BNode} implementation with the same id compare equal.
+     */
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+
+        if(o instanceof BNode) {
+            return this.stringValue().equals(((BNode)o).stringValue());
+        }
+        return false;
+    }
+
+    // Consistent with equals(): hash of the identifier only.
+    @Override
+    public int hashCode() {
+        return stringValue().hashCode();
+    }
+}


[7/7] marmotta git commit: move experimental C++ LevelDB backend into Apache Marmotta main, and named the new module "ostrich" as an analogy to "kiwi"

Posted by ss...@apache.org.
move experimental C++ LevelDB backend into Apache Marmotta main, and named the new module "ostrich" as an analogy to "kiwi"


Project: http://git-wip-us.apache.org/repos/asf/marmotta/repo
Commit: http://git-wip-us.apache.org/repos/asf/marmotta/commit/0ff22a0c
Tree: http://git-wip-us.apache.org/repos/asf/marmotta/tree/0ff22a0c
Diff: http://git-wip-us.apache.org/repos/asf/marmotta/diff/0ff22a0c

Branch: refs/heads/develop
Commit: 0ff22a0c3e086efdae4f25aa11aa27ff987c99bc
Parents: ed387b9
Author: Sebastian Schaffert <ss...@apache.org>
Authored: Sat Dec 12 17:07:51 2015 +0100
Committer: Sebastian Schaffert <ss...@apache.org>
Committed: Sat Dec 12 17:07:51 2015 +0100

----------------------------------------------------------------------
 launchers/marmotta-webapp/pom.xml               |    22 +
 libraries/ostrich/backend/CMakeLists.txt        |    31 +
 libraries/ostrich/backend/README.md             |    87 +
 libraries/ostrich/backend/client/CMakeLists.txt |     8 +
 libraries/ostrich/backend/client/client.cc      |   275 +
 .../ostrich/backend/cmake/FindGFlags.cmake      |    48 +
 libraries/ostrich/backend/cmake/FindGLog.cmake  |    18 +
 libraries/ostrich/backend/cmake/FindGRPC.cmake  |    67 +
 .../ostrich/backend/cmake/FindLevelDB.cmake     |    18 +
 .../ostrich/backend/cmake/FindRAPTOR.cmake      |   103 +
 .../ostrich/backend/cmake/FindRasqal.cmake      |    99 +
 .../ostrich/backend/cmake/FindTcmalloc.cmake    |    39 +
 libraries/ostrich/backend/model/CMakeLists.txt  |     6 +
 libraries/ostrich/backend/model/model.proto     |    77 +
 libraries/ostrich/backend/model/rdf_model.cc    |   348 +
 libraries/ostrich/backend/model/rdf_model.h     |   709 +
 .../ostrich/backend/model/rdf_operators.cc      |    61 +
 libraries/ostrich/backend/model/rdf_operators.h |   240 +
 libraries/ostrich/backend/parser/CMakeLists.txt |     4 +
 libraries/ostrich/backend/parser/rdf_parser.cc  |   175 +
 libraries/ostrich/backend/parser/rdf_parser.h   |    87 +
 .../ostrich/backend/persistence/CMakeLists.txt  |    10 +
 .../backend/persistence/leveldb_persistence.cc  |   685 +
 .../backend/persistence/leveldb_persistence.h   |   185 +
 .../backend/persistence/leveldb_server.cc       |    73 +
 .../backend/persistence/leveldb_service.cc      |   254 +
 .../backend/persistence/leveldb_service.h       |   120 +
 .../backend/persistence/leveldb_sparql.cc       |   114 +
 .../backend/persistence/leveldb_sparql.h        |    52 +
 .../ostrich/backend/serializer/CMakeLists.txt   |     4 +
 .../ostrich/backend/serializer/serializer.cc    |    49 +
 .../ostrich/backend/serializer/serializer.h     |    54 +
 .../backend/serializer/serializer_base.cc       |    64 +
 .../backend/serializer/serializer_base.h        |   104 +
 .../backend/serializer/serializer_proto.cc      |    54 +
 .../backend/serializer/serializer_proto.h       |    50 +
 .../backend/serializer/serializer_raptor.cc     |   266 +
 .../backend/serializer/serializer_raptor.h      |    55 +
 .../ostrich/backend/service/CMakeLists.txt      |     9 +
 libraries/ostrich/backend/service/sail.proto    |   102 +
 libraries/ostrich/backend/service/sparql.proto  |    45 +
 .../ostrich/backend/sharding/CMakeLists.txt     |    10 +
 libraries/ostrich/backend/sharding/server.cc    |    66 +
 libraries/ostrich/backend/sharding/sharding.cc  |   335 +
 libraries/ostrich/backend/sharding/sharding.h   |   174 +
 libraries/ostrich/backend/sparql/CMakeLists.txt |     7 +
 .../ostrich/backend/sparql/rasqal_adapter.cc    |   299 +
 .../ostrich/backend/sparql/rasqal_adapter.h     |   107 +
 .../ostrich/backend/sparql/rasqal_model.cc      |   193 +
 libraries/ostrich/backend/sparql/rasqal_model.h |    74 +
 libraries/ostrich/backend/test/CMakeLists.txt   |    12 +
 libraries/ostrich/backend/test/SparqlTest.cc    |   266 +
 libraries/ostrich/backend/test/StatementTest.cc |   135 +
 libraries/ostrich/backend/test/gtest-all.cc     |  9592 ++++++++
 libraries/ostrich/backend/test/gtest.h          | 20061 +++++++++++++++++
 libraries/ostrich/backend/test/main.cc          |    11 +
 libraries/ostrich/backend/util/CMakeLists.txt   |     3 +
 libraries/ostrich/backend/util/iterator.h       |   131 +
 libraries/ostrich/backend/util/murmur3.cc       |   313 +
 libraries/ostrich/backend/util/murmur3.h        |    18 +
 libraries/ostrich/backend/util/split.cc         |    40 +
 libraries/ostrich/backend/util/split.h          |    38 +
 libraries/ostrich/client/pom.xml                |   234 +
 .../ostrich/sail/ClosableResponseStream.java    |   163 +
 .../marmotta/ostrich/sail/OstrichSail.java      |    87 +
 .../ostrich/sail/OstrichSailConnection.java     |   529 +
 .../ostrich/sail/OstrichValueFactory.java       |   260 +
 .../ostrich/sail/test/CMarmottaSailTest.java    |    74 +
 .../ostrich/sail/test/TestSailConnection.java   |    80 +
 libraries/ostrich/model/pom.xml                 |   219 +
 .../marmotta/ostrich/model/ProtoBNode.java      |    79 +
 .../ostrich/model/ProtoDatatypeLiteral.java     |   106 +
 .../ostrich/model/ProtoLiteralBase.java         |   187 +
 .../marmotta/ostrich/model/ProtoNamespace.java  |    86 +
 .../marmotta/ostrich/model/ProtoStatement.java  |   221 +
 .../ostrich/model/ProtoStringLiteral.java       |   101 +
 .../apache/marmotta/ostrich/model/ProtoURI.java |   113 +
 .../ostrich/model/test/StatementTest.java       |    68 +
 .../marmotta/ostrich/model/test/URITest.java    |    57 +
 .../model/src/test/resources/logback.xml        |    28 +
 libraries/ostrich/pom.xml                       |    50 +
 libraries/pom.xml                               |     9 +
 .../backends/marmotta-backend-ostrich/pom.xml   |   175 +
 .../backend/ostrich/OstrichProvider.java        |    94 +
 .../src/main/resources/META-INF/beans.xml       |    28 +
 .../main/resources/config-defaults.properties   |    24 +
 .../resources/config-descriptions.properties    |    26 +
 .../src/main/resources/kiwi-module.properties   |    38 +
 .../src/main/resources/web/admin/about.html     |    36 +
 .../main/resources/web/admin/configuration.html |    52 +
 platform/backends/pom.xml                       |     6 +
 91 files changed, 39986 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/launchers/marmotta-webapp/pom.xml
----------------------------------------------------------------------
diff --git a/launchers/marmotta-webapp/pom.xml b/launchers/marmotta-webapp/pom.xml
index c9d0cd6..e02e44a 100644
--- a/launchers/marmotta-webapp/pom.xml
+++ b/launchers/marmotta-webapp/pom.xml
@@ -415,6 +415,28 @@
             </dependencies>
         </profile>
 
+        <profile>
+            <id>ostrich</id>
+            <activation>
+                <property>
+                    <name>marmotta.backend</name>
+                    <value>ostrich</value>
+                </property>
+            </activation>
+            <dependencies>
+                <dependency>
+                    <groupId>org.apache.marmotta</groupId>
+                    <artifactId>marmotta-backend-ostrich</artifactId>
+                    <version>${project.version}</version>
+                </dependency>
+                <dependency>
+                    <groupId>org.apache.marmotta</groupId>
+                    <artifactId>marmotta-ldcache-file</artifactId>
+                    <version>${project.version}</version>
+                </dependency>
+            </dependencies>
+        </profile>
+
 
         <!-- Caching Backends for KiWi -->
 

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/CMakeLists.txt b/libraries/ostrich/backend/CMakeLists.txt
new file mode 100644
index 0000000..d8232b7
--- /dev/null
+++ b/libraries/ostrich/backend/CMakeLists.txt
@@ -0,0 +1,31 @@
+cmake_minimum_required(VERSION 3.0)
+project(Marmotta)
+
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/")
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -g")
+set(PROTOBUF_IMPORT_DIRS "${CMAKE_SOURCE_DIR}/model")
+set(USE_TCMALLOC TRUE)
+
+find_package (Threads REQUIRED)
+find_package (RAPTOR REQUIRED)
+find_package (Rasqal REQUIRED)
+find_package (GFlags REQUIRED)
+find_package (Protobuf REQUIRED)
+find_package (GRPC REQUIRED)
+find_package (LevelDB REQUIRED)
+find_package (GLog REQUIRED)
+find_package (Tcmalloc)
+
+add_definitions(-DNDEBUG)
+
+add_subdirectory(util)
+add_subdirectory(model)
+add_subdirectory(sparql)
+add_subdirectory(service)
+add_subdirectory(parser)
+add_subdirectory(serializer)
+add_subdirectory(persistence)
+add_subdirectory(sharding)
+add_subdirectory(client)
+add_subdirectory(test)
+

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/README.md
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/README.md b/libraries/ostrich/backend/README.md
new file mode 100644
index 0000000..047a968
--- /dev/null
+++ b/libraries/ostrich/backend/README.md
@@ -0,0 +1,87 @@
+# Apache Marmotta LevelDB/C++ Backend
+
+This repository implements an experimental high-performance backend for Apache Marmotta
+using LevelDB as storage and gRPC as communication channel between the Java frontend
+and the C++ backend. 
+
+If it proves to be useful, the repository will eventually be merged into the main
+development branch of Apache Marmotta.
+
+## Dependencies (C++)
+
+To compile the C++ backend, you need to have the following dependencies installed:
+
+  * libraptor (used for parsing/serializing in C++)
+  * librasqal (used for server-side SPARQL evaluation)
+  * libglog (logging)
+  * libgflags (command line arguments)
+  * libleveldb (database backend)
+  * libgrpc (gRPC runtime)
+  * libprotobuf (messaging, data model)
+
+With the exception of libgrpc and libprotobuf, all libraries are available in Linux repositories.
+Debian:
+
+    apt-get install libraptor2-dev librasqal3-dev libgoogle-glog-dev libgflags-dev libleveldb-dev
+    
+The backend uses the new Proto 3 format and the gRPC SDK. These need to be installed separately,
+please follow the instructions at [https://github.com/grpc/grpc](https://github.com/grpc/grpc/blob/master/INSTALL).
+
+
+## Compilation (C++)
+
+The backend uses cmake to compile the modules. Create a new directory `build`, run cmake, and run make:
+
+    mkdir build && cd build
+    cmake ..
+    make
+
+## Compilation (Java)
+
+The frontend is compiled with Maven and depends on many Apache Marmotta modules to work. Build it with
+
+    cd java
+    mvn clean install
+    
+## Running C++ Backend
+
+Start the backend from the cmake build directory as follows:
+
+    ./service/marmotta_persistence -db /path/to/database -port 10000
+    
+The binary accepts many different options. Please see `--help` for details.
+
+## Running Sharding
+
+The repository contains an experimental implementation of a sharding server that proxies and 
+distributes requests based on a hash calculation over statements. In heavy load environments,
+this is potentially much faster than running a single persistence backend. The setup requires
+several persistence backends (shards) and a sharding proxy. To experiment, you can start these
+on the same machine as follows:
+
+    ./service/marmotta_persistence -db /path/to/shard1 -port 10001
+    ./service/marmotta_persistence -db /path/to/shard2 -port 10002
+    ./sharding/marmotta_sharding --port 10000 --backends localhost:10001,localhost:10002
+
+You can then access the sharding server through Marmotta like the persistence server. Running all instances
+on the same host is only useful for testing. In production environments, you would of course run all three
+(or more) instances on different hosts. Note that the number and order of backends should not change once
+data has been imported, because otherwise the hash-based distribution would map existing statements to the wrong shard.
+
+## Running Apache Marmotta 
+
+A preconfigured version of Apache Marmotta is available in `java/webapp`. It connects to 
+`localhost:10000` by default and can be started with:
+
+    mvn tomcat7:run
+    
+Afterwards, point your browser to `localhost:8080`.
+
+## Command Line Client
+
+A C++ command line client is available for very fast bulk imports and simple queries. To import
+a large turtle file, run:
+
+    ./client/marmotta_client --format=turtle import file.ttl
+
+The client connects by default to `localhost:10000` (change with `--host` and `--port` flags).

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/client/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/client/CMakeLists.txt b/libraries/ostrich/backend/client/CMakeLists.txt
new file mode 100644
index 0000000..88ad11a
--- /dev/null
+++ b/libraries/ostrich/backend/client/CMakeLists.txt
@@ -0,0 +1,8 @@
+include_directories(.. ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_BINARY_DIR}/../model)
+
+add_executable(marmotta_client client.cc)
+target_link_libraries(marmotta_client
+        marmotta_model marmotta_service marmotta_parser marmotta_serializer
+        ${GFLAGS_LIBRARY}
+        ${CMAKE_THREAD_LIBS_INIT} ${PROTOBUF_LIBRARIES} ${GRPC_LIBRARIES})
+

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/client/client.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/client/client.cc b/libraries/ostrich/backend/client/client.cc
new file mode 100644
index 0000000..16c9022
--- /dev/null
+++ b/libraries/ostrich/backend/client/client.cc
@@ -0,0 +1,275 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <fstream>
+
+#include <grpc/grpc.h>
+#include <grpc++/channel.h>
+#include <grpc++/client_context.h>
+#include <grpc++/create_channel.h>
+#include <grpc++/security/credentials.h>
+#include <grpc++/support/sync_stream.h>
+
+#include <google/protobuf/text_format.h>
+#include <google/protobuf/empty.pb.h>
+#include <google/protobuf/wrappers.pb.h>
+
+#include <gflags/gflags.h>
+
+#include "model/rdf_model.h"
+#include "parser/rdf_parser.h"
+#include "serializer/serializer.h"
+#include "service/sail.pb.h"
+#include "service/sail.grpc.pb.h"
+#include "service/sparql.pb.h"
+#include "service/sparql.grpc.pb.h"
+
+
+using grpc::Channel;
+using grpc::ClientContext;
+using grpc::ClientReader;
+using grpc::ClientReaderWriter;
+using grpc::ClientWriter;
+using grpc::Status;
+using google::protobuf::TextFormat;
+
+using namespace marmotta;
+namespace svc = marmotta::service::proto;
+namespace spq = marmotta::sparql::proto;
+
+// A STL iterator wrapper around a client reader.
+// STL-style iterator adapter over a gRPC ClientReader: exposes a stream of
+// protobuf messages of type Proto as model objects of type T.
+// The wrapped reader is not owned by the iterator.
+template <class T, class Proto>
+class ClientReaderIterator : public util::CloseableIterator<T> {
+ public:
+    // Empty iterator: wraps no reader and is already exhausted.
+    // (The original left 'reader' uninitialized here.)
+    ClientReaderIterator() : reader(nullptr), finished(true) { }
+
+    ClientReaderIterator(ClientReader<Proto>* r) : reader(r), finished(false) {
+        // Immediately move to first element.
+        operator++();
+    }
+
+    ClientReaderIterator& operator++() override {
+        if (!finished) {
+            finished = !reader->Read(&buffer);
+            if (finished) {
+                // Stream exhausted: close it. Do not rebuild 'current' from
+                // the stale buffer (the original did, needlessly).
+                reader->Finish();
+            } else {
+                current = T(buffer);
+            }
+        }
+        return *this;
+    }
+
+    T& operator*() override {
+        return current;
+    }
+
+    T* operator->() override {
+        return &current;
+    }
+
+    // True while the stream still has an unconsumed element.
+    bool hasNext() override {
+        return !finished;
+    }
+
+ private:
+    ClientReader<Proto>* reader;
+    Proto buffer;
+    T current;
+    bool finished;
+};
+
+typedef ClientReaderIterator<rdf::Statement, rdf::proto::Statement> StatementReader;
+typedef ClientReaderIterator<rdf::Namespace, rdf::proto::Namespace> NamespaceReader;
+
+// Client facade for the Ostrich backend, wrapping the gRPC SailService
+// (statements/namespaces) and SparqlService (queries) stubs.
+class MarmottaClient {
+ public:
+    MarmottaClient(const std::string& server)
+            : stub_(svc::SailService::NewStub(
+            grpc::CreateChannel(server, grpc::InsecureChannelCredentials()))),
+              sparql_(spq::SparqlService::NewStub(
+                      grpc::CreateChannel(server, grpc::InsecureChannelCredentials()))){}
+
+    // Parse RDF data from 'in' in the given format and stream namespaces and
+    // statements to the server over two parallel client-streaming calls.
+    void importDataset(std::istream& in, parser::Format format) {
+        ClientContext nscontext, stmtcontext;
+
+        google::protobuf::Int64Value nsstats;
+        google::protobuf::Int64Value stmtstats;
+
+        std::unique_ptr<ClientWriter<rdf::proto::Namespace> > nswriter(
+                stub_->AddNamespaces(&nscontext, &nsstats));
+        std::unique_ptr<ClientWriter<rdf::proto::Statement> > stmtwriter(
+                stub_->AddStatements(&stmtcontext, &stmtstats));
+
+        parser::Parser p("http://www.example.com", format);
+        p.setStatementHandler([&stmtwriter](const rdf::Statement& stmt) {
+            stmtwriter->Write(stmt.getMessage());
+        });
+        p.setNamespaceHandler([&nswriter](const rdf::Namespace& ns) {
+            nswriter->Write(ns.getMessage());
+        });
+        p.parse(in);
+
+        stmtwriter->WritesDone();
+        nswriter->WritesDone();
+
+        Status nsst = nswriter->Finish();
+        Status stmtst = stmtwriter->Finish();
+
+        if (nsst.ok() && stmtst.ok()) {
+            std::cout << "Added " << nsstats.value() << " namespaces and "
+                                  << stmtstats.value() << " statements" << std::endl;
+        } else {
+            // Report whichever stream failed; the original only ever printed
+            // the statement-stream status, hiding namespace failures.
+            if (!nsst.ok()) {
+                std::cerr << "Failed writing namespaces to server: "
+                          << nsst.error_message() << std::endl;
+            }
+            if (!stmtst.ok()) {
+                std::cerr << "Failed writing statements to server: "
+                          << stmtst.error_message() << std::endl;
+            }
+        }
+    }
+
+
+    // Run a statement-pattern query (unset pattern fields act as wildcards)
+    // and serialize all matching statements to 'out' in the given format.
+    void patternQuery(const rdf::Statement &pattern, std::ostream &out, serializer::Format format) {
+        ClientContext context;
+
+        std::unique_ptr<ClientReader<rdf::proto::Statement> > reader(
+            stub_->GetStatements(&context, pattern.getMessage()));
+
+        serializer::Serializer serializer("http://www.example.com", format);
+        serializer.serialize(StatementReader(reader.get()), out);
+    }
+
+    // Delete all statements matching the given pattern and report the count.
+    void patternDelete(const rdf::Statement &pattern) {
+        ClientContext context;
+        google::protobuf::Int64Value result;
+
+        Status status = stub_->RemoveStatements(&context, pattern.getMessage(), &result);
+        if (status.ok()) {
+            std::cout << "Deleted " << result.value() << " statements." << std::endl;
+        } else {
+            std::cerr << "Failed deleting statements: " << status.error_message() << std::endl;
+        }
+    }
+
+    // Execute a SPARQL tuple query and print each response row in protobuf
+    // text format.
+    void tupleQuery(const std::string& query, std::ostream &out) {
+        ClientContext context;
+        spq::SparqlRequest request;
+        request.set_query(query);
+
+        std::unique_ptr<ClientReader<spq::SparqlResponse>> reader(
+                sparql_->TupleQuery(&context, request));
+
+        // Stack-allocated adapter; the original used raw new/delete, which
+        // leaked on early exit. The upcast to ZeroCopyOutputStream is
+        // implicit, so no dynamic_cast is needed.
+        google::protobuf::io::OstreamOutputStream stream(&out);
+        spq::SparqlResponse result;
+        while (reader->Read(&result)) {
+            TextFormat::Print(result, &stream);
+        }
+        // gRPC requires Finish() on every stream; also surfaces errors the
+        // original silently dropped.
+        Status status = reader->Finish();
+        if (!status.ok()) {
+            std::cerr << "SPARQL query failed: " << status.error_message() << std::endl;
+        }
+    }
+
+    // List all namespace prefix/URI mappings known to the server.
+    void listNamespaces(std::ostream &out) {
+        ClientContext context;
+
+        google::protobuf::Empty pattern;
+
+        std::unique_ptr<ClientReader<rdf::proto::Namespace> > reader(
+                stub_->GetNamespaces(&context, pattern));
+
+        NamespaceReader it(reader.get());
+        for (; it.hasNext(); ++it) {
+            out << (*it).getPrefix() << " = " << (*it).getUri() << std::endl;
+        }
+    }
+
+    // Number of statements in the given context(s), or -1 on RPC failure.
+    int64_t size(const svc::ContextRequest& r) {
+        ClientContext context;
+        google::protobuf::Int64Value result;
+
+        Status status = stub_->Size(&context, r, &result);
+        if (status.ok()) {
+            return result.value();
+        } else {
+            return -1;
+        }
+    }
+ private:
+    std::unique_ptr<svc::SailService::Stub> stub_;
+    std::unique_ptr<spq::SparqlService::Stub> sparql_;
+};
+
+
+// Command line flags shared by all client commands (parsed by gflags).
+DEFINE_string(format, "rdfxml", "RDF format to use for parsing/serializing.");
+DEFINE_string(host, "localhost", "Address/name of server to access.");
+DEFINE_string(port, "10000", "Port of server to access.");
+DEFINE_string(output, "", "File to write result to.");
+
+// Entry point: dispatches on the first positional argument
+// (import|select|sparql|delete|size|namespaces).
+int main(int argc, char** argv) {
+    GOOGLE_PROTOBUF_VERIFY_VERSION;
+
+    google::ParseCommandLineFlags(&argc, &argv, true);
+
+    // The original dereferenced argv[1]/argv[2] unconditionally and crashed
+    // when started without arguments.
+    if (argc < 2) {
+        std::cerr << "Usage: " << argv[0]
+                  << " [flags] import|select|sparql|delete|size|namespaces [argument]"
+                  << std::endl;
+        return 1;
+    }
+
+    const std::string command(argv[1]);
+
+    // Every command except "namespaces" takes one positional argument.
+    if (command != "namespaces" && argc < 3) {
+        std::cerr << "Missing argument for command '" << command << "'" << std::endl;
+        return 1;
+    }
+
+    MarmottaClient client(FLAGS_host + ":" + FLAGS_port);
+
+    if (command == "import") {
+        std::ifstream in(argv[2]);
+        std::cout << "Importing " << argv[2] << " ... " << std::endl;
+        client.importDataset(in, parser::FormatFromString(FLAGS_format));
+        std::cout << "Finished!" << std::endl;
+    }
+
+    if (command == "select") {
+        // argv[2] is a Statement pattern in protobuf text format.
+        rdf::proto::Statement query;
+        TextFormat::ParseFromString(argv[2], &query);
+        if (FLAGS_output != "") {
+            std::ofstream out(FLAGS_output);
+            client.patternQuery(rdf::Statement(query), out, serializer::FormatFromString(FLAGS_format));
+        } else {
+            client.patternQuery(rdf::Statement(query), std::cout, serializer::FormatFromString(FLAGS_format));
+        }
+    }
+
+    if (command == "sparql") {
+        std::string query = argv[2];
+        if (FLAGS_output != "") {
+            std::ofstream out(FLAGS_output);
+            client.tupleQuery(query, out);
+        } else {
+            client.tupleQuery(query, std::cout);
+        }
+    }
+
+    if (command == "delete") {
+        rdf::proto::Statement query;
+        TextFormat::ParseFromString(argv[2], &query);
+        client.patternDelete(rdf::Statement(query));
+    }
+
+    if (command == "size") {
+        svc::ContextRequest query;
+        TextFormat::ParseFromString(argv[2], &query);
+        std::cout << "Size: " << client.size(query) << std::endl;
+    }
+
+    if (command == "namespaces") {
+        if (FLAGS_output != "") {
+            std::ofstream out(FLAGS_output);
+            client.listNamespaces(out);
+        } else {
+            client.listNamespaces(std::cout);
+        }
+    }
+
+    google::protobuf::ShutdownProtobufLibrary();
+
+    return 0;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/cmake/FindGFlags.cmake
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/cmake/FindGFlags.cmake b/libraries/ostrich/backend/cmake/FindGFlags.cmake
new file mode 100644
index 0000000..2d44a8c
--- /dev/null
+++ b/libraries/ostrich/backend/cmake/FindGFlags.cmake
@@ -0,0 +1,48 @@
+# - Try to find GFLAGS
+#
+# The following variables are optionally searched for defaults
+#  GFLAGS_ROOT_DIR:            Base directory where all GFLAGS components are found
+#
+# The following are set after configuration is done:
+#  GFLAGS_FOUND
+#  GFLAGS_INCLUDE_DIRS
+#  GFLAGS_LIBRARIES
+#  GFLAGS_LIBRARY_DIRS
+
+include(FindPackageHandleStandardArgs)
+
+set(GFLAGS_ROOT_DIR "" CACHE PATH "Folder contains Gflags")
+
+# We are testing only a couple of files in the include directories
+# (on Windows the gflags sources keep headers under src/windows).
+if(WIN32)
+    find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h
+            PATHS ${GFLAGS_ROOT_DIR}/src/windows)
+else()
+    find_path(GFLAGS_INCLUDE_DIR gflags/gflags.h
+            PATHS ${GFLAGS_ROOT_DIR})
+endif()
+
+# MSVC ships separate release/debug library builds; pick both so the
+# generator chooses per-configuration.
+if(MSVC)
+    find_library(GFLAGS_LIBRARY_RELEASE
+            NAMES libgflags
+            PATHS ${GFLAGS_ROOT_DIR}
+            PATH_SUFFIXES Release)
+
+    find_library(GFLAGS_LIBRARY_DEBUG
+            NAMES libgflags-debug
+            PATHS ${GFLAGS_ROOT_DIR}
+            PATH_SUFFIXES Debug)
+
+    set(GFLAGS_LIBRARY optimized ${GFLAGS_LIBRARY_RELEASE} debug ${GFLAGS_LIBRARY_DEBUG})
+else()
+    find_library(GFLAGS_LIBRARY gflags)
+endif()
+
+find_package_handle_standard_args(GFLAGS DEFAULT_MSG
+        GFLAGS_INCLUDE_DIR GFLAGS_LIBRARY)
+
+
+if(GFLAGS_FOUND)
+    set(GFLAGS_INCLUDE_DIRS ${GFLAGS_INCLUDE_DIR})
+    set(GFLAGS_LIBRARIES ${GFLAGS_LIBRARY})
+endif()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/cmake/FindGLog.cmake
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/cmake/FindGLog.cmake b/libraries/ostrich/backend/cmake/FindGLog.cmake
new file mode 100644
index 0000000..3d09d06
--- /dev/null
+++ b/libraries/ostrich/backend/cmake/FindGLog.cmake
@@ -0,0 +1,18 @@
+# Locate the Google Logging (glog) headers and library.
+# Sets GLOG_FOUND, GLOG_INCLUDE_PATH and GLOG_LIBRARY.
+
+find_path(GLOG_INCLUDE_PATH NAMES glog/logging.h)
+find_library(GLOG_LIBRARY NAMES glog)
+
+if(GLOG_INCLUDE_PATH AND GLOG_LIBRARY)
+    set(GLOG_FOUND TRUE)
+endif()
+
+if(GLOG_FOUND)
+    if(NOT GLOG_FIND_QUIETLY)
+        message(STATUS "Found GLOG: ${GLOG_LIBRARY}; includes - ${GLOG_INCLUDE_PATH}")
+    endif()
+else()
+    if(GLOG_FIND_REQUIRED)
+        message(FATAL_ERROR "Could not find GLOG library.")
+    endif()
+endif()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/cmake/FindGRPC.cmake
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/cmake/FindGRPC.cmake b/libraries/ostrich/backend/cmake/FindGRPC.cmake
new file mode 100644
index 0000000..d5aa718
--- /dev/null
+++ b/libraries/ostrich/backend/cmake/FindGRPC.cmake
@@ -0,0 +1,67 @@
+# Locate the protoc plugin that generates gRPC C++ stubs.
+find_program(GRPC_CPP_PLUGIN grpc_cpp_plugin) # Get full path to plugin
+
+# gRPC C++ needs the grpc, grpc++ and gpr runtime libraries.
+find_library(GRPC_LIBRARY NAMES grpc)
+find_library(GRPCPP_LIBRARY NAMES grpc++)
+find_library(GPR_LIBRARY NAMES gpr)
+set(GRPC_LIBRARIES ${GRPCPP_LIBRARY} ${GRPC_LIBRARY} ${GPR_LIBRARY})
+if(GRPC_LIBRARIES)
+    message(STATUS "Found GRPC: ${GRPC_LIBRARIES}; plugin - ${GRPC_CPP_PLUGIN}")
+endif()
+
+
+# Generate <name>.grpc.pb.cc/.h for each given .proto file by invoking protoc
+# with the grpc_cpp_plugin. Appends the generated file names to the variables
+# named by SRCS and HDRS in the caller's scope.
+function(PROTOBUF_GENERATE_GRPC_CPP SRCS HDRS)
+  if(NOT ARGN)
+    message(SEND_ERROR "Error: PROTOBUF_GENERATE_GRPC_CPP() called without any proto files")
+    return()
+  endif()
+
+  if(PROTOBUF_GENERATE_CPP_APPEND_PATH) # This variable is common for all types of output.
+    # Create an include path for each file specified
+    foreach(FIL ${ARGN})
+      get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
+      get_filename_component(ABS_PATH ${ABS_FIL} PATH)
+      list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
+      if(${_contains_already} EQUAL -1)
+          list(APPEND _protobuf_include_path -I ${ABS_PATH})
+      endif()
+    endforeach()
+  else()
+    set(_protobuf_include_path -I ${CMAKE_CURRENT_SOURCE_DIR})
+  endif()
+
+  # Also honour additional import directories configured by the caller.
+  if(DEFINED PROTOBUF_IMPORT_DIRS)
+    foreach(DIR ${PROTOBUF_IMPORT_DIRS})
+      get_filename_component(ABS_PATH ${DIR} ABSOLUTE)
+      list(FIND _protobuf_include_path ${ABS_PATH} _contains_already)
+      if(${_contains_already} EQUAL -1)
+          list(APPEND _protobuf_include_path -I ${ABS_PATH})
+      endif()
+    endforeach()
+  endif()
+
+  set(${SRCS})
+  set(${HDRS})
+  foreach(FIL ${ARGN})
+    get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
+    get_filename_component(FIL_WE ${FIL} NAME_WE)
+
+    list(APPEND ${SRCS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.cc")
+    list(APPEND ${HDRS} "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.h")
+
+    # Custom command re-runs protoc whenever the .proto (or protoc) changes.
+    add_custom_command(
+      OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.cc"
+             "${CMAKE_CURRENT_BINARY_DIR}/${FIL_WE}.grpc.pb.h"
+      COMMAND  ${PROTOBUF_PROTOC_EXECUTABLE}
+      ARGS --grpc_out=${CMAKE_CURRENT_BINARY_DIR}
+           --plugin=protoc-gen-grpc=${GRPC_CPP_PLUGIN}
+           ${_protobuf_include_path} ${ABS_FIL}
+      DEPENDS ${ABS_FIL} ${PROTOBUF_PROTOC_EXECUTABLE}
+      COMMENT "Running gRPC C++ protocol buffer compiler on ${FIL}"
+      VERBATIM)
+  endforeach()
+
+  set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE)
+  set(${SRCS} ${${SRCS}} PARENT_SCOPE)
+  set(${HDRS} ${${HDRS}} PARENT_SCOPE)
+endfunction()
+

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/cmake/FindLevelDB.cmake
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/cmake/FindLevelDB.cmake b/libraries/ostrich/backend/cmake/FindLevelDB.cmake
new file mode 100644
index 0000000..db99bf9
--- /dev/null
+++ b/libraries/ostrich/backend/cmake/FindLevelDB.cmake
@@ -0,0 +1,18 @@
+# Find libleveldb.a - key/value storage system
+# Sets LevelDB_FOUND, LevelDB_INCLUDE_PATH and LevelDB_LIBRARY.
+
+find_path(LevelDB_INCLUDE_PATH NAMES leveldb/db.h)
+find_library(LevelDB_LIBRARY NAMES leveldb)
+
+if(LevelDB_INCLUDE_PATH AND LevelDB_LIBRARY)
+    set(LevelDB_FOUND TRUE)
+endif()
+
+if(LevelDB_FOUND)
+    if(NOT LevelDB_FIND_QUIETLY)
+        message(STATUS "Found LevelDB: ${LevelDB_LIBRARY}; includes - ${LevelDB_INCLUDE_PATH}")
+    endif()
+else()
+    if(LevelDB_FIND_REQUIRED)
+        message(FATAL_ERROR "Could not find leveldb library.")
+    endif()
+endif()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/cmake/FindRAPTOR.cmake
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/cmake/FindRAPTOR.cmake b/libraries/ostrich/backend/cmake/FindRAPTOR.cmake
new file mode 100644
index 0000000..ef12f1b
--- /dev/null
+++ b/libraries/ostrich/backend/cmake/FindRAPTOR.cmake
@@ -0,0 +1,103 @@
+# - Try to find the Raptor RDF parsing library (http://librdf.org/raptor/)
+# Once done this will define
+#
+#  RAPTOR_FOUND       - system has Raptor
+#  RAPTOR_LIBRARIES   - Link these to use Raptor
+#  RAPTOR_INCLUDE_DIR - Include directory for using Raptor
+#  RAPTOR_DEFINITIONS - Compiler switches required for using Raptor
+#
+#  Capabilities
+#       RAPTOR_HAVE_TRIG   - Set if raptor has TRIG
+
+# (c) 2007-2011 Sebastian Trueg <tr...@kde.org>
+# (c) 2011 Artem Serebriyskiy <v....@gmail.com>
+# (c) 2011 Michael Jansen <kd...@michael-jansen.biz>
+#
+# Based on FindFontconfig Copyright (c) 2006,2007 Laurent Montel, <mo...@kde.org>
+#
+# Redistribution and use is allowed according to the terms of the BSD license.
+# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
+
+
+# NOTE(review): this macro is empty and never invoked here — looks like dead
+# code carried over from upstream; confirm before removing.
+MACRO ( FIND_RAPTOR )
+
+ENDMACRO ()
+
+
+
+# Check if we have cached results in case the last round was successful.
+if ( NOT( RAPTOR_INCLUDE_DIR AND RAPTOR_LIBRARIES ) OR NOT RAPTOR_FOUND )
+
+	set( RAPTOR_LDFLAGS )
+
+    find_package(PkgConfig)
+
+    # Prefer pkg-config metadata where available (non-Windows only).
+    if ( NOT WIN32 )
+        pkg_check_modules(PC_RAPTOR QUIET raptor)
+        if ( PC_RAPTOR_FOUND )
+            set(RAPTOR_DEFINITIONS ${PC_RAPTOR_CFLAGS_OTHER})
+            set(RAPTOR_VERSION ${PC_RAPTOR_VERSION} CACHE STRING "Raptor Version found" )
+            # Keep only the extra static link flags after -lraptor itself.
+            string( REGEX REPLACE "^.*-lraptor;" "" RAPTOR_LDFLAGS "${PC_RAPTOR_STATIC_LDFLAGS}")
+        endif ()
+    endif ()
+
+    find_path(RAPTOR_INCLUDE_DIR
+      NAMES raptor2.h raptor2/raptor2.h raptor.h raptor/raptor.h
+	    PATHS $ENV{RAPTOR_DIR}/include
+	          $ENV{RAPTOR_DIR}
+	          ~/Library/Frameworks
+	          /Library/Frameworks
+	          /usr/local/include
+	          /usr/include/
+	          /sw/include        # Fink
+	          /opt/local/include # MacPorts
+		  /opt/csw/include   # Blastwave
+		  /usr/local/opt/raptor/include   # brew
+                  /opt/include
+	          /usr/freeware/include
+
+    )
+
+
+    find_library(RAPTOR_LIBRARY
+	    NAMES raptor raptor2
+	    PATHS $ENV{RAPTOR_DIR}/lib
+	          $ENV{RAPTOR_DIR}/lib-dbg
+	          $ENV{RAPTOR_DIR}
+	          ~/Library/Frameworks
+	          /Library/Frameworks
+	          /usr/local/lib
+	          /usr/local/lib64
+	          /usr/lib
+	          /usr/lib64
+	          /sw/lib        # Fink
+	          /opt/local/lib # MacPorts
+	          /opt/csw/lib   # Blastwave
+                  /usr/local/opt/raptor/lib   # brew
+	          /opt/lib
+	          /usr/freeware/lib64
+    )
+
+    # Append the extra static link flags discovered via pkg-config, if any.
+  	if ( RAPTOR_LDFLAGS )
+  	  set( RAPTOR_LIBRARY ${RAPTOR_LIBRARY} ${RAPTOR_LDFLAGS} )
+	endif ()
+
+    mark_as_advanced(RAPTOR_INCLUDE_DIR RAPTOR_LIBRARY)
+
+endif () # Check for cached values
+
+include(FindPackageHandleStandardArgs)
+
+find_package_handle_standard_args(
+    Raptor
+    VERSION_VAR   RAPTOR_VERSION
+    REQUIRED_VARS RAPTOR_LIBRARY RAPTOR_INCLUDE_DIR)
+
+mark_as_advanced(RAPTOR_VERSION)
+
+# Helpful diagnostic: if raptor2 was requested but only raptor1 is installed.
+if (NOT RAPTOR_FOUND AND Raptor_FIND_VERSION_MAJOR EQUAL "2" AND NOT Raptor_FIND_QUIET )
+    pkg_check_modules(PC_RAPTOR QUIET raptor)
+    if (PC_RAPTOR_FOUND)
+        message( STATUS "You have raptor1 version ${PC_RAPTOR_VERSION} installed. Please update." )
+    endif ()
+endif ()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/cmake/FindRasqal.cmake
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/cmake/FindRasqal.cmake b/libraries/ostrich/backend/cmake/FindRasqal.cmake
new file mode 100644
index 0000000..e80ab34
--- /dev/null
+++ b/libraries/ostrich/backend/cmake/FindRasqal.cmake
@@ -0,0 +1,99 @@
+# - Try to find the Rasqal rdf query library (http://librdf.org/rasqal/)
+# Once done this will define
+#
+#  RASQAL_FOUND       - system has Rasqal
+#  RASQAL_LIBRARIES   - Link these to use RASQAL
+#  RASQAL_INCLUDE_DIR - The include directory for using rasqal
+#  RASQAL_DEFINITIONS - Compiler switches required for using RASQAL
+#  RASQAL_VERSION     - The rasqal version string
+
+# (c) 2007-2009 Sebastian Trueg <tr...@kde.org>
+#
+# Based on FindFontconfig Copyright (c) 2006,2007 Laurent Montel, <mo...@kde.org>
+#
+# Redistribution and use is allowed according to the terms of the BSD license.
+# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
+
+
+# Locate rasqal-config; on WinCE cross-builds only the host bindir is searched.
+if(WINCE)
+  FIND_PROGRAM(
+    RASQAL_CONFIG
+    NAMES rasqal-config
+    PATHS ${HOST_BINDIR} NO_DEFAULT_PATH
+    )
+else(WINCE)
+  FIND_PROGRAM(
+    RASQAL_CONFIG
+    NAMES rasqal-config
+    )
+endif(WINCE)
+
+  if(RASQAL_CONFIG)
+    EXECUTE_PROCESS(
+      COMMAND ${RASQAL_CONFIG} --version
+      OUTPUT_VARIABLE RASQAL_VERSION
+      )
+    if(RASQAL_VERSION)
+      STRING(REPLACE "\n" "" RASQAL_VERSION ${RASQAL_VERSION})
+  
+      # extract include paths from rasqal-config
+      EXECUTE_PROCESS(
+        COMMAND ${RASQAL_CONFIG} --cflags
+        OUTPUT_VARIABLE rasqal_CFLAGS_ARGS)
+      STRING( REPLACE " " ";" rasqal_CFLAGS_ARGS ${rasqal_CFLAGS_ARGS} )
+      FOREACH( _ARG ${rasqal_CFLAGS_ARGS} )
+        IF(${_ARG} MATCHES "^-I")
+          STRING(REGEX REPLACE "^-I" "" _ARG ${_ARG})
+          STRING( REPLACE "\n" "" _ARG ${_ARG} )
+          LIST(APPEND rasqal_INCLUDE_DIRS ${_ARG})
+        ENDIF(${_ARG} MATCHES "^-I")
+      ENDFOREACH(_ARG)
+  
+      # extract lib paths from rasqal-config
+      EXECUTE_PROCESS(
+        COMMAND ${RASQAL_CONFIG} --libs
+        OUTPUT_VARIABLE rasqal_CFLAGS_ARGS)
+      STRING( REPLACE " " ";" rasqal_CFLAGS_ARGS ${rasqal_CFLAGS_ARGS} )
+      FOREACH( _ARG ${rasqal_CFLAGS_ARGS} )
+        IF(${_ARG} MATCHES "^-L")
+          STRING(REGEX REPLACE "^-L" "" _ARG ${_ARG})
+          LIST(APPEND rasqal_LIBRARY_DIRS ${_ARG})
+        ENDIF(${_ARG} MATCHES "^-L")
+      ENDFOREACH(_ARG)
+    endif(RASQAL_VERSION)
+  endif(RASQAL_CONFIG)
+
+  # Fall back to conventional locations if rasqal-config gave no hints.
+  find_path(RASQAL_INCLUDE_DIR rasqal.h
+    PATHS
+    ${redland_INCLUDE_DIRS}
+    ${rasqal_INCLUDE_DIRS}
+    /usr/X11/include
+    PATH_SUFFIXES redland rasqal
+  )
+
+  find_library(RASQAL_LIBRARIES NAMES rasqal librasqal
+    PATHS
+    ${rasqal_LIBRARY_DIRS}
+  )
+
+  include(FindPackageHandleStandardArgs)
+  find_package_handle_standard_args(
+      Rasqal
+      VERSION_VAR   RASQAL_VERSION
+      REQUIRED_VARS RASQAL_LIBRARIES RASQAL_INCLUDE_DIR)
+
+  if (RASQAL_FOUND)
+    set(RASQAL_DEFINITIONS ${rasqal_CFLAGS})
+    if (NOT Rasqal_FIND_QUIETLY)
+      message(STATUS "Found Rasqal ${RASQAL_VERSION}: libs - ${RASQAL_LIBRARIES}; includes - ${RASQAL_INCLUDE_DIR}")
+    endif (NOT Rasqal_FIND_QUIETLY)
+  else (RASQAL_FOUND)
+    if (Rasqal_FIND_REQUIRED)
+      message(FATAL_ERROR "Could NOT find Rasqal")
+    endif (Rasqal_FIND_REQUIRED)
+  endif (RASQAL_FOUND)
+
+
+# NOTE(review): RASQAL_INCLUDE_DIR_TMP is never set anywhere in this module —
+# marking it advanced is harmless but looks like leftover upstream code.
+mark_as_advanced(RASQAL_INCLUDE_DIR_TMP
+                 RASQAL_INCLUDE_DIR 
+                 RASQAL_LIBRARIES)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/cmake/FindTcmalloc.cmake
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/cmake/FindTcmalloc.cmake b/libraries/ostrich/backend/cmake/FindTcmalloc.cmake
new file mode 100644
index 0000000..eb89d61
--- /dev/null
+++ b/libraries/ostrich/backend/cmake/FindTcmalloc.cmake
@@ -0,0 +1,39 @@
+# - Find Tcmalloc
+# Locate the native Tcmalloc headers and library.
+#
+#  Tcmalloc_INCLUDE_DIR - where to find google/tcmalloc.h
+#  Tcmalloc_LIBRARIES   - libraries to link when using Tcmalloc
+#  Tcmalloc_FOUND       - true if Tcmalloc was found
+
+find_path(Tcmalloc_INCLUDE_DIR google/tcmalloc.h)
+
+# With USE_TCMALLOC the profiler-enabled variant is preferred.
+if(USE_TCMALLOC)
+  set(Tcmalloc_NAMES tcmalloc_and_profiler)
+else()
+  set(Tcmalloc_NAMES tcmalloc_minimal tcmalloc)
+endif()
+
+find_library(Tcmalloc_LIBRARY NAMES ${Tcmalloc_NAMES})
+
+if(Tcmalloc_INCLUDE_DIR AND Tcmalloc_LIBRARY)
+  set(Tcmalloc_FOUND TRUE)
+  set(Tcmalloc_LIBRARIES ${Tcmalloc_LIBRARY})
+else()
+  set(Tcmalloc_FOUND FALSE)
+  set(Tcmalloc_LIBRARIES)
+endif()
+
+if(Tcmalloc_FOUND)
+  message(STATUS "Found Tcmalloc: ${Tcmalloc_LIBRARY}")
+else()
+  message(STATUS "Not Found Tcmalloc")
+  if(Tcmalloc_FIND_REQUIRED)
+    message(STATUS "Looked for Tcmalloc libraries named ${Tcmalloc_NAMES}.")
+    message(FATAL_ERROR "Could NOT find Tcmalloc library")
+  endif()
+endif()
+
+mark_as_advanced(
+  Tcmalloc_LIBRARY
+  Tcmalloc_INCLUDE_DIR
+)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/model/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/model/CMakeLists.txt b/libraries/ostrich/backend/model/CMakeLists.txt
new file mode 100644
index 0000000..473e6c8
--- /dev/null
+++ b/libraries/ostrich/backend/model/CMakeLists.txt
@@ -0,0 +1,6 @@
+# Generate C++ sources from all .proto files in this directory, then build
+# the RDF model library from the hand-written and generated sources.
+file(GLOB ProtoFiles "${CMAKE_CURRENT_SOURCE_DIR}/*.proto")
+PROTOBUF_GENERATE_CPP(PROTO_SRCS PROTO_HDRS ${ProtoFiles})
+include_directories(.. ${CMAKE_CURRENT_BINARY_DIR}/..)
+
+add_library(marmotta_model rdf_model.cc rdf_model.h ${PROTO_SRCS} ${PROTO_HDRS} rdf_operators.h rdf_operators.cc)
+target_link_libraries(marmotta_model ${CMAKE_THREAD_LIBS_INIT} ${PROTOBUF_LIBRARIES})
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/model/model.proto
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/model/model.proto b/libraries/ostrich/backend/model/model.proto
new file mode 100644
index 0000000..2076303
--- /dev/null
+++ b/libraries/ostrich/backend/model/model.proto
@@ -0,0 +1,77 @@
+// RDF data model messages shared between the Ostrich C++ backend and the
+// Java client (see java_package option below). Field numbers are part of the
+// wire format and must never be changed.
+syntax = "proto3";
+
+package marmotta.rdf.proto;
+
+option java_package = "org.apache.marmotta.ostrich.model.proto";
+
+// Namespaces consist of a prefix (short string used as replacement of a
+// URI prefix) and a uri (the URI to replace by a prefix)
+message Namespace {
+    string prefix = 1;
+    string uri = 2;
+}
+
+// URI resources have a single required field, the uri they are pointing to.
+message URI {
+    string uri = 1;
+}
+
+// BNodes/anonymous nodes have a single required field, the node ID.
+message BNode {
+    string id = 1;
+}
+
+// Resources are either URIs or BNodes.
+message Resource {
+    oneof Resources {
+        URI uri = 1;
+        BNode bnode = 2;
+    }
+}
+
+// A string literal has string content and an optional language specification.
+// At least content is required.
+message StringLiteral {
+    string content = 1;
+    string language = 2;  // empty string means "no language tag"
+}
+
+// A datatype literal has string content of a specific datatype and a URI
+// identifying that datatype (typically in XSD namespace). Both fields are
+// required.
+message DatatypeLiteral {
+    string content = 1;
+    URI datatype = 2;
+}
+
+// A literal is either a string literal with optional language or a
+// datatype literal with required content and datatype.
+message Literal {
+    oneof Literals {
+        StringLiteral stringliteral = 1;
+        DatatypeLiteral dataliteral = 2;
+    }
+}
+
+// Values can be resources or literals
+message Value {
+    oneof Values {
+        Resource resource = 1;
+        Literal literal = 2;
+    }
+}
+
+// A statement has subject, predicate and object, and an optional context.
+// The Statement message is also used for pattern queries, in which case
+// non-existing fields are interpreted as wildcards.
+message Statement {
+    Resource subject = 1;
+    URI predicate = 2;
+    Value object = 3;
+    Resource context = 4;
+}
+
+// A collection of statements.
+message Statements {
+    repeated Statement statement = 1;
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/model/rdf_model.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/model/rdf_model.cc b/libraries/ostrich/backend/model/rdf_model.cc
new file mode 100644
index 0000000..bd7d76d
--- /dev/null
+++ b/libraries/ostrich/backend/model/rdf_model.cc
@@ -0,0 +1,348 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <new>
+
+#include "rdf_model.h"
+
+namespace marmotta {
+namespace rdf {
+
+// Turtle serialisation of a URI: the uri text wrapped in angle brackets.
+static std::string as_turtle_(const proto::URI& uri) {
+    return "<" + uri.uri() + ">";
+}
+
+// Turtle serialisation of a blank node: "_:" followed by the node id.
+static std::string as_turtle_(const proto::BNode& bnode) {
+    return "_:" + bnode.id();
+}
+
+// Turtle serialisation of a string literal; the language tag is only
+// appended when one is present.
+// NOTE(review): the content is not escaped — quotes or newlines inside the
+// literal would yield invalid Turtle; confirm upstream guarantees.
+static std::string as_turtle_(const proto::StringLiteral& literal) {
+    if (literal.language() == "") {
+        return "\"" + literal.content() + "\"";
+    } else {
+        return "\"" + literal.content() + "\"@" + literal.language();
+    }
+}
+
+// Turtle serialisation of a datatype literal: quoted content followed by
+// "^^" and the datatype URI.
+static std::string as_turtle_(const proto::DatatypeLiteral& literal) {
+    return "\"" + literal.content() + "\"^^" + as_turtle_(literal.datatype());
+}
+
+// Turtle serialisation of a resource, dispatching on whichever member of
+// the oneof is set; an unset resource yields the empty string.
+static std::string as_turtle_(const proto::Resource& resource) {
+    if (resource.has_uri()) {
+        return as_turtle_(resource.uri());
+    }
+    if (resource.has_bnode()) {
+        return as_turtle_(resource.bnode());
+    }
+    return "";
+}
+
+// Turtle serialisation of a generic value. The resource branch delegates
+// to the Resource overload above; literals dispatch on the literal oneof.
+// An empty value serialises to the empty string.
+static std::string as_turtle_(const proto::Value& value) {
+    if (value.has_resource()) {
+        return as_turtle_(value.resource());
+    }
+    if (value.has_literal()) {
+        const auto& literal = value.literal();
+        if (literal.has_stringliteral()) {
+            return as_turtle_(literal.stringliteral());
+        }
+        if (literal.has_dataliteral()) {
+            return as_turtle_(literal.dataliteral());
+        }
+    }
+    return "";
+}
+
+// Turtle form of this URI (delegates to the shared proto serialiser).
+std::string URI::as_turtle() const {
+    return as_turtle_(internal_);
+}
+
+// Turtle form of this blank node.
+std::string BNode::as_turtle() const {
+    return as_turtle_(internal_);
+}
+
+// Turtle form of this string literal.
+std::string StringLiteral::as_turtle() const {
+    return as_turtle_(internal_);
+}
+
+// Turtle form of this datatype literal.
+std::string DatatypeLiteral::as_turtle() const {
+    return as_turtle_(internal_);
+}
+
+
+
+// Plain string form of the resource: the URI text for URIs, the node id
+// for blank nodes, and the empty string for unset resources.
+std::string Resource::stringValue() const {
+    if (type == URI) {
+        return internal_.uri().uri();
+    }
+    if (type == BNODE) {
+        return internal_.bnode().id();
+    }
+    return "";
+}
+
+// Turtle form of the resource, delegating to the shared proto serialiser.
+std::string Resource::as_turtle() const {
+    return as_turtle_(internal_);
+}
+
+
+// Copy assignment from a URI. CopyFrom (rather than MergeFrom) is used so
+// that any previous content of the target sub-message is fully replaced;
+// MergeFrom would keep stale data for fields unset in the source. Selecting
+// the oneof branch via mutable_resource()/mutable_uri() clears the others.
+Value &Value::operator=(const marmotta::rdf::URI &_uri) {
+    type = URI;
+    internal_.mutable_resource()->mutable_uri()->CopyFrom(_uri.getMessage());
+    return *this;
+}
+
+// Copy assignment from a BNode (see above for the CopyFrom rationale).
+Value &Value::operator=(const BNode &_bnode) {
+    type = BNODE;
+    internal_.mutable_resource()->mutable_bnode()->CopyFrom(_bnode.getMessage());
+    return *this;
+}
+
+// Copy assignment from a string literal.
+Value &Value::operator=(const StringLiteral &literal) {
+    type = STRING_LITERAL;
+    internal_.mutable_literal()->mutable_stringliteral()->CopyFrom(literal.getMessage());
+    return *this;
+}
+
+// Copy assignment from a datatype literal.
+Value &Value::operator=(const DatatypeLiteral &literal) {
+    type = DATATYPE_LITERAL;
+    internal_.mutable_literal()->mutable_dataliteral()->CopyFrom(literal.getMessage());
+    return *this;
+}
+
+// Move assignment from a URI; the source message is swapped out and left
+// in a valid but unspecified state.
+Value &Value::operator=(marmotta::rdf::URI &&_uri) {
+    type = URI;
+    internal_.mutable_resource()->mutable_uri()->Swap(&_uri.internal_);
+    return *this;
+}
+
+// Move assignment from a BNode.
+Value &Value::operator=(BNode &&_bnode) {
+    type = BNODE;
+    internal_.mutable_resource()->mutable_bnode()->Swap(&_bnode.internal_);
+    return *this;
+}
+
+// Move assignment from a string literal.
+Value &Value::operator=(StringLiteral &&literal) {
+    type = STRING_LITERAL;
+    internal_.mutable_literal()->mutable_stringliteral()->Swap(&literal.internal_);
+    return *this;
+}
+
+// Move assignment from a datatype literal.
+Value &Value::operator=(DatatypeLiteral &&literal) {
+    type = DATATYPE_LITERAL;
+    internal_.mutable_literal()->mutable_dataliteral()->Swap(&literal.internal_);
+    return *this;
+}
+
+// Return the raw lexical form of whatever this value currently holds:
+// the URI text, the blank-node id, or the literal content. An unset
+// value yields the empty string.
+std::string Value::stringValue() const {
+    if (type == URI) {
+        return internal_.resource().uri().uri();
+    } else if (type == BNODE) {
+        return internal_.resource().bnode().id();
+    } else if (type == STRING_LITERAL) {
+        return internal_.literal().stringliteral().content();
+    } else if (type == DATATYPE_LITERAL) {
+        return internal_.literal().dataliteral().content();
+    }
+    return "";
+}
+
+// Turtle form of the value, delegating to the shared proto serialiser.
+std::string Value::as_turtle() const {
+    return as_turtle_(internal_);
+}
+
+
+// Serialise the statement as a triple (or quad when a context is set).
+// NOTE(review): the context form "ctx { s p o. }" is TriG syntax rather
+// than plain Turtle — confirm consumers expect TriG for quads.
+std::string Statement::as_turtle() const {
+    if (hasContext()) {
+        return as_turtle_(internal_.context()) + " { " +
+               as_turtle_(internal_.subject()) + " " +
+               as_turtle_(internal_.predicate()) + " " +
+               as_turtle_(internal_.object()) + ". }";
+    } else {
+        return as_turtle_(internal_.subject()) + " " +
+               as_turtle_(internal_.predicate()) + " " +
+               as_turtle_(internal_.object()) + ".";
+    }
+}
+
+// Construct a Value from a proto message (copy). The type tag is derived
+// from the oneof branch that is set: a resource without a uri is classified
+// as BNODE, a literal without a stringliteral as DATATYPE_LITERAL, and a
+// message with neither branch as NONE.
+Value::Value(const proto::Value& v) : internal_(v) {
+    if (v.has_resource()) {
+        if (v.resource().has_uri())
+            type = URI;
+        else
+            type = BNODE;
+    } else if (v.has_literal()) {
+        if (v.literal().has_stringliteral())
+            type = STRING_LITERAL;
+        else
+            type = DATATYPE_LITERAL;
+    } else {
+        type = NONE;
+    }
+}
+
+// Construct a Value from a proto message (move). The type tag is derived
+// first, then the content is swapped out of the source message.
+Value::Value(proto::Value&& v) {
+    if (v.has_resource()) {
+        if (v.resource().has_uri())
+            type = URI;
+        else
+            type = BNODE;
+    } else if (v.has_literal()) {
+        if (v.literal().has_stringliteral())
+            type = STRING_LITERAL;
+        else
+            type = DATATYPE_LITERAL;
+    } else {
+        type = NONE;
+    }
+    internal_.Swap(&v);
+}
+
+// Construct a Resource from a proto message (copy), deriving the type tag
+// from the oneof branch that is set.
+Resource::Resource(const proto::Resource& v) : internal_(v) {
+    if (v.has_uri())
+        type = URI;
+    else if (v.has_bnode())
+        type = BNODE;
+    else
+        type = NONE;
+}
+
+// Construct a Resource from a proto message (move); the type tag is
+// derived before the content is swapped out of the source.
+Resource::Resource(proto::Resource&& v) {
+    if (v.has_uri())
+        type = URI;
+    else if (v.has_bnode())
+        type = BNODE;
+    else
+        type = NONE;
+    internal_.Swap(&v);
+}
+
+// Copy assignment from a URI. CopyFrom fully replaces the previous content
+// (MergeFrom would retain stale fields unset in the source); selecting the
+// oneof branch via mutable_uri()/mutable_bnode() clears the other one.
+Resource &Resource::operator=(const rdf::URI &uri) {
+    type = URI;
+    internal_.mutable_uri()->CopyFrom(uri.getMessage());
+    return *this;
+}
+
+// Copy assignment from a BNode.
+Resource &Resource::operator=(const rdf::BNode &bnode) {
+    type = BNODE;
+    internal_.mutable_bnode()->CopyFrom(bnode.getMessage());
+    return *this;
+}
+
+// Move assignment from a URI; the source message is swapped out.
+Resource &Resource::operator=(rdf::URI &&uri) {
+    type = URI;
+    internal_.mutable_uri()->Swap(&uri.internal_);
+    return *this;
+}
+
+// Move assignment from a BNode.
+Resource &Resource::operator=(rdf::BNode &&bnode) {
+    type = BNODE;
+    internal_.mutable_bnode()->Swap(&bnode.internal_);
+    return *this;
+}
+
+// Move assignment from a proto message; contents are swapped, leaving the
+// source in a valid but unspecified state.
+URI &URI::operator=(proto::URI &&other) {
+    internal_.Swap(&other);
+    return *this;
+}
+
+// Copy assignment. CopyFrom (rather than MergeFrom) is used throughout
+// these operators: it completely replaces the previous content (MergeFrom
+// would keep stale fields that are unset in the source) and, unlike
+// MergeFrom, is well-defined under self-assignment.
+URI &URI::operator=(const URI &other) {
+    internal_.CopyFrom(other.internal_);
+    return *this;
+}
+
+// Move assignment.
+URI &URI::operator=(URI &&other) {
+    internal_.Swap(&other.internal_);
+    return *this;
+}
+
+// Move assignment from a proto message.
+BNode &BNode::operator=(proto::BNode &&other) {
+    internal_.Swap(&other);
+    return *this;
+}
+
+// Copy assignment (see URI::operator= for the CopyFrom rationale).
+BNode &BNode::operator=(const BNode &other) {
+    internal_.CopyFrom(other.internal_);
+    return *this;
+}
+
+// Move assignment.
+BNode &BNode::operator=(BNode &&other) {
+    internal_.Swap(&other.internal_);
+    return *this;
+}
+
+// Move assignment from a proto message.
+StringLiteral &StringLiteral::operator=(proto::StringLiteral &&other) {
+    internal_.Swap(&other);
+    return *this;
+}
+
+// Copy assignment (see URI::operator= for the CopyFrom rationale).
+StringLiteral &StringLiteral::operator=(const StringLiteral &other) {
+    internal_.CopyFrom(other.internal_);
+    return *this;
+}
+
+// Move assignment.
+StringLiteral &StringLiteral::operator=(StringLiteral &&other) {
+    internal_.Swap(&other.internal_);
+    return *this;
+}
+
+// Move assignment from a proto message.
+DatatypeLiteral &DatatypeLiteral::operator=(proto::DatatypeLiteral &&other) {
+    internal_.Swap(&other);
+    return *this;
+}
+
+// Copy assignment (see URI::operator= for the CopyFrom rationale).
+DatatypeLiteral &DatatypeLiteral::operator=(const DatatypeLiteral &other) {
+    internal_.CopyFrom(other.internal_);
+    return *this;
+}
+
+// Move assignment.
+DatatypeLiteral &DatatypeLiteral::operator=(DatatypeLiteral &&other) {
+    internal_.Swap(&other.internal_);
+    return *this;
+}
+
+// Copy assignment from a proto message.
+Statement &Statement::operator=(const proto::Statement &other) {
+    internal_.CopyFrom(other);
+    return *this;
+}
+
+// Move assignment from a proto message.
+Statement &Statement::operator=(proto::Statement &&other) {
+    internal_.Swap(&other);
+    return *this;
+}
+
+// Copy assignment (see URI::operator= for the CopyFrom rationale).
+Statement &Statement::operator=(const Statement &other) {
+    internal_.CopyFrom(other.internal_);
+    return *this;
+}
+
+// Move assignment.
+Statement &Statement::operator=(Statement &&other) {
+    internal_.Swap(&other.internal_);
+    return *this;
+}
+}  // namespace rdf
+}  // namespace marmotta

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/model/rdf_model.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/model/rdf_model.h b/libraries/ostrich/backend/model/rdf_model.h
new file mode 100644
index 0000000..ee5c5bb
--- /dev/null
+++ b/libraries/ostrich/backend/model/rdf_model.h
@@ -0,0 +1,709 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MARMOTTA_RDF_MODEL_H
+#define MARMOTTA_RDF_MODEL_H
+
+#include <string>
+#include <iostream>
+
+#include "model/model.pb.h"
+
+/*
+ * This namespace contains the model definition for the C++ version of
+ * Marmotta.
+ *
+ * All objects are backed by proto messages, but offer more convenient
+ * high-level constructs.
+ *
+ * All objects implement copy as well as efficient move operations for
+ * constructors and assignment operators. Converting back and forth between
+ * a proto message and a model object is therefore very cheap.
+ */
+namespace marmotta {
+namespace rdf {
+
+/**
+ * RDF namespace, consisting of a prefix and a URI.
+ *
+ * Like all model classes in this header, the object is a thin wrapper
+ * around its proto message and is cheap to convert back and forth.
+ */
+class Namespace {
+ public:
+    /*
+     * default constructor, creates empty namespace.
+     */
+    Namespace() {}
+
+    /**
+     * Create a new namespace from the given prefix and uri (0-terminated
+     * C-style strings).
+     */
+    Namespace(const char* prefix, const char* uri)  {
+        // Raptor sends us a nullptr for the base NS.
+        if (prefix != nullptr) {
+            internal_.set_prefix(prefix);
+        }
+        internal_.set_uri(uri);
+    }
+
+    /**
+     * Create a new namespace from the given prefix and uri.
+     */
+    Namespace(const std::string &prefix, const std::string &uri)  {
+        internal_.set_prefix(prefix);
+        internal_.set_uri(uri);
+    }
+
+    /**
+     * Create a new namespace from a namespace proto message.
+     */
+    Namespace(const proto::Namespace &ns) : internal_(ns) { }
+
+    /**
+     * Create a new namespace from a namespace proto message (move constructor).
+     */
+    Namespace(proto::Namespace &&ns) {
+        internal_.Swap(&ns);
+    }
+
+    /**
+     * Get the prefix used to identify this namespace.
+     */
+    const std::string &getPrefix() const {
+        return internal_.prefix();
+    }
+
+    /**
+     * Set the prefix used to identify this namespace.
+     */
+    void setPrefix(const std::string &prefix) {
+        internal_.set_prefix(prefix);
+    }
+
+    /**
+     * Get the URI identified by this namespace.
+     */
+    const std::string &getUri() const {
+        return internal_.uri();
+    }
+
+    /**
+     * Set the URI identified by this namespace.
+     */
+    void setUri(const std::string &uri) {
+        internal_.set_uri(uri);
+    }
+
+    /**
+     * Get a reference to the proto message wrapped by the Namespace object.
+     */
+    const proto::Namespace& getMessage() const {
+        return internal_;
+    }
+
+ private:
+    proto::Namespace internal_;
+};
+
+/**
+ * RDF URI implementation, backed by a URI proto message.
+ */
+class URI {
+ public:
+    /**
+     * Default constructor, creates an empty URI.
+     */
+    URI() { }
+
+    /**
+     * Create an URI object from the URI string passed as argument.
+     */
+    URI(const std::string &uri) {
+        internal_.set_uri(uri);
+    }
+
+    /**
+     * Create an URI object from the URI string passed as argument.
+     */
+    URI(const char* uri) {
+        internal_.set_uri(uri);
+    }
+
+    /**
+     * Create an URI object from the proto message passed as argument (copy
+     * constructor).
+     */
+    URI(const proto::URI &uri) : internal_(uri) { }
+
+    /**
+     * Create an URI object from the proto message passed as argument (move
+     * constructor, the original proto message is invalidated).
+     */
+    URI(proto::URI &&uri) {
+        internal_.Swap(&uri);
+    }
+
+    /**
+     * Copy constructor, create an URI from another URI.
+     */
+    URI(const URI &other) : internal_(other.internal_) {}
+
+    /**
+     * Move constructor, create an URI from another URI, invalidating the
+     * original URI.
+     */
+    URI(URI&& uri) {
+        internal_.Swap(&uri.internal_);
+    }
+
+    // Assignment operators; definitions in rdf_model.cc.
+    URI & operator=(proto::URI &&other);
+    URI & operator=(const URI &other);
+    URI & operator=(URI &&other);
+
+    /**
+     * Get the string representation of the URI.
+     */
+    const std::string &getUri() const {
+        return internal_.uri();
+    }
+
+    /**
+     * Set the string representation of the URI.
+     */
+    void setUri(const std::string &uri) {
+        internal_.set_uri(uri);
+    }
+
+    /**
+     * Get a canonical string representation of the URI.
+     */
+    const std::string &stringValue() const {
+        return internal_.uri();
+    }
+
+    /**
+     * Get a Turtle representation of the URI.
+     */
+    std::string as_turtle() const;
+
+    /**
+     * Get a reference to the proto message wrapped by the URI object.
+     */
+    const proto::URI& getMessage() const {
+        return internal_;
+    }
+
+ private:
+    proto::URI internal_;
+
+    // These classes swap directly with internal_ for their move operations.
+    friend class Value;
+    friend class Resource;
+    friend class Statement;
+};
+
+/**
+ * RDF Blank node implementation, backed by a BNode proto message.
+ */
+class BNode {
+ public:
+    /**
+     * Default constructor, creates empty BNode.
+     */
+    BNode() { }
+
+    /**
+     * Create a new BNode using the ID passed as argument.
+     */
+    BNode(const std::string &id)  {
+        internal_.set_id(id);
+    }
+
+    /**
+     * Create a new BNode using the ID passed as argument.
+     */
+    BNode(const char* id)  {
+        internal_.set_id(id);
+    }
+
+    /**
+     * Create a new BNode from the proto message passed as argument (copy
+     * constructor).
+     */
+    BNode(const proto::BNode &n) : internal_(n) { }
+
+    /**
+     * Create a new BNode from the proto message passed as argument (move
+     * constructor, original message is invalidated).
+     */
+    BNode(proto::BNode &&n) {
+        internal_.Swap(&n);
+    }
+
+    /**
+     * Copy constructor, create a BNode from another BNode.
+     */
+    BNode(const BNode &n) : internal_(n.internal_) {}
+
+    /**
+     * Move constructor, create a BNode from another BNode. The other BNode
+     * is invalidated.
+     */
+    BNode(BNode &&n) {
+        internal_.Swap(&n.internal_);
+    }
+
+    // Assignment operators; definitions in rdf_model.cc.
+    BNode & operator=(proto::BNode &&other);
+    BNode & operator=(const BNode &other);
+    BNode & operator=(BNode &&other);
+
+    /**
+     * Return the id of this blank node.
+     */
+    const std::string &getId() const {
+        return internal_.id();
+    }
+
+    /**
+     * Set the id of this blank node.
+     */
+    void setId(const std::string &id) {
+        internal_.set_id(id);
+    }
+
+    /**
+     * Get a canonical string representation of this blank node (its id).
+     */
+    const std::string &stringValue() const {
+        return internal_.id();
+    }
+
+    /**
+     * Get a Turtle representation of this blank node ("_:" + id).
+     */
+    std::string as_turtle() const;
+
+    /**
+     * Get a reference to the proto message wrapped by the BNode object.
+     */
+    const proto::BNode& getMessage() const {
+        return internal_;
+    }
+
+ private:
+    proto::BNode internal_;
+
+    // These classes swap directly with internal_ for their move operations.
+    friend class Value;
+    friend class Resource;
+};
+
+
+/**
+ * RDF string literal with optional language tag, backed by a
+ * StringLiteral proto message.
+ */
+class StringLiteral {
+ public:
+    /** Default constructor, creates an empty literal. */
+    StringLiteral() { }
+
+    /** Create a literal with the given content and no language tag. */
+    StringLiteral(const std::string &content)  {
+        internal_.set_content(content);
+    }
+
+    /** Create a literal with the given content and language tag. */
+    StringLiteral(const std::string &content, const std::string &language) {
+        internal_.set_content(content);
+        internal_.set_language(language);
+    }
+
+    /** Create a literal from a proto message (copy). */
+    StringLiteral(const proto::StringLiteral &other) : internal_(other) { }
+
+    /** Create a literal from a proto message (move; source invalidated). */
+    StringLiteral(proto::StringLiteral &&other) {
+        internal_.Swap(&other);
+    }
+
+    /** Copy constructor. */
+    StringLiteral(const StringLiteral &other) : internal_(other.internal_) {}
+
+    /** Move constructor; the source literal is invalidated. */
+    StringLiteral(StringLiteral &&other) {
+        internal_.Swap(&other.internal_);
+    }
+
+    // Assignment operators; definitions in rdf_model.cc.
+    StringLiteral & operator=(proto::StringLiteral &&other);
+    StringLiteral & operator=(const StringLiteral &other);
+    StringLiteral & operator=(StringLiteral &&other);
+
+    /** Get the literal content. */
+    const std::string &getContent() const {
+        return internal_.content();
+    }
+
+    /** Set the literal content. */
+    void setContent(const std::string &content) {
+        internal_.set_content(content);
+    }
+
+    /** Get the language tag; the empty string means "no language". */
+    const std::string &getLanguage() const {
+        return internal_.language();
+    }
+
+    /** Set the language tag. */
+    void setLanguage(const std::string &language) {
+        internal_.set_language(language);
+    }
+
+    /** Canonical string form of the literal (its content). */
+    const std::string &stringValue() const {
+        return internal_.content();
+    }
+
+    /** Get a reference to the wrapped proto message. */
+    const proto::StringLiteral& getMessage() const {
+        return internal_;
+    }
+
+    /** Turtle representation, e.g. "content"@lang. */
+    std::string as_turtle() const;
+
+ private:
+    proto::StringLiteral internal_;
+
+    // Value swaps directly with internal_ for its move operations.
+    friend class Value;
+};
+
+
+/**
+ * RDF datatype literal (content plus datatype URI), backed by a
+ * DatatypeLiteral proto message.
+ */
+class DatatypeLiteral {
+ public:
+    /** Default constructor, creates an empty literal. */
+    DatatypeLiteral() { }
+
+    /** Create a literal from content and datatype URI. */
+    DatatypeLiteral(const std::string &content, URI const &datatype) {
+        internal_.set_content(content);
+        internal_.mutable_datatype()->CopyFrom(datatype.getMessage());
+    }
+
+    /** Create a literal from a proto message (copy). */
+    DatatypeLiteral(const proto::DatatypeLiteral &other) : internal_(other) { }
+
+    /** Create a literal from a proto message (move; source invalidated). */
+    DatatypeLiteral(proto::DatatypeLiteral &&other) {
+        internal_.Swap(&other);
+    }
+
+    /** Copy constructor. */
+    DatatypeLiteral(const DatatypeLiteral &other) : internal_(other.internal_) { }
+
+    /** Move constructor; the source literal is invalidated. */
+    DatatypeLiteral(DatatypeLiteral &&other) {
+        internal_.Swap(&other.internal_);
+    }
+
+    // Assignment operators; definitions in rdf_model.cc.
+    DatatypeLiteral & operator=(proto::DatatypeLiteral &&other);
+    DatatypeLiteral & operator=(const DatatypeLiteral &other);
+    DatatypeLiteral & operator=(DatatypeLiteral &&other);
+
+    /** Get the literal content (lexical form). */
+    const std::string &getContent() const {
+        return internal_.content();
+    }
+
+    /** Set the literal content. */
+    void setContent(const std::string &content) {
+        internal_.set_content(content);
+    }
+
+    /** Get the datatype as a URI object (copies the message). */
+    URI getDatatype() const {
+        return URI(internal_.datatype());
+    }
+
+    /** Replace the datatype URI. CopyFrom fully overwrites any previously
+     *  set datatype; MergeFrom would keep stale fields. */
+    void setDatatype(const URI &datatype) {
+        internal_.mutable_datatype()->CopyFrom(datatype.getMessage());
+    }
+
+    /** Canonical string form of the literal (its content). */
+    const std::string &stringValue() const {
+        return internal_.content();
+    }
+
+    // Numeric accessors and conversions. These parse the content on every
+    // call and throw std::invalid_argument/std::out_of_range on malformed
+    // or out-of-range content (behavior of std::stoi and friends).
+    int intValue() const {
+        return std::stoi(getContent());
+    }
+
+    operator int() const {
+        return std::stoi(getContent());
+    }
+
+    long long longValue() const {
+        return std::stoll(getContent());
+    }
+
+    operator long long() const {
+        return std::stoll(getContent());
+    }
+
+    float floatValue() const {
+        return std::stof(getContent());
+    }
+
+    operator float() const {
+        return std::stof(getContent());
+    }
+
+    double doubleValue() const {
+        return std::stod(getContent());
+    }
+
+    operator double() const {
+        return std::stod(getContent());
+    }
+
+    /** Get a reference to the wrapped proto message. */
+    const proto::DatatypeLiteral& getMessage() const {
+        return internal_;
+    }
+
+    /** Turtle representation, e.g. "42"^^<...#int>. */
+    std::string as_turtle() const;
+
+ private:
+    proto::DatatypeLiteral internal_;
+
+    // Value swaps directly with internal_ for its move operations.
+    friend class Value;
+};
+
+/**
+ * Value is a polymorphic, but strictly typed generic implementation for URI,
+ * BNode and Literal. Copy/move constructors and assignment operators allow
+ * using URI, BNode and Literal wherever a Value is required.
+ */
+class Value {
+ public:
+    // Type tag describing which kind of node this value currently holds;
+    // NONE means the value is empty/unset.
+    enum {
+        URI = 1, BNODE, STRING_LITERAL, DATATYPE_LITERAL, NONE
+    } type;
+
+    // Default constructor, creates an empty value.
+    Value() : type(NONE) { }
+
+    // Construct from a proto message (copy); the type tag is derived from
+    // whichever oneof branch is set (see rdf_model.cc).
+    Value(const proto::Value& v);
+
+    // Construct from a proto message (move; the source is invalidated).
+    Value(proto::Value&& v);
+
+    // Copy-construct from a URI.
+    Value(const marmotta::rdf::URI &uri) : type(URI) {
+        internal_.mutable_resource()->mutable_uri()->MergeFrom(uri.getMessage());
+    }
+
+    // Move-construct from a URI; the source message is swapped out.
+    Value(marmotta::rdf::URI &&uri) : type(URI) {
+        internal_.mutable_resource()->mutable_uri()->Swap(&uri.internal_);
+    }
+
+    // Copy-construct from a BNode.
+    Value(const BNode &bnode) : type(BNODE) {
+        internal_.mutable_resource()->mutable_bnode()->MergeFrom(bnode.getMessage());
+    }
+
+    // Move-construct from a BNode.
+    Value(BNode &&bnode) : type(BNODE) {
+        internal_.mutable_resource()->mutable_bnode()->Swap(&bnode.internal_);
+    }
+
+    // Copy-construct from a string literal.
+    Value(const StringLiteral &sliteral) : type(STRING_LITERAL) {
+        internal_.mutable_literal()->mutable_stringliteral()->MergeFrom(sliteral.getMessage());
+    };
+
+    // Move-construct from a string literal.
+    Value(StringLiteral &&sliteral) : type(STRING_LITERAL) {
+        internal_.mutable_literal()->mutable_stringliteral()->Swap(&sliteral.internal_);
+    };
+
+    // Copy-construct from a datatype literal.
+    Value(const DatatypeLiteral &dliteral) : type(DATATYPE_LITERAL) {
+        internal_.mutable_literal()->mutable_dataliteral()->MergeFrom(dliteral.getMessage());
+    };
+
+    // Move-construct from a datatype literal.
+    Value(DatatypeLiteral &&dliteral) : type(DATATYPE_LITERAL) {
+        internal_.mutable_literal()->mutable_dataliteral()->Swap(&dliteral.internal_);
+    };
+
+    // Convenience: a plain string becomes a string literal with no language.
+    Value(const std::string &literal) : type(STRING_LITERAL) {
+        internal_.mutable_literal()->mutable_stringliteral()->set_content(literal);
+    };
+
+    // Convenience: a C string becomes a string literal with no language.
+    Value(const char* literal) : type(STRING_LITERAL) {
+        internal_.mutable_literal()->mutable_stringliteral()->set_content(literal);
+    };
+
+
+    // Assignment operators mirroring the constructors above; definitions
+    // are in rdf_model.cc.
+    Value &operator=(const rdf::URI &uri);
+
+    Value &operator=(const rdf::BNode &bnode);
+
+    Value &operator=(const rdf::StringLiteral &literal);
+
+    Value &operator=(const rdf::DatatypeLiteral &literal);
+
+    Value &operator=(rdf::URI &&uri);
+
+    Value &operator=(rdf::BNode &&bnode);
+
+    Value &operator=(rdf::StringLiteral &&literal);
+
+    Value &operator=(rdf::DatatypeLiteral &&literal);
+
+    // Raw lexical form: URI text, bnode id or literal content.
+    std::string stringValue() const;
+
+    // Turtle serialisation of the value.
+    std::string as_turtle() const;
+
+    // Get a reference to the wrapped proto message.
+    const proto::Value& getMessage() const {
+        return internal_;
+    }
+ private:
+    proto::Value internal_;
+
+    // Statement swaps directly with internal_ for its move operations.
+    friend class Statement;
+};
+
+
+// Resource is a polymorphic, strictly typed wrapper for the subject and
+// context positions of a statement: either a URI or a BNode.
+class Resource {
+ public:
+    // Type tag; NONE means the resource is empty/unset.
+    enum {
+        URI, BNODE, NONE
+    } type;
+
+    // Default constructor, creates an empty resource.
+    Resource() : type(NONE) { };
+
+    // Construct from a proto message (copy); the type tag is derived from
+    // whichever oneof branch is set (see rdf_model.cc).
+    Resource(const proto::Resource& v);
+
+    // Construct from a proto message (move; the source is invalidated).
+    Resource(proto::Resource&& v);
+
+    // Convenience: a plain string is interpreted as a URI.
+    Resource(const std::string &uri) : type(URI) {
+        internal_.mutable_uri()->set_uri(uri);
+    };
+
+    // Convenience: a C string is interpreted as a URI.
+    Resource(const char* uri) : type(URI) {
+        internal_.mutable_uri()->set_uri(uri);
+    };
+
+    // Copy-construct from a URI.
+    Resource(const rdf::URI &uri) : type(URI) {
+        internal_.mutable_uri()->MergeFrom(uri.getMessage());
+    }
+
+    // Copy-construct from a BNode.
+    Resource(const rdf::BNode &bnode) : type(BNODE) {
+        internal_.mutable_bnode()->MergeFrom(bnode.getMessage());
+    }
+
+    // Move-construct from a URI; the source message is swapped out.
+    Resource(rdf::URI &&uri) : type(URI) {
+        internal_.mutable_uri()->Swap(&uri.internal_);
+    }
+
+    // Move-construct from a BNode.
+    Resource(rdf::BNode &&bnode) : type(BNODE) {
+        internal_.mutable_bnode()->Swap(&bnode.internal_);
+    }
+
+    // Assignment operators; definitions in rdf_model.cc.
+    Resource & operator=(const rdf::URI &uri);
+
+    Resource & operator=(const rdf::BNode &bnode);
+
+    Resource & operator=(rdf::URI &&uri);
+
+    Resource & operator=(rdf::BNode &&bnode);
+
+    // Raw lexical form: URI text or bnode id; empty string when unset.
+    std::string stringValue() const;
+
+    // Turtle serialisation of the resource.
+    std::string as_turtle() const;
+
+    // Get a reference to the wrapped proto message.
+    const proto::Resource& getMessage() const {
+        return internal_;
+    }
+ private:
+    proto::Resource internal_;
+
+    // Statement swaps directly with internal_ for its move operations.
+    friend class Statement;
+};
+
+
+/**
+ * RDF statement (triple, or quad when a context is set), backed by a
+ * Statement proto message.
+ */
+class Statement {
+ public:
+    /** Default constructor, creates an empty statement. */
+    Statement() {}
+
+    /** Copy constructor. */
+    Statement(const Statement& other) : internal_(other.internal_) {}
+
+    /** Move constructor; the source statement is invalidated. */
+    Statement(Statement&& other) {
+        internal_.Swap(&other.internal_);
+    }
+
+    /** Create a statement from a proto message (copy). */
+    Statement(const proto::Statement& other) : internal_(other) {}
+
+    /** Create a statement from a proto message (move). */
+    Statement(proto::Statement&& other) {
+        internal_.Swap(&other);
+    }
+
+    // Assignment operators; definitions in rdf_model.cc.
+    Statement & operator=(const proto::Statement &other);
+    Statement & operator=(proto::Statement &&other);
+    Statement & operator=(const Statement &other);
+    Statement & operator=(Statement &&other);
+
+
+    /** Create a triple from subject, predicate and object. */
+    Statement(Resource const &subject, URI const &predicate, Value const &object) {
+        internal_.mutable_subject()->CopyFrom(subject.getMessage());
+        internal_.mutable_predicate()->CopyFrom(predicate.getMessage());
+        internal_.mutable_object()->CopyFrom(object.getMessage());
+    }
+
+
+    /** Create a quad from subject, predicate, object and context. */
+    Statement(Resource const &subject, URI const &predicate, Value const &object, Resource const &context) {
+        internal_.mutable_subject()->CopyFrom(subject.getMessage());
+        internal_.mutable_predicate()->CopyFrom(predicate.getMessage());
+        internal_.mutable_object()->CopyFrom(object.getMessage());
+        internal_.mutable_context()->CopyFrom(context.getMessage());
+    }
+
+    /** Create a triple, moving the components' messages. */
+    Statement(Resource &&subject, URI &&predicate, Value &&object) {
+        internal_.mutable_subject()->Swap(&subject.internal_);
+        internal_.mutable_predicate()->Swap(&predicate.internal_);
+        internal_.mutable_object()->Swap(&object.internal_);
+    }
+
+
+    /** Create a quad, moving the components' messages. */
+    Statement(Resource &&subject, URI &&predicate, Value &&object, Resource &&context) {
+        internal_.mutable_subject()->Swap(&subject.internal_);
+        internal_.mutable_predicate()->Swap(&predicate.internal_);
+        internal_.mutable_object()->Swap(&object.internal_);
+        internal_.mutable_context()->Swap(&context.internal_);
+    }
+
+
+    /** Get the subject (copies the message). */
+    Resource getSubject() const {
+        return Resource(internal_.subject());
+    }
+
+    /** Replace the subject. CopyFrom fully overwrites any previous subject;
+     *  MergeFrom would keep stale fields from the old value. */
+    void setSubject(Resource const &subject) {
+        internal_.mutable_subject()->CopyFrom(subject.getMessage());
+    }
+
+    /** Get the predicate (copies the message). */
+    URI getPredicate() const {
+        return URI(internal_.predicate());
+    }
+
+    /** Replace the predicate. */
+    void setPredicate(URI const &predicate) {
+        internal_.mutable_predicate()->CopyFrom(predicate.getMessage());
+    }
+
+    /** Get the object (copies the message). */
+    Value getObject() const {
+        return Value(internal_.object());
+    }
+
+    /** Replace the object. */
+    void setObject(Value const &object) {
+        internal_.mutable_object()->CopyFrom(object.getMessage());
+    }
+
+    /** Get the context (copies the message; empty Resource when unset). */
+    Resource getContext() const {
+        return Resource(internal_.context());
+    }
+
+    /** Replace the context (named graph). */
+    void setContext(Resource const &context) {
+        internal_.mutable_context()->CopyFrom(context.getMessage());
+    }
+
+    /** True if a context (named graph) is set on this statement. */
+    bool hasContext() const {
+        return internal_.has_context();
+    }
+
+    /** Turtle (TriG for quads) serialisation of this statement. */
+    std::string as_turtle() const;
+
+    /** Get a reference to the wrapped proto message. */
+    const proto::Statement& getMessage() const {
+        return internal_;
+    }
+ private:
+    proto::Statement internal_;
+};
+
+
+}  // namespace rdf
+}  // namespace marmotta
+
+
+#endif //MARMOTTA_RDF_MODEL_H

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/model/rdf_operators.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/model/rdf_operators.cc b/libraries/ostrich/backend/model/rdf_operators.cc
new file mode 100644
index 0000000..b9e49ad
--- /dev/null
+++ b/libraries/ostrich/backend/model/rdf_operators.cc
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "rdf_operators.h"
+
+namespace marmotta {
+namespace rdf {
+namespace proto {
+
+// Equality on proto Values: two values are equal iff the same oneof branch
+// is set on both sides and the branch contents compare equal.
+// NOTE(review): two completely unset Values compare as *not* equal here —
+// confirm no caller relies on empty == empty.
+bool operator==(const Value &lhs, const Value &rhs) {
+    if (lhs.has_resource() && rhs.has_resource()) {
+        if (lhs.resource().has_uri() && rhs.resource().has_uri()) {
+            return lhs.resource().uri() == rhs.resource().uri();
+        } else if (lhs.resource().has_bnode() && rhs.resource().has_bnode()) {
+            return lhs.resource().bnode() == rhs.resource().bnode();
+        }
+    } else if(lhs.has_literal() && rhs.has_literal()) {
+        if (lhs.literal().has_stringliteral() && rhs.literal().has_stringliteral()) {
+            return lhs.literal().stringliteral() == rhs.literal().stringliteral();
+        } else if (lhs.literal().has_dataliteral() && rhs.literal().has_dataliteral()) {
+            return lhs.literal().dataliteral() == rhs.literal().dataliteral();
+        }
+    }
+    return false;
+}
+
+// Equality on proto Resources, analogous to Value equality. Two unset
+// resources also compare as not equal (same caveat as above).
+bool operator==(const Resource &lhs, const Resource &rhs) {
+    if (lhs.has_uri() && rhs.has_uri()) {
+        return lhs.uri() == rhs.uri();
+    } else if (lhs.has_bnode() && rhs.has_bnode()) {
+        return lhs.bnode() == rhs.bnode();
+    }
+    return false;
+}
+
+// Statement equality: subject, predicate and object must match; the
+// context is compared only when at least one side actually has one.
+// Without the has_context() guard, two identical statements that both
+// lack a context would never compare equal, because
+// operator==(Resource, Resource) returns false for two unset resources.
+bool operator==(const Statement &lhs, const Statement &rhs) {
+    if (lhs.has_context() != rhs.has_context()) {
+        return false;
+    }
+    if (lhs.has_context() && !operator==(lhs.context(), rhs.context())) {
+        return false;
+    }
+    return operator==(lhs.subject(), rhs.subject()) &&
+           operator==(lhs.predicate(), rhs.predicate()) &&
+           operator==(lhs.object(), rhs.object());
+}
+
+
+}  // namespace proto
+}  // namespace rdf
+}  // namespace marmotta

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/model/rdf_operators.h
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/model/rdf_operators.h b/libraries/ostrich/backend/model/rdf_operators.h
new file mode 100644
index 0000000..2cf8183
--- /dev/null
+++ b/libraries/ostrich/backend/model/rdf_operators.h
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef MARMOTTA_RDF_OPERATORS_H
+#define MARMOTTA_RDF_OPERATORS_H
+
+#include "model/rdf_model.h"
+
+namespace marmotta {
+namespace rdf {
+namespace proto {
+
+// Namespaces are identified by their URI alone; the prefix is ignored.
+inline bool operator==(const Namespace &lhs, const Namespace &rhs) {
+    return lhs.uri() == rhs.uri();
+}
+
+inline bool operator!=(const Namespace &lhs, const Namespace &rhs) {
+    return !(lhs == rhs);
+}
+
+// URIs compare by their string representation.
+inline bool operator==(const URI &lhs, const URI &rhs) {
+    return lhs.uri() == rhs.uri();
+}
+
+inline bool operator!=(const URI &lhs, const URI &rhs) {
+    return !(lhs == rhs);
+}
+
+// Blank nodes compare by their (store-local) id.
+inline bool operator==(const BNode &lhs, const BNode &rhs) {
+    return lhs.id() == rhs.id();
+}
+
+inline bool operator!=(const BNode &lhs, const BNode &rhs) {
+    return !(lhs == rhs);
+}
+
+
+// String literals compare by content and language tag.
+inline bool operator==(const StringLiteral &lhs, const StringLiteral &rhs) {
+    return lhs.content() == rhs.content() && lhs.language() == rhs.language();
+}
+
+inline bool operator!=(const StringLiteral &lhs, const StringLiteral &rhs) {
+    return !(lhs == rhs);
+}
+
+// Datatype literals compare by content and datatype URI.
+inline bool operator==(const DatatypeLiteral &lhs, const DatatypeLiteral &rhs) {
+    return lhs.content() == rhs.content() && lhs.datatype().uri() == rhs.datatype().uri();
+}
+
+inline bool operator!=(const DatatypeLiteral &lhs, const DatatypeLiteral &rhs) {
+    return !(lhs == rhs);
+}
+
+// Deep structural comparison for values; implemented in rdf_operators.cc.
+bool operator==(const Value &lhs, const Value &rhs);
+
+inline bool operator!=(const Value &lhs, const Value &rhs) {
+    return !operator==(lhs, rhs);
+}
+
+
+// Deep structural comparison for resources; implemented in rdf_operators.cc.
+bool operator==(const Resource &lhs, const Resource &rhs);
+
+inline bool operator!=(const Resource &lhs, const Resource &rhs) {
+    return !operator==(lhs, rhs);
+}
+
+// Component-wise comparison for statements; implemented in rdf_operators.cc.
+bool operator==(const Statement &lhs, const Statement &rhs);
+
+inline bool operator!=(const Statement &lhs, const Statement &rhs) {
+    return !operator==(lhs, rhs);
+}
+
+
+}  // namespace proto
+
+
+// Equality for the C++ wrapper classes delegates to the underlying proto
+// messages, so every wrapper type shares the comparison semantics defined
+// in the proto namespace above.
+inline bool operator==(const Namespace &lhs, const Namespace &rhs) {
+    return lhs.getMessage() == rhs.getMessage();
+}
+
+inline bool operator!=(const Namespace &lhs, const Namespace &rhs) {
+    return !(lhs == rhs);
+}
+
+inline bool operator==(const URI &lhs, const URI &rhs) {
+    return lhs.getMessage() == rhs.getMessage();
+}
+
+inline bool operator!=(const URI &lhs, const URI &rhs) {
+    return !(lhs == rhs);
+}
+
+inline bool operator==(const BNode &lhs, const BNode &rhs) {
+    return lhs.getMessage() == rhs.getMessage();
+}
+
+inline bool operator!=(const BNode &lhs, const BNode &rhs) {
+    return !(lhs == rhs);
+}
+
+inline bool operator==(const StringLiteral &lhs, const StringLiteral &rhs) {
+    return lhs.getMessage() == rhs.getMessage();
+}
+
+inline bool operator!=(const StringLiteral &lhs, const StringLiteral &rhs) {
+    return !(lhs == rhs);
+}
+
+inline bool operator==(const DatatypeLiteral &lhs, const DatatypeLiteral &rhs) {
+    return lhs.getMessage() == rhs.getMessage();
+}
+
+inline bool operator!=(const DatatypeLiteral &lhs, const DatatypeLiteral &rhs) {
+    return !(lhs == rhs);
+}
+
+inline bool operator==(const Value &lhs, const Value &rhs) {
+    return lhs.getMessage() == rhs.getMessage();
+}
+
+inline bool operator!=(const Value &lhs, const Value &rhs) {
+    return !(lhs == rhs);
+}
+
+inline bool operator==(const Resource &lhs, const Resource &rhs) {
+    return lhs.getMessage() == rhs.getMessage();
+}
+
+inline bool operator!=(const Resource &lhs, const Resource &rhs) {
+    return !(lhs == rhs);
+}
+
+inline bool operator==(const Statement &lhs, const Statement &rhs) {
+    return lhs.getMessage() == rhs.getMessage();
+}
+
+inline bool operator!=(const Statement &lhs, const Statement &rhs) {
+    return !(lhs == rhs);
+}
+
+}  // namespace rdf
+}  // namespace marmotta
+
+namespace std {
+
+// Define std::hash specializations for our proto messages. Note that this generic
+// computation serializes the message and is therefore expensive. Consider using
+// specialised implementations instead.
+// NOTE(review): protobuf wire serialization is not guaranteed to be canonical
+// across library versions or for map fields; if these hashes are ever persisted
+// or shared between processes, replace this with a field-by-field hash - confirm.
+template<>
+struct hash<google::protobuf::Message> {
+    std::size_t operator()(const google::protobuf::Message &k) const {
+        std::string content;
+        k.SerializeToString(&content);
+        // Fully qualify std::string here: relying on unqualified lookup
+        // inside namespace std is fragile and inconsistent with the other
+        // specializations below.
+        return std::hash<std::string>()(content);
+    }
+};
+
+// Hash implementation for URIs. Uses a faster implementation than the generic
+// proto message version (hashes the URI string directly, no serialization).
+template<>
+struct hash<marmotta::rdf::proto::URI> {
+    std::size_t operator()(const marmotta::rdf::proto::URI &k) const {
+        return std::hash<std::string>()(k.uri());
+    }
+};
+
+// Hash implementation for BNodes. Uses a faster implementation than the generic
+// proto message version (hashes the node id directly).
+template<>
+struct hash<marmotta::rdf::proto::BNode> {
+    std::size_t operator()(const marmotta::rdf::proto::BNode &k) const {
+        return std::hash<std::string>()(k.id());
+    }
+};
+
+// Hash implementation for Resources. Dispatches to the URI or BNode hash
+// when the corresponding field is set; falls back to the generic (slow)
+// message hash for resources with neither field.
+template<>
+struct hash<marmotta::rdf::proto::Resource> {
+    std::size_t operator()(const marmotta::rdf::proto::Resource &k) const {
+        if (k.has_uri()) {
+            return std::hash<marmotta::rdf::proto::URI>()(k.uri());
+        } else if (k.has_bnode()) {
+            return std::hash<marmotta::rdf::proto::BNode>()(k.bnode());
+        }
+        return std::hash<google::protobuf::Message>()(k);
+    }
+};
+
+// The remaining specializations delegate to the generic message hash.
+template<>
+struct hash<marmotta::rdf::proto::Value> {
+    std::size_t operator()(const marmotta::rdf::proto::Value &k) const {
+        return std::hash<google::protobuf::Message>()(k);
+    }
+};
+
+template<>
+struct hash<marmotta::rdf::proto::StringLiteral> {
+    std::size_t operator()(const marmotta::rdf::proto::StringLiteral &k) const {
+        return std::hash<google::protobuf::Message>()(k);
+    }
+};
+
+template<>
+struct hash<marmotta::rdf::proto::DatatypeLiteral> {
+    std::size_t operator()(const marmotta::rdf::proto::DatatypeLiteral &k) const {
+        return std::hash<google::protobuf::Message>()(k);
+    }
+};
+
+template<>
+struct hash<marmotta::rdf::proto::Statement> {
+    std::size_t operator()(const marmotta::rdf::proto::Statement &k) const {
+        return std::hash<google::protobuf::Message>()(k);
+    }
+};
+
+template<>
+struct hash<marmotta::rdf::proto::Namespace> {
+    std::size_t operator()(const marmotta::rdf::proto::Namespace &k) const {
+        return std::hash<google::protobuf::Message>()(k);
+    }
+};
+}  // namespace std
+
+#endif //MARMOTTA_RDF_OPERATORS_H

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/parser/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/parser/CMakeLists.txt b/libraries/ostrich/backend/parser/CMakeLists.txt
new file mode 100644
index 0000000..3ed5634
--- /dev/null
+++ b/libraries/ostrich/backend/parser/CMakeLists.txt
@@ -0,0 +1,4 @@
+# Parser library: wraps libraptor2 to turn serialized RDF input into
+# marmotta proto model objects. Depends on the model library and on the
+# raptor library located by the top-level CMake configuration.
+include_directories(.. ${CMAKE_CURRENT_BINARY_DIR}/..)
+
+add_library(marmotta_parser rdf_parser.h rdf_parser.cc)
+target_link_libraries(marmotta_parser marmotta_model ${CMAKE_THREAD_LIBS_INIT} ${RAPTOR_LIBRARY})
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/marmotta/blob/0ff22a0c/libraries/ostrich/backend/parser/rdf_parser.cc
----------------------------------------------------------------------
diff --git a/libraries/ostrich/backend/parser/rdf_parser.cc b/libraries/ostrich/backend/parser/rdf_parser.cc
new file mode 100644
index 0000000..9a1fea3
--- /dev/null
+++ b/libraries/ostrich/backend/parser/rdf_parser.cc
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <raptor2/raptor2.h>
+#include "rdf_parser.h"
+
+namespace marmotta {
+namespace parser {
+// Creates a parser for the given base URI and input format. The raptor
+// world, base URI and parser instance are owned by this object and
+// released in the destructor. Statement/namespace handlers default to
+// no-ops until replaced by the caller.
+Parser::Parser(const rdf::URI& baseUri, Format format)
+        : stmt_handler([](const rdf::Statement& stmt) { })
+        , ns_handler([](const rdf::Namespace& ns) { })
+{
+    world = raptor_new_world();
+    base  = raptor_new_uri(world, (unsigned char const *) baseUri.getUri().c_str());
+
+    switch (format) {
+        case RDFXML:
+            parser = raptor_new_parser(world, "rdfxml");
+            break;
+        case TURTLE:
+            parser = raptor_new_parser(world, "turtle");
+            break;
+        case NTRIPLES:
+            parser = raptor_new_parser(world, "ntriples");
+            break;
+        case RDFA:
+            parser = raptor_new_parser(world, "rdfa");
+            break;
+        case RDFJSON:
+            parser = raptor_new_parser(world, "json");
+            break;
+        case GUESS:
+        default:
+            // Content auto-detection; the default label also guards against
+            // leaving 'parser' uninitialised (undefined behaviour in the
+            // destructor) should a new Format value be added without
+            // updating this switch.
+            parser = raptor_new_parser(world, "guess");
+            break;
+    }
+
+    // Register the static C callbacks; 'this' is passed back as user_data.
+    raptor_parser_set_statement_handler(parser, this, raptor_stmt_handler);
+    raptor_parser_set_namespace_handler(parser, this, raptor_ns_handler);
+}
+
+// Releases the raptor resources in reverse order of their creation in
+// the constructor (parser, then base URI, then the world that owns them).
+Parser::~Parser() {
+    raptor_free_parser(parser);
+    raptor_free_uri(base);
+    raptor_free_world(world);
+}
+
+
+// Static C callback invoked by raptor for every parsed triple/quad.
+// Converts the raptor term representation into the marmotta rdf model
+// and forwards the result to the user-supplied stmt_handler.
+// NOTE(review): this throws ParseError out of a callback invoked from
+// raptor's C code; unwinding through C stack frames is not guaranteed to
+// be safe - consider recording the error and checking it after
+// raptor_parser_parse_chunk returns. Confirm against raptor2 docs.
+void Parser::raptor_stmt_handler(void *user_data, raptor_statement *statement) {
+    Parser* p = static_cast<Parser*>(user_data);
+
+    rdf::Resource subject; rdf::URI predicate; rdf::Value object; rdf::Resource context;
+    // Subject: only URIs and blank nodes are legal.
+    switch (statement->subject->type) {
+        case RAPTOR_TERM_TYPE_URI:
+            subject = rdf::URI((const char*)raptor_uri_as_string(statement->subject->value.uri));
+            break;
+        case RAPTOR_TERM_TYPE_BLANK:
+            subject = rdf::BNode(std::string((const char*)statement->subject->value.blank.string, statement->subject->value.blank.string_len));
+            break;
+        default:
+            raptor_parser_parse_abort(p->parser);
+            throw ParseError("invalid subject term type");
+    }
+
+    // Predicate: must be a URI.
+    switch (statement->predicate->type) {
+        case RAPTOR_TERM_TYPE_URI:
+            predicate = rdf::URI((const char*)raptor_uri_as_string(statement->predicate->value.uri));
+            break;
+        default:
+            raptor_parser_parse_abort(p->parser);
+            throw ParseError("invalid predicate term type");
+    }
+
+    // Object: URI, blank node, or literal. Literals with a language tag
+    // become StringLiterals; literals with a datatype become
+    // DatatypeLiterals; plain literals become untagged StringLiterals.
+    switch (statement->object->type) {
+        case RAPTOR_TERM_TYPE_URI:
+            object = rdf::URI((const char*)raptor_uri_as_string(statement->object->value.uri));
+            break;
+        case RAPTOR_TERM_TYPE_BLANK:
+            object = rdf::BNode(std::string((const char*)statement->object->value.blank.string, statement->object->value.blank.string_len));
+            break;
+        case RAPTOR_TERM_TYPE_LITERAL:
+            if(statement->object->value.literal.language != NULL) {
+                object = rdf::StringLiteral(
+                        std::string((const char*)statement->object->value.literal.string, statement->object->value.literal.string_len),
+                        std::string((const char*)statement->object->value.literal.language, statement->object->value.literal.language_len)
+                );
+            } else if(statement->object->value.literal.datatype != NULL) {
+                object = rdf::DatatypeLiteral(
+                        std::string((const char*)statement->object->value.literal.string, statement->object->value.literal.string_len),
+                        rdf::URI((const char*)raptor_uri_as_string(statement->object->value.literal.datatype))
+                );
+            } else {
+                object = rdf::StringLiteral(
+                        std::string((const char*)statement->object->value.literal.string, statement->object->value.literal.string_len)
+                );
+            }
+            break;
+        default:
+            raptor_parser_parse_abort(p->parser);
+            throw ParseError("invalid object term type");
+    }
+
+    // Context (named graph) is optional; absent graphs map to an empty URI.
+    if (statement->graph != NULL) {
+        switch (statement->graph->type) {
+            case RAPTOR_TERM_TYPE_URI:
+                context = rdf::URI((const char*)raptor_uri_as_string(statement->graph->value.uri));
+                break;
+            case RAPTOR_TERM_TYPE_BLANK:
+                context = rdf::BNode(std::string((const char*)statement->graph->value.blank.string, statement->graph->value.blank.string_len));
+                break;
+            default:
+                raptor_parser_parse_abort(p->parser);
+                throw ParseError("invalid graph term type");
+        }
+    } else {
+        context = rdf::URI();
+    }
+
+    p->stmt_handler(rdf::Statement(subject, predicate, object, context));
+}
+
+
+// Static C callback invoked by raptor for namespace declarations;
+// forwards prefix and URI to the user-supplied ns_handler.
+void Parser::raptor_ns_handler(void *user_data, raptor_namespace *nspace) {
+    Parser* p = static_cast<Parser*>(user_data);
+    const char* prefix = (const char*)raptor_namespace_get_prefix(nspace);
+    const char* uri    = (const char*)raptor_uri_as_string(raptor_namespace_get_uri(nspace));
+    p->ns_handler(rdf::Namespace(prefix, uri));
+}
+
+// Streams the input to the raptor parser in fixed-size chunks. The final
+// chunk (possibly empty) is flagged as end-of-input so raptor can flush.
+// Streams in a failed state are silently ignored.
+void Parser::parse(std::istream &in) {
+    if (!in) {
+        return;
+    }
+    raptor_parser_parse_start(parser, base);
+
+    char buffer[8192];
+    while (in.read(buffer, sizeof(buffer))) {
+        raptor_parser_parse_chunk(parser, (unsigned char const *) buffer, in.gcount(), 0);
+    }
+    raptor_parser_parse_chunk(parser, (unsigned char const *) buffer, in.gcount(), 1);
+}
+
+// Maps a user-supplied format name or MIME type onto a Format constant.
+// The comparisons are mutually exclusive, so ordering is irrelevant;
+// any unrecognised name falls back to RDF/XML.
+Format FormatFromString(const std::string &name) {
+    if (name == "auto" || name == "guess") {
+        return GUESS;
+    }
+    if (name == "turtle" || name == "text/turtle") {
+        return TURTLE;
+    }
+    if (name == "n3" || name == "ntriples" || name == "text/n3") {
+        return NTRIPLES;
+    }
+    if (name == "json" || name == "application/json" || name == "application/rdf+json") {
+        return RDFJSON;
+    }
+    // "rdfxml", "rdf/xml", "xml" and anything unknown map to RDF/XML.
+    return RDFXML;
+}
+
+}
+}