Posted to commits@geode.apache.org by ji...@apache.org on 2016/04/21 19:16:59 UTC

[24/50] [abbrv] incubator-geode git commit: GEODE-1244: Revert rename of package, directory, project and file for geode-spark-connector

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ddee87fe/geode-spark-connector/geode-spark-connector/src/test/scala/unittest/io/pivotal/geode/spark/connector/LocatorHelperTest.scala
----------------------------------------------------------------------
diff --git a/geode-spark-connector/geode-spark-connector/src/test/scala/unittest/io/pivotal/geode/spark/connector/LocatorHelperTest.scala b/geode-spark-connector/geode-spark-connector/src/test/scala/unittest/io/pivotal/geode/spark/connector/LocatorHelperTest.scala
deleted file mode 100644
index c775784..0000000
--- a/geode-spark-connector/geode-spark-connector/src/test/scala/unittest/io/pivotal/geode/spark/connector/LocatorHelperTest.scala
+++ /dev/null
@@ -1,168 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package unittest.io.pivotal.geode.spark.connector
-
-import java.net.InetAddress
-
-import io.pivotal.geode.spark.connector.internal.LocatorHelper
-import org.scalatest.FunSuite
-
-class LocatorHelperTest extends FunSuite {
-
-  test("locatorStr2HostPortPair hostname w/o domain") {
-    val (host, port) = ("localhost", 10334)
-    assert(LocatorHelper.locatorStr2HostPortPair(s"$host:$port").get ==(host, port))
-    assert(LocatorHelper.locatorStr2HostPortPair(s"$host[$port]").get ==(host, port))
-  }
-
-  test("locatorStr2HostPortPair hostname w/ domain") {
-    val (host, port) = ("localhost.pivotal.io", 10334)
-    assert(LocatorHelper.locatorStr2HostPortPair(s"$host:$port").get ==(host, port))
-    assert(LocatorHelper.locatorStr2HostPortPair(s"$host[$port]").get ==(host, port))
-  }
-
-  test("locatorStr2HostPortPair w/ invalid host name") {
-    // empty or null locatorStr
-    assert(LocatorHelper.locatorStr2HostPortPair("").isFailure)
-    assert(LocatorHelper.locatorStr2HostPortPair(null).isFailure)
-    // host name has leading `.`
-    assert(LocatorHelper.locatorStr2HostPortPair(".localhost.1234").isFailure)
-    // host name has leading and/or trailing whitespace
-    assert(LocatorHelper.locatorStr2HostPortPair(" localhost.1234").isFailure)
-    assert(LocatorHelper.locatorStr2HostPortPair("localhost .1234").isFailure)
-    assert(LocatorHelper.locatorStr2HostPortPair(" localhost .1234").isFailure)
-    // host name contains invalid characters
-    assert(LocatorHelper.locatorStr2HostPortPair("local%host.1234").isFailure)
-    assert(LocatorHelper.locatorStr2HostPortPair("localhost*.1234").isFailure)
-    assert(LocatorHelper.locatorStr2HostPortPair("^localhost.1234").isFailure)
-  }
-
-  test("locatorStr2HostPortPair w/ valid port") {
-    val host = "192.168.0.1"
-    // port has 2, 3, 4, 5 digits
-    assert(LocatorHelper.locatorStr2HostPortPair(s"$host:20").get ==(host, 20))
-    assert(LocatorHelper.locatorStr2HostPortPair(s"$host:300").get ==(host, 300))
-    assert(LocatorHelper.locatorStr2HostPortPair(s"$host:4000").get ==(host, 4000))
-    assert(LocatorHelper.locatorStr2HostPortPair(s"$host:50000").get ==(host, 50000))
-  }
-  
-  test("locatorStr2HostPortPair w/ invalid port") {
-    // port number has fewer than 2 digits
-    assert(LocatorHelper.locatorStr2HostPortPair("localhost.9").isFailure)
-    // port number has more than 5 digits
-    assert(LocatorHelper.locatorStr2HostPortPair("localhost.100000").isFailure)
-    // port number is not numeric
-    assert(LocatorHelper.locatorStr2HostPortPair("localhost.1xx1").isFailure)
-  }
-  
-  test("parseLocatorsString with valid locator(s)") {
-    val (host1, port1) = ("localhost", 10334)
-    assert(LocatorHelper.parseLocatorsString(s"$host1:$port1") == Seq((host1, port1)))
-    val (host2, port2) = ("localhost2", 10335)
-    assert(LocatorHelper.parseLocatorsString(s"$host1:$port1,$host2:$port2") == Seq((host1, port1),(host2, port2)))
-    val (host3, port3) = ("localhost3", 10336)
-    assert(LocatorHelper.parseLocatorsString(s"$host1:$port1,$host2:$port2,$host3:$port3") == 
-      Seq((host1, port1),(host2, port2),(host3, port3)))
-  }
-
-  test("parseLocatorsString with invalid locator(s)") {
-    // empty and null locatorsStr
-    intercept[Exception] { LocatorHelper.parseLocatorsString("") }
-    intercept[Exception] { LocatorHelper.parseLocatorsString(null) }
-    // 1 bad locatorStr
-    intercept[Exception] { LocatorHelper.parseLocatorsString("local%host.1234") }
-    // 1 good locatorStr and 1 bad locatorStr
-    intercept[Exception] { LocatorHelper.parseLocatorsString("localhost:2345,local%host.1234") }
-    intercept[Exception] { LocatorHelper.parseLocatorsString("local^host:2345,localhost.1234") }
-  }
-
-  test("pickPreferredGeodeServers: shared servers and one gf-server per host") {
-    val (srv1, srv2, srv3, srv4) = (("host1", 4001), ("host2", 4002), ("host3", 4003),("host4", 4004))
-    val servers = Seq(srv1, srv2, srv3, srv4)
-    verifyPickPreferredGeodeServers(servers, "host1", "<driver>", Seq(srv1, srv2, srv3))
-    verifyPickPreferredGeodeServers(servers, "host2", "0", Seq(srv2, srv3, srv4))
-    verifyPickPreferredGeodeServers(servers, "host3", "1", Seq(srv3, srv4, srv1))
-    verifyPickPreferredGeodeServers(servers, "host4", "2", Seq(srv4, srv1, srv2))
-  }
-
-  test("pickPreferredGeodeServers: shared servers, one gf-server per host, un-sorted list") {
-    val (srv1, srv2, srv3, srv4) = (("host1", 4001), ("host2", 4002), ("host3", 4003),("host4", 4004))
-    val servers = Seq(srv4, srv2, srv3, srv1)
-    verifyPickPreferredGeodeServers(servers, "host1", "<driver>", Seq(srv1, srv2, srv3))
-    verifyPickPreferredGeodeServers(servers, "host2", "0", Seq(srv2, srv3, srv4))
-    verifyPickPreferredGeodeServers(servers, "host3", "1", Seq(srv3, srv4, srv1))
-    verifyPickPreferredGeodeServers(servers, "host4", "2", Seq(srv4, srv1, srv2))
-  }
-
-  test("pickPreferredGeodeServers: shared servers and two gf-server per host") {
-    val (srv1, srv2, srv3, srv4) = (("host1", 4001), ("host1", 4002), ("host2", 4003), ("host2", 4004))
-    val servers = Seq(srv1, srv2, srv3, srv4)
-    verifyPickPreferredGeodeServers(servers, "host1", "<driver>", Seq(srv1, srv2, srv3))
-    verifyPickPreferredGeodeServers(servers, "host1", "0", Seq(srv2, srv1, srv3))
-    verifyPickPreferredGeodeServers(servers, "host2", "1", Seq(srv3, srv4, srv1))
-    verifyPickPreferredGeodeServers(servers, "host2", "2", Seq(srv4, srv3, srv1))
-  }
-
-  test("pickPreferredGeodeServers: shared servers, two gf-server per host, un-sorted server list") {
-    val (srv1, srv2, srv3, srv4) = (("host1", 4001), ("host1", 4002), ("host2", 4003), ("host2", 4004))
-    val servers = Seq(srv1, srv4, srv3, srv2)
-    verifyPickPreferredGeodeServers(servers, "host1", "<driver>", Seq(srv1, srv2, srv3))
-    verifyPickPreferredGeodeServers(servers, "host1", "0", Seq(srv2, srv1, srv3))
-    verifyPickPreferredGeodeServers(servers, "host2", "1", Seq(srv3, srv4, srv1))
-    verifyPickPreferredGeodeServers(servers, "host2", "2", Seq(srv4, srv3, srv1))
-  }
-
-  test("pickPreferredGeodeServers: no shared servers and one gf-server per host") {
-    val (srv1, srv2, srv3, srv4) = (("host1", 4001), ("host2", 4002), ("host3", 4003),("host4", 4004))
-    val servers = Seq(srv1, srv2, srv3, srv4)
-    verifyPickPreferredGeodeServers(servers, "host5", "<driver>", Seq(srv1, srv2, srv3))
-    verifyPickPreferredGeodeServers(servers, "host6", "0", Seq(srv2, srv3, srv4))
-    verifyPickPreferredGeodeServers(servers, "host7", "1", Seq(srv3, srv4, srv1))
-    verifyPickPreferredGeodeServers(servers, "host8", "2", Seq(srv4, srv1, srv2))
-  }
-
-  test("pickPreferredGeodeServers: no shared servers, one gf-server per host, and less gf-server") {
-    val (srv1, srv2) = (("host1", 4001), ("host2", 4002))
-    val servers = Seq(srv1, srv2)
-    verifyPickPreferredGeodeServers(servers, "host5", "<driver>", Seq(srv1, srv2))
-    verifyPickPreferredGeodeServers(servers, "host6", "0", Seq(srv2, srv1))
-    verifyPickPreferredGeodeServers(servers, "host7", "1", Seq(srv1, srv2))
-    verifyPickPreferredGeodeServers(servers, "host8", "2", Seq(srv2, srv1))
-
-
-    println("host name: " + InetAddress.getLocalHost.getHostName)
-    println("canonical host name: " + InetAddress.getLocalHost.getCanonicalHostName)
-    println("canonical host name 2: " + InetAddress.getByName(InetAddress.getLocalHost.getHostName).getCanonicalHostName)
-  }
-
-  test("pickPreferredGeodeServers: ad-hoc") {
-    val (srv4, srv5, srv6) = (
-      ("w2-gst-pnq-04.gemstone.com", 40411), ("w2-gst-pnq-05.gemstone.com", 40411), ("w2-gst-pnq-06.gemstone.com", 40411))
-    val servers = Seq(srv6, srv5, srv4)
-    verifyPickPreferredGeodeServers(servers, "w2-gst-pnq-03.gemstone.com", "<driver>", Seq(srv4, srv5, srv6))
-    verifyPickPreferredGeodeServers(servers, "w2-gst-pnq-04.gemstone.com", "1", Seq(srv4, srv5, srv6))
-    verifyPickPreferredGeodeServers(servers, "w2-gst-pnq-05.gemstone.com", "0", Seq(srv5, srv6, srv4))
-    verifyPickPreferredGeodeServers(servers, "w2-gst-pnq-06.gemstone.com", "2", Seq(srv6, srv4, srv5))
-  }
-  
-  def verifyPickPreferredGeodeServers(
-    servers: Seq[(String, Int)], hostName: String, executorId: String, expectation: Seq[(String, Int)]): Unit = {
-    val result = LocatorHelper.pickPreferredGeodeServers(servers, hostName, executorId)
-    assert(result == expectation, s"pick servers for $hostName:$executorId")
-  }
-
-}

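A note on the grammar these deleted tests pin down: a locator string is
"host:port" or "host[port]", where the host must not begin with a dot or
contain whitespace or special characters, and the port has 2 to 5 digits.
A minimal Scala sketch of that contract, inferred from the expected values
(the shipped LocatorHelper.locatorStr2HostPortPair may differ in details):

    import scala.util.Try

    object LocatorParseSketch {
      // host:port and host[port] forms; a hostname starts with a letter or
      // digit and may contain letters, digits, '-', '_', and '.'
      private val ColonForm   = """([A-Za-z0-9][\w\-.]*):(\d{2,5})""".r
      private val BracketForm = """([A-Za-z0-9][\w\-.]*)\[(\d{2,5})\]""".r

      def locatorStr2HostPortPair(s: String): Try[(String, Int)] = Try {
        s match {
          case ColonForm(host, port)   => (host, port.toInt)
          case BracketForm(host, port) => (host, port.toInt)
          case bad => throw new IllegalArgumentException("invalid locator: " + bad)
        }
      }

      // comma-separated list; any bad entry fails the whole call, matching
      // the intercept[Exception] expectations above
      def parseLocatorsString(locatorsStr: String): Seq[(String, Int)] =
        locatorsStr.split(",").map(s => locatorStr2HostPortPair(s).get).toSeq
    }
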
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ddee87fe/geode-spark-connector/geode-spark-connector/src/test/scala/unittest/io/pivotal/geode/spark/connector/rdd/GeodeRDDPartitionerTest.scala
----------------------------------------------------------------------
diff --git a/geode-spark-connector/geode-spark-connector/src/test/scala/unittest/io/pivotal/geode/spark/connector/rdd/GeodeRDDPartitionerTest.scala b/geode-spark-connector/geode-spark-connector/src/test/scala/unittest/io/pivotal/geode/spark/connector/rdd/GeodeRDDPartitionerTest.scala
deleted file mode 100644
index f53c178..0000000
--- a/geode-spark-connector/geode-spark-connector/src/test/scala/unittest/io/pivotal/geode/spark/connector/rdd/GeodeRDDPartitionerTest.scala
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package unittest.io.pivotal.geode.spark.connector.rdd
-
-import com.gemstone.gemfire.distributed.internal.ServerLocation
-import io.pivotal.geode.spark.connector.internal.RegionMetadata
-import io.pivotal.geode.spark.connector.internal.rdd.GeodeRDDPartitioner._
-import io.pivotal.geode.spark.connector.GeodeConnection
-import io.pivotal.geode.spark.connector.internal.rdd._
-import org.apache.spark.Partition
-import org.mockito.Mockito._
-import org.scalatest.mock.MockitoSugar
-import org.scalatest.{Matchers, FunSuite}
-
-import java.util.{HashSet => JHashSet, HashMap => JHashMap}
-
-import scala.collection.mutable
-
-class GeodeRDDPartitionerTest extends FunSuite with Matchers with MockitoSugar {
-
-  val emptyServerBucketMap: JHashMap[ServerLocation, JHashSet[Integer]] = new JHashMap()
-
-  def toJavaServerBucketMap(map: Map[(String, Int), Set[Int]]): JHashMap[ServerLocation, JHashSet[Integer]] = {
-    import scala.collection.JavaConversions._
-    val tmp = map.map {case ((host, port), set) => (new ServerLocation(host, port), set.map(Integer.valueOf))}
-    (new JHashMap[ServerLocation, JHashSet[Integer]]() /: tmp) { case (acc, (s, jset)) => acc.put(s, new JHashSet(jset)); acc }
-  }
-  
-  val map: mutable.Map[(String, Int), mutable.Set[Int]] = mutable.Map(
-    ("s0",1) -> mutable.Set.empty, ("s1",2) -> mutable.Set(0), ("s2",3) -> mutable.Set(1, 2), ("s3",4) -> mutable.Set(3, 4, 5))
-
-  
-  // update this test whenever the default setting changes
-  test("default partitioned region partitioner") {
-    assert(GeodeRDDPartitioner.defaultPartitionedRegionPartitioner === ServerSplitsPartitioner)
-  }
-
-  // update this test whenever the default setting changes
-  test("default replicated region partitioner") {
-    assert(GeodeRDDPartitioner.defaultReplicatedRegionPartitioner === OnePartitionPartitioner)
-  }
-  
-  test("GeodeRDDPartitioner.apply method") {
-    import io.pivotal.geode.spark.connector.internal.rdd.GeodeRDDPartitioner._
-    for ((name, partitioner) <- partitioners) assert(GeodeRDDPartitioner(name) == partitioner)
-    assert(GeodeRDDPartitioner("dummy") == GeodeRDDPartitioner.defaultPartitionedRegionPartitioner)
-    assert(GeodeRDDPartitioner() == GeodeRDDPartitioner.defaultPartitionedRegionPartitioner)
-  }
-  
-  test("OnePartitionPartitioner") {
-    val mockConnection = mock[GeodeConnection]
-    val partitions = OnePartitionPartitioner.partitions[String, String](mockConnection, null, Map.empty)
-    verifySinglePartition(partitions)
-  }
-
-  def verifySinglePartition(partitions: Array[Partition]): Unit = {
-    assert(1 == partitions.size)
-    assert(partitions(0).index === 0)
-    assert(partitions(0).isInstanceOf[GeodeRDDPartition])
-    assert(partitions(0).asInstanceOf[GeodeRDDPartition].bucketSet.isEmpty)
-  }
-
-  test("ServerSplitsPartitioner.doPartitions(): n=1 & no empty bucket") {
-    val map: List[(String, mutable.Set[Int])] = List(
-      "server1" -> mutable.Set(3,2,1,0), "server2" -> mutable.Set(5,4))
-    val partitions = ServerSplitsPartitioner.doPartitions(map, 6, 1)
-    verifyPartitions(partitions, List(
-      (Set(0, 1, 2, 3), Seq("server1")), (Set(4, 5), Seq("server2"))))
-  }
-
-  test("ServerSplitsPartitioner.doPartitions(): n=1 & 1 empty bucket") {
-    val map: List[(String, mutable.Set[Int])] = List(
-      "server1" -> mutable.Set(3,2,1,0), "server2" -> mutable.Set(5,4))
-    val partitions = ServerSplitsPartitioner.doPartitions(map, 7, 1)
-    verifyPartitions(partitions, List(
-      (Set(0, 1, 2, 3, 6), Seq("server1")), (Set(4, 5), Seq("server2"))))
-  }
-
-  test("ServerSplitsPartitioner.doPartitions(): n=1 & 2 empty bucket") {
-    val map: List[(String, mutable.Set[Int])] = List(
-      "server1" -> mutable.Set(3,2,1,0), "server2" -> mutable.Set(5,4))
-    val partitions = ServerSplitsPartitioner.doPartitions(map, 8, 1)
-    verifyPartitions(partitions, List(
-      (Set(0, 1, 2, 3, 6), Seq("server1")), (Set(4, 5, 7), Seq("server2"))))
-  }
-
-  test("ServerSplitsPartitioner.doPartitions(): n=1 & 5 empty bucket") {
-    val map: List[(String, mutable.Set[Int])] = List(
-      "server1" -> mutable.Set(3,2,1,0), "server2" -> mutable.Set(5,4))
-    val partitions = ServerSplitsPartitioner.doPartitions(map, 11, 1)
-    verifyPartitions(partitions, List(
-      (Set(0, 1, 2, 3, 6, 7, 8), Seq("server1")), (Set(4, 5, 9, 10), Seq("server2"))))
-  }
-
-  test("ServerSplitsPartitioner.doPartitions(): n=1, 4 empty-bucket, non-continuous IDs") {
-    val map: List[(String, mutable.Set[Int])] = List(
-      "server1" -> mutable.Set(1, 3), "server2" -> mutable.Set(5,7))
-    val partitions = ServerSplitsPartitioner.doPartitions(map, 8, 1)
-    verifyPartitions(partitions, List(
-      (Set(0, 1, 2, 3), Seq("server1")), (Set(4, 5, 6, 7), Seq("server2"))))
-  }
-
-  test("ServerSplitsPartitioner.doPartitions(): n=2, no empty buckets, 3 servers have 1, 2, and 3 buckets") {
-    val map: List[(String, mutable.Set[Int])] = List(
-      "s1" -> mutable.Set(0), "s2" -> mutable.Set(1, 2), "s3" -> mutable.Set(3, 4, 5))
-    val partitions = ServerSplitsPartitioner.doPartitions(map, 6, 2)
-    // partitions.foreach(println)
-    verifyPartitions(partitions, List(
-      (Set(0), Seq("s1")), (Set(1), Seq("s2")), (Set(2), Seq("s2")), (Set(3, 4), Seq("s3")), (Set(5), Seq("s3"))))
-  }
-
-  test("ServerSplitsPartitioner.doPartitions(): n=3, no empty buckets, 4 servers have 0, 2, 3, and 4 buckets") {
-    val map: List[(String, mutable.Set[Int])] = List(
-      "s0" -> mutable.Set.empty, "s1" -> mutable.Set(0, 1), "s2" -> mutable.Set(2, 3, 4), "s3" -> mutable.Set(5, 6, 7, 8))
-    val partitions = ServerSplitsPartitioner.doPartitions(map, 9, 3)
-    // partitions.foreach(println)
-    verifyPartitions(partitions, List(
-      (Set(0), Seq("s1")), (Set(1), Seq("s1")), (Set(2), Seq("s2")), (Set(3), Seq("s2")), (Set(4), Seq("s2")),
-      (Set(5, 6), Seq("s3")), (Set(7, 8), Seq("s3")) ))
-  }
-
-  test("ServerSplitsPartitioner.partitions(): metadata = None ") {
-    val regionPath = "test"
-    val mockConnection = mock[GeodeConnection]
-    intercept[RuntimeException] { ServerSplitsPartitioner.partitions[String, String](mockConnection, null, Map.empty) }
-  }
-
-  test("ServerSplitsPartitioner.partitions(): replicated region ") {
-    val regionPath = "test"
-    val mockConnection = mock[GeodeConnection]
-    val md = new RegionMetadata(regionPath, false, 11, null)
-    when(mockConnection.getRegionMetadata(regionPath)).thenReturn(Some(md))
-    val partitions = ServerSplitsPartitioner.partitions[String, String](mockConnection, md, Map.empty)
-    verifySinglePartition(partitions)
-  }
-
-  test("ServerSplitsPartitioner.partitions(): partitioned region w/o data ") {
-    val regionPath = "test"
-    val mockConnection = mock[GeodeConnection]
-    val md = new RegionMetadata(regionPath, true, 6, emptyServerBucketMap)
-    when(mockConnection.getRegionMetadata(regionPath)).thenReturn(Some(md))
-    val partitions = ServerSplitsPartitioner.partitions[String, String](mockConnection, md, Map.empty)
-    verifySinglePartition(partitions)
-  }
-
-  test("ServerSplitsPartitioner.partitions(): partitioned region w/ some data ") {
-    import io.pivotal.geode.spark.connector.NumberPartitionsPerServerPropKey
-    val regionPath = "test"
-    val mockConnection = mock[GeodeConnection]
-    val map: Map[(String, Int), Set[Int]] = Map(
-      ("s0",1) -> Set.empty, ("s1",2) -> Set(0), ("s2",3) -> Set(1, 2), ("s3",4) -> Set(3, 4, 5))
-    val md = new RegionMetadata(regionPath, true, 6, toJavaServerBucketMap(map))
-    when(mockConnection.getRegionMetadata(regionPath)).thenReturn(Some(md))
-    val partitions = ServerSplitsPartitioner.partitions[String, String](mockConnection, md, Map(NumberPartitionsPerServerPropKey->"2"))
-    // partitions.foreach(println)
-    verifyPartitions(partitions, List(
-      (Set(0), Seq("s1")), (Set(1), Seq("s2")), (Set(2), Seq("s2")), (Set(3, 4), Seq("s3")), (Set(5), Seq("s3"))))
-  }
-  
-  // Note: since the order of partitions is not pre-determined, we have to verify partition id
-  // and contents separately
-  def verifyPartitions(partitions: Array[Partition], expPartitions: List[(Set[Int], Seq[String])]): Unit = {
-    // 1. check size
-    assert(partitions.size == expPartitions.size)
-    // 2. check IDs are 0 to n-1
-    (0 until partitions.size).toList.zip(partitions).foreach { case (id, p) => assert(id == p.index) }
-
-    // 3. get all pairs of bucket set and its locations, and compare to the expected pairs
-    val list = partitions.map { e =>
-      val p = e.asInstanceOf[GeodeRDDPartition]
-      (p.bucketSet, p.locations)
-    }
-    expPartitions.foreach(e => assert(list.contains(e)))    
-  }
-
-}

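Taken together, these cases fix the splitting rule: each server's sorted
bucket list is cut into chunks of size ceil(k/n), servers without buckets
contribute no partitions, and bucket IDs owned by no server (they may still
receive data before the RDD is materialized) are appended to the leading
partitions in contiguous runs. A compact Scala sketch that reproduces the
expected values above, inferred from the tests rather than copied from the
shipped ServerSplitsPartitioner:

    object ServerSplitsSketch {
      def doPartitionsSketch(
          serverBuckets: List[(String, Set[Int])],
          totalBuckets: Int,
          n: Int): List[(Set[Int], Seq[String])] = {
        def groupSize(k: Int, g: Int): Int = math.ceil(k / g.toDouble).toInt
        // split each server's sorted buckets into at most n chunks
        val chunks = serverBuckets.flatMap { case (srv, buckets) =>
          if (buckets.isEmpty) Nil
          else buckets.toList.sorted
                 .grouped(groupSize(buckets.size, n)).map(c => (c, srv)).toList
        }
        // bucket IDs that no server reported, folded into leading partitions
        val empties = (0 until totalBuckets)
          .filterNot(b => serverBuckets.exists(_._2.contains(b))).toList
        val extras =
          if (empties.isEmpty) Nil
          else empties.grouped(groupSize(empties.size, chunks.size)).toList
        chunks.zipWithIndex.map { case ((buckets, srv), i) =>
          val extra = if (i < extras.size) extras(i) else Nil
          ((buckets ++ extra).toSet, Seq(srv))
        }
      }
    }

For instance, doPartitionsSketch(List("server1" -> Set(0, 1, 2, 3),
"server2" -> Set(4, 5)), 11, 1) yields (Set(0, 1, 2, 3, 6, 7, 8), Seq("server1"))
and (Set(4, 5, 9, 10), Seq("server2")), matching the "5 empty buckets" case.
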
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ddee87fe/geode-spark-connector/geode-spark-connector/src/test/scala/unittest/io/pivotal/geode/spark/connector/rdd/GeodeRegionRDDTest.scala
----------------------------------------------------------------------
diff --git a/geode-spark-connector/geode-spark-connector/src/test/scala/unittest/io/pivotal/geode/spark/connector/rdd/GeodeRegionRDDTest.scala b/geode-spark-connector/geode-spark-connector/src/test/scala/unittest/io/pivotal/geode/spark/connector/rdd/GeodeRegionRDDTest.scala
deleted file mode 100644
index 046ceac..0000000
--- a/geode-spark-connector/geode-spark-connector/src/test/scala/unittest/io/pivotal/geode/spark/connector/rdd/GeodeRegionRDDTest.scala
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package unittest.io.pivotal.geode.spark.connector.rdd
-
-import com.gemstone.gemfire.cache.Region
-import io.pivotal.geode.spark.connector.internal.RegionMetadata
-import io.pivotal.geode.spark.connector.internal.rdd.{GeodeRDDPartition, GeodeRegionRDD}
-import io.pivotal.geode.spark.connector.{GeodeConnectionConf, GeodeConnection}
-import org.apache.spark.{TaskContext, Partition, SparkContext}
-import org.mockito.Mockito._
-import org.mockito.Matchers.{eq => mockEq, any => mockAny}
-import org.scalatest.mock.MockitoSugar
-import org.scalatest.{Matchers, FunSuite}
-
-import scala.reflect.ClassTag
-
-class GeodeRegionRDDTest extends FunSuite with Matchers with MockitoSugar {
-
-  /** Create common mocks; not every mock is used by every test. */
-  def createMocks[K, V](regionPath: String)(implicit kt: ClassTag[K], vt: ClassTag[V], m: Manifest[Region[K, V]])
-    : (String, Region[K,V], GeodeConnectionConf, GeodeConnection) = {
-    val mockConnection = mock[GeodeConnection]
-    val mockRegion = mock[Region[K, V]]
-    val mockConnConf = mock[GeodeConnectionConf]
-    when(mockConnConf.getConnection).thenReturn(mockConnection)
-    when(mockConnection.getRegionProxy[K, V](regionPath)).thenReturn(mockRegion)
-    when(mockConnConf.locators).thenReturn(Seq.empty)
-    (regionPath, mockRegion, mockConnConf, mockConnection)
-  }
-  
-  test("create GeodeRDD with non-existing region") {
-    val (regionPath, mockRegion, mockConnConf, mockConnection) = createMocks[String, String]("test")
-    when(mockConnConf.getConnection).thenReturn(mockConnection)
-    when(mockConnection.validateRegion[String,String](regionPath)).thenThrow(new RuntimeException)
-    val mockSparkContext = mock[SparkContext]
-    intercept[RuntimeException] { GeodeRegionRDD[String, String](mockSparkContext, regionPath, mockConnConf) }
-    verify(mockConnConf).getConnection
-    verify(mockConnection).validateRegion[String, String](regionPath)
-  }
-  
-  test("getPartitions with non-existing region") {
-    // region exists when the RDD is created, but gets removed before getPartitions() is invoked
-    val (regionPath, mockRegion, mockConnConf, mockConnection) = createMocks[String, String]("test")
-    when(mockConnection.getRegionMetadata[String, String](regionPath)).thenReturn(None)
-    val mockSparkContext = mock[SparkContext]
-    intercept[RuntimeException] { GeodeRegionRDD[String, String](mockSparkContext, regionPath, mockConnConf).getPartitions }
-  }
-
-  test("getPartitions with replicated region and not preferred env") {
-    val (regionPath, mockRegion, mockConnConf, mockConnection) = createMocks[String, String]("test")
-    implicit val mockConnConf2 = mockConnConf
-    val mockSparkContext = mock[SparkContext]
-    when(mockConnection.getRegionMetadata[String, String](regionPath)).thenReturn(Some(new RegionMetadata(regionPath, false, 0, null)))
-    val partitions = GeodeRegionRDD(mockSparkContext, regionPath, mockConnConf).partitions
-    verifySinglePartition(partitions)
-  }
-
-  def verifySinglePartition(partitions: Array[Partition]): Unit = {
-    assert(1 == partitions.size)
-    assert(partitions(0).index === 0)
-    assert(partitions(0).isInstanceOf[GeodeRDDPartition])
-    assert(partitions(0).asInstanceOf[GeodeRDDPartition].bucketSet.isEmpty)
-  }
-
-  test("getPartitions with replicated region and preferred OnePartitionPartitioner") {
-    // since it's a replicated region, OnePartitionPartitioner is used, overriding the preferred partitioner
-    import io.pivotal.geode.spark.connector.{PreferredPartitionerPropKey, OnePartitionPartitionerName}
-    val (regionPath, mockRegion, mockConnConf, mockConnection) = createMocks[String, String]("test")
-    when(mockConnection.getRegionMetadata[String, String](regionPath)).thenReturn(Some(new RegionMetadata(regionPath, false, 0, null)))
-    implicit val mockConnConf2 = mockConnConf
-    val mockSparkContext = mock[SparkContext]
-    val env = Map(PreferredPartitionerPropKey -> OnePartitionPartitionerName)
-    val partitions = GeodeRegionRDD(mockSparkContext, regionPath, mockConnConf, env).partitions
-    verifySinglePartition(partitions)
-  }
-
-  test("getPartitions with partitioned region and not preferred env") {
-    val (regionPath, mockRegion, mockConnConf, mockConnection) = createMocks[String, String]("test")
-    implicit val mockConnConf2 = mockConnConf
-    val mockSparkContext = mock[SparkContext]
-    when(mockConnection.getRegionMetadata[String, String](regionPath)).thenReturn(Some(new RegionMetadata(regionPath, true, 2, null)))
-    val partitions = GeodeRegionRDD(mockSparkContext, regionPath, mockConnConf).partitions
-    verifySinglePartition(partitions)
-  }
-
-  test("GeodeRDD.compute() method") {
-    val (regionPath, mockRegion, mockConnConf, mockConnection) = createMocks[String, String]("test")
-    implicit val mockConnConf2 = mockConnConf
-    val mockIter = mock[Iterator[(String, String)]]
-    val partition = GeodeRDDPartition(0, Set.empty)
-    when(mockConnection.getRegionMetadata[String, String](regionPath)).thenReturn(Some(new RegionMetadata(regionPath, true, 2, null)))
-    when(mockConnection.getRegionData[String, String](regionPath, None, partition)).thenReturn(mockIter)
-    val mockSparkContext = mock[SparkContext]
-    val rdd = GeodeRegionRDD[String, String](mockSparkContext, regionPath, mockConnConf)
-    val partitions = rdd.partitions
-    assert(1 == partitions.size)
-    val mockTaskContext = mock[TaskContext]
-    rdd.compute(partitions(0), mockTaskContext)        
-    verify(mockConnection).getRegionData[String, String](mockEq(regionPath), mockEq(None), mockEq(partition))
-    // verify(mockConnection).getRegionData[String, String](regionPath, Set.empty.asInstanceOf[Set[Int]], "geodeRDD 0.0")
-  }
-
-}

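Outside the mocks, wiring the same RDD against a running cluster is short.
A sketch in Scala (GeodeRegionRDD, GeodeConnectionConf, and GeodeLocatorPropKey
are used as in the tests and demos; "str_str_region" is the region the demos
below create, and the locator is passed on the command line):

    import io.pivotal.geode.spark.connector.{GeodeConnectionConf, GeodeLocatorPropKey}
    import io.pivotal.geode.spark.connector.internal.rdd.GeodeRegionRDD
    import org.apache.spark.{SparkConf, SparkContext}

    object RegionRddSketch {
      def main(args: Array[String]): Unit = {
        val conf = new SparkConf().setAppName("RegionRddSketch")
          .set(GeodeLocatorPropKey, args(0)) // e.g. "localhost[10334]"
        val sc = new SparkContext(conf)
        val connConf = GeodeConnectionConf(conf)
        // exposes the region as an RDD[(String, String)]; partitioning follows
        // the bucket-metadata rules exercised in GeodeRDDPartitionerTest above
        val rdd = GeodeRegionRDD[String, String](sc, "str_str_region", connConf)
        rdd.collect().foreach(println)
        sc.stop()
      }
    }
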
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ddee87fe/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/Emp.java
----------------------------------------------------------------------
diff --git a/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/Emp.java b/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/Emp.java
deleted file mode 100644
index 03e15a0..0000000
--- a/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/Emp.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package demo;
-
-import java.io.Serializable;
-
-/**
- * This is a demo class used in doc/?.md
- */
-public class Emp implements Serializable {
-
-  private int id;
-  
-  private String lname;
-
-  private String fname;
-
-  private int age;
-
-  private String loc;
-
-  public Emp(int id, String lname, String fname, int age, String loc) {
-    this.id = id;
-    this.lname = lname;
-    this.fname = fname;
-    this.age = age;
-    this.loc = loc;
-  }
-
-  public int getId() {
-    return id;
-  }
-
-  public String getLname() {
-    return lname;
-  }
-
-  public String getFname() {
-    return fname;
-  }
-
-  public int getAge() {
-    return age;
-  }
-
-  public String getLoc() {
-    return loc;
-  }
-
-  @Override
-  public String toString() {
-    return "Emp(" + id + ", " + lname + ", " + fname + ", " + age + ", " + loc + ")";
-  }
-
-  @Override
-  public boolean equals(Object o) {
-    if (this == o) return true;
-    if (o == null || getClass() != o.getClass()) return false;
-
-    Emp emp = (Emp) o;
-
-    if (age != emp.age) return false;
-    if (id != emp.id) return false;
-    if (fname != null ? !fname.equals(emp.fname) : emp.fname != null) return false;
-    if (lname != null ? !lname.equals(emp.lname) : emp.lname != null) return false;
-    if (loc != null ? !loc.equals(emp.loc) : emp.loc != null) return false;
-
-    return true;
-  }
-
-  @Override
-  public int hashCode() {
-    int result = id;
-    result = 31 * result + (lname != null ? lname.hashCode() : 0);
-    result = 31 * result + (fname != null ? fname.hashCode() : 0);
-    result = 31 * result + age;
-    result = 31 * result + (loc != null ? loc.hashCode() : 0);
-    return result;
-  }
-
-}

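Since the connector itself is Scala-first, the same value type is a one-liner
there: a case class derives the equals, hashCode, and toString that Emp
implements by hand (a sketch for comparison, not part of the demos):

    // Scala equivalent of demo.Emp; case classes are Serializable, and the
    // structural equals/hashCode/toString are generated by the compiler
    case class Emp(id: Int, lname: String, fname: String, age: Int, loc: String)
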
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ddee87fe/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/OQLJavaDemo.java
----------------------------------------------------------------------
diff --git a/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/OQLJavaDemo.java b/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/OQLJavaDemo.java
deleted file mode 100644
index adcf072..0000000
--- a/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/OQLJavaDemo.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package demo;
-
-import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.sql.DataFrame;
-import org.apache.spark.sql.SQLContext;
-import static io.pivotal.geode.spark.connector.javaapi.GeodeJavaUtil.*;
-
-
-/**
- * This Spark application demonstrates how to get region data from Geode using the Geode
- * OQL Java API. The result is a Spark DataFrame.
- * <p>
- * In order to run it, you will need to start a Geode cluster, and run the
- * PairRDDSaveJavaDemo demo first to create some data in the region.
- * <p>
- * Once you compile and package the demo, the jar file basic-demos_2.10-0.5.0.jar
- * should be generated under geode-spark-demos/basic-demos/target/scala-2.10/.
- * Then run the following command to start a Spark job:
- * <pre>
- *   <path to spark>/bin/spark-submit --master=local[2] --class demo.OQLJavaDemo \
- *       <path to>/basic-demos_2.10-0.5.0.jar <locator host>:<port>
- * </pre>
- */
-public class OQLJavaDemo {
-
-  public static void main(String[] argv) {
-
-    if (argv.length != 1) {
-      System.err.printf("Usage: OQLJavaDemo <locators>\n");
-      return;
-    }
-
-    SparkConf conf = new SparkConf().setAppName("OQLJavaDemo");
-    conf.set(GeodeLocatorPropKey, argv[0]); // "192.168.1.47[10335]"
-    JavaSparkContext sc = new JavaSparkContext(conf);
-    SQLContext sqlContext = new org.apache.spark.sql.SQLContext(sc);
-    DataFrame df = javaFunctions(sqlContext).geodeOQL("select * from /str_str_region");
-    System.out.println("======= DataFrame =======\n");
-    df.show();
-    sc.stop();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ddee87fe/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/PairRDDSaveJavaDemo.java
----------------------------------------------------------------------
diff --git a/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/PairRDDSaveJavaDemo.java b/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/PairRDDSaveJavaDemo.java
deleted file mode 100644
index 52d2a99..0000000
--- a/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/PairRDDSaveJavaDemo.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package demo;
-
-import io.pivotal.geode.spark.connector.GeodeConnectionConf;
-import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.JavaPairRDD;
-import org.apache.spark.api.java.JavaRDD;
-import org.apache.spark.api.java.JavaSparkContext;
-import scala.Tuple2;
-import java.util.*;
-
-import static io.pivotal.geode.spark.connector.javaapi.GeodeJavaUtil.*;
-
-/**
- * This Spark application demonstrates how to save an RDD to Geode using the Geode Spark
- * Connector with Java.
- * <p/>
- * In order to run it, you will need to start a Geode cluster, and create the following region
- * with GFSH:
- * <pre>
- * gfsh> create region --name=str_str_region --type=REPLICATE \
- *         --key-constraint=java.lang.String --value-constraint=java.lang.String
- * </pre>
- * 
- * Once you compile and package the demo, the jar file basic-demos_2.10-0.5.0.jar
- * should be generated under geode-spark-demos/basic-demos/target/scala-2.10/.
- * Then run the following command to start a Spark job:
- * <pre>
- *   <path to spark>/bin/spark-submit --master=local[2] --class demo.PairRDDSaveJavaDemo \
- *       <path to>/basic-demos_2.10-0.5.0.jar <locator host>:<port>
- * </pre>
- * 
- * Verify the data was saved to Geode with GFSH:
- * <pre>gfsh> query --query="select * from /str_str_region.entrySet"  </pre>
- */
-public class PairRDDSaveJavaDemo {
-
-  public static void main(String[] argv) {
-
-    if (argv.length != 1) {
-      System.err.printf("Usage: PairRDDSaveJavaDemo <locators>\n");
-      return;
-    }
-
-    SparkConf conf = new SparkConf().setAppName("PairRDDSaveJavaDemo");
-    conf.set(GeodeLocatorPropKey, argv[0]);
-    JavaSparkContext sc = new JavaSparkContext(conf);
-    GeodeConnectionConf connConf = GeodeConnectionConf.apply(conf);
-
-    List<Tuple2<String, String>> data = new ArrayList<>();
-    data.add(new Tuple2<>("7", "seven"));
-    data.add(new Tuple2<>("8", "eight"));
-    data.add(new Tuple2<>("9", "nine"));
-
-    List<Tuple2<String, String>> data2 = new ArrayList<Tuple2<String, String>>();
-    data2.add(new Tuple2<>("11", "eleven"));
-    data2.add(new Tuple2<>("12", "twelve"));
-    data2.add(new Tuple2<>("13", "thirteen"));
-
-    // method 1: generate JavaPairRDD directly
-    JavaPairRDD<String, String> rdd1 =  sc.parallelizePairs(data);
-    javaFunctions(rdd1).saveToGeode("str_str_region", connConf);
-
-    // method 2: convert JavaRDD<Tuple2<K,V>> to JavaPairRDD<K, V>
-    JavaRDD<Tuple2<String, String>> rdd2 =  sc.parallelize(data2);
-    javaFunctions(toJavaPairRDD(rdd2)).saveToGeode("str_str_region", connConf);
-       
-    sc.stop();
-  }
-
-}

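The same two saves are shorter in Scala, where the pair-RDD methods arrive via
an import instead of the javaFunctions/toJavaPairRDD wrappers. A sketch,
assuming the implicit conversions live in the connector package object (the
same pattern the streaming import in NetworkWordCount below relies on):

    import io.pivotal.geode.spark.connector._
    import org.apache.spark.{SparkConf, SparkContext}

    object PairRDDSaveScalaSketch {
      def main(args: Array[String]): Unit = {
        val conf = new SparkConf().setAppName("PairRDDSaveScalaSketch")
          .set(GeodeLocatorPropKey, args(0))
        val sc = new SparkContext(conf)
        val data = Seq("7" -> "seven", "8" -> "eight", "9" -> "nine")
        // an RDD[(K, V)] gains saveToGeode through the implicit conversion
        sc.parallelize(data).saveToGeode("str_str_region")
        sc.stop()
      }
    }
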
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ddee87fe/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/RDDSaveJavaDemo.java
----------------------------------------------------------------------
diff --git a/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/RDDSaveJavaDemo.java b/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/RDDSaveJavaDemo.java
deleted file mode 100644
index 1125de5..0000000
--- a/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/RDDSaveJavaDemo.java
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package demo;
-
-import io.pivotal.geode.spark.connector.GeodeConnectionConf;
-import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.JavaRDD;
-import org.apache.spark.api.java.JavaSparkContext;
-import org.apache.spark.api.java.function.PairFunction;
-import scala.Tuple2;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import static io.pivotal.geode.spark.connector.javaapi.GeodeJavaUtil.*;
-
-/**
- * This Spark application demonstrates how to save an RDD to Geode using the Geode Spark
- * Connector with Java.
- * <p/>
- * In order to run it, you will need to start a Geode cluster, and create the following region
- * with GFSH:
- * <pre>
- * gfsh> create region --name=str_int_region --type=REPLICATE \
- *         --key-constraint=java.lang.String --value-constraint=java.lang.Integer
- * </pre>
- *
- * Once you compile and package the demo, the jar file basic-demos_2.10-0.5.0.jar
- * should be generated under geode-spark-demos/basic-demos/target/scala-2.10/.
- * Then run the following command to start a Spark job:
- * <pre>
- *   <path to spark>/bin/spark-submit --master=local[2] --class demo.RDDSaveJavaDemo \
- *       <path to>/basic-demos_2.10-0.5.0.jar <locator host>:<port>
- * </pre>
- *
- * Verify the data was saved to Geode with GFSH:
- * <pre>gfsh> query --query="select * from /str_int_region.entrySet"  </pre>
- */
-public class RDDSaveJavaDemo {
-
-  public static void main(String[] argv) {
-
-    if (argv.length != 1) {
-      System.err.printf("Usage: RDDSaveJavaDemo <locators>\n");
-      return;
-    }
-
-    SparkConf conf = new SparkConf().setAppName("RDDSaveJavaDemo");
-    conf.set(GeodeLocatorPropKey, argv[0]);
-    JavaSparkContext sc = new JavaSparkContext(conf);
-
-    List<String> data = new ArrayList<String>();
-    data.add("abcdefg");
-    data.add("abcdefgh");
-    data.add("abcdefghi");
-    JavaRDD<String> rdd =  sc.parallelize(data);
-
-    GeodeConnectionConf connConf = GeodeConnectionConf.apply(conf);
-
-    PairFunction<String, String, Integer> func =  new PairFunction<String, String, Integer>() {
-      @Override public Tuple2<String, Integer> call(String s) throws Exception {
-        return new Tuple2<String, Integer>(s, s.length());
-      }
-    };
-
-    javaFunctions(rdd).saveToGeode("str_int_region", func, connConf);
-
-    sc.stop();
-  }
-  
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ddee87fe/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/RegionToRDDJavaDemo.java
----------------------------------------------------------------------
diff --git a/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/RegionToRDDJavaDemo.java b/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/RegionToRDDJavaDemo.java
deleted file mode 100644
index 1ce8ceb..0000000
--- a/geode-spark-connector/geode-spark-demos/basic-demos/src/main/java/demo/RegionToRDDJavaDemo.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package demo;
-
-import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.JavaPairRDD;
-import org.apache.spark.api.java.JavaSparkContext;
-import static io.pivotal.geode.spark.connector.javaapi.GeodeJavaUtil.*;
-
-/**
- * This Spark application demonstrates how to expose a region in Geode as an RDD using the
- * Geode Spark Connector with Java.
- * <p>
- * In order to run it, you will need to start a Geode cluster, and run the
- * PairRDDSaveJavaDemo demo first to create some data in the region.
- * <p>
- * Once you compile and package the demo, the jar file basic-demos_2.10-0.5.0.jar
- * should be generated under geode-spark-demos/basic-demos/target/scala-2.10/.
- * Then run the following command to start a Spark job:
- * <pre>
- *   <path to spark>/bin/spark-submit --master=local[2] --class demo.RegionToRDDJavaDemo \
- *       <path to>/basic-demos_2.10-0.5.0.jar <locator host>:<port>
- * </pre>
- */
-public class RegionToRDDJavaDemo {
-
-  public static void main(String[] argv) {
-
-    if (argv.length != 1) {
-      System.err.printf("Usage: RegionToRDDJavaDemo <locators>\n");
-      return;
-    }
-    
-    SparkConf conf = new SparkConf().setAppName("RegionToRDDJavaDemo"); 
-    conf.set(GeodeLocatorPropKey, argv[0]);
-    JavaSparkContext sc = new JavaSparkContext(conf);
-
-    JavaPairRDD<String, String> rdd = javaFunctions(sc).geodeRegion("str_str_region");
-    System.out.println("=== geodeRegion =======\n" + rdd.collect() + "\n=========================");
-    
-    sc.stop();
-  }
-}

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ddee87fe/geode-spark-connector/geode-spark-demos/basic-demos/src/main/scala/demo/NetworkWordCount.scala
----------------------------------------------------------------------
diff --git a/geode-spark-connector/geode-spark-demos/basic-demos/src/main/scala/demo/NetworkWordCount.scala b/geode-spark-connector/geode-spark-demos/basic-demos/src/main/scala/demo/NetworkWordCount.scala
deleted file mode 100644
index 810b380..0000000
--- a/geode-spark-connector/geode-spark-demos/basic-demos/src/main/scala/demo/NetworkWordCount.scala
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package demo
-
-import org.apache.spark.SparkConf
-import org.apache.spark.streaming.{Seconds, StreamingContext}
-import io.pivotal.geode.spark.connector.GeodeLocatorPropKey
-import io.pivotal.geode.spark.connector.streaming._
-
-/**
- * Counts words in UTF8 encoded, '\n' delimited text received from the network every second.
- * <p><p>
- * In order to run it, you will need to start a Geode cluster, and create the following region
- * with GFSH:
- * <pre>
- * gfsh> create region --name=str_int_region --type=REPLICATE \
- *         --key-constraint=java.lang.String --value-constraint=java.lang.Integer
- * </pre> 
- *
- * <p>To run this on your local machine, you first need to run a netcat server
- *    `$ nc -lk 9999`
- * and then run the example
- *    `$ bin/spark-submit --master=local[2] --class demo.NetworkWordCount <path to>/basic-demos_2.10-0.5.0.jar localhost 9999 locatorHost:port`
- * 
- * <p><p> Check the result that was saved to Geode with GFSH:
- * <pre>gfsh> query --query="select * from /str_int_region.entrySet"  </pre>
- */
-object NetworkWordCount {
-  
-  def main(args: Array[String]) {
-    if (args.length < 3) {
-      System.err.println("Usage: NetworkWordCount <hostname> <port> <geode locator>")
-      System.exit(1)
-    }
-
-    val updateFunc = (values: Seq[Int], state: Option[Int]) => {
-      val currentCount = values.foldLeft(0)(_ + _)
-      val previousCount = state.getOrElse(0)
-      Some(currentCount + previousCount)
-    }
-    
-    // Create the context with a 1 second batch size
-    val sparkConf = new SparkConf().setAppName("NetworkWordCount").set(GeodeLocatorPropKey, args(2))
-    val ssc = new StreamingContext(sparkConf, Seconds(1))
-    ssc.checkpoint(".")
-    
-    // Create a socket stream on the target ip:port and count the
-    // words in the input stream of \n delimited text (e.g. generated by 'nc').
-    // Note: a storage level without replication is only suitable for running locally;
-    // replication is necessary in a distributed scenario for fault tolerance.
-    val lines = ssc.socketTextStream(args(0), args(1).toInt)
-    val words = lines.flatMap(_.split(" "))
-    val wordCounts = words.map(x => (x, 1)).reduceByKey(_ + _)
-    val runningCounts = wordCounts.updateStateByKey[Int](updateFunc)
-    // runningCounts.print()
-    runningCounts.saveToGeode("str_int_region")
-    ssc.start()
-    ssc.awaitTermination()
-  }
-  
-}

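The updateFunc above is an ordinary function, so its bookkeeping can be checked
without a StreamingContext; for one key across two consecutive batches:

    val updateFunc = (values: Seq[Int], state: Option[Int]) => {
      val currentCount = values.foldLeft(0)(_ + _)
      Some(currentCount + state.getOrElse(0))
    }
    updateFunc(Seq(1, 1, 1), None)  // => Some(3), first batch: word seen 3 times
    updateFunc(Seq(1, 1), Some(3))  // => Some(5), running total carried forward
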
http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ddee87fe/geode-spark-connector/project/Dependencies.scala
----------------------------------------------------------------------
diff --git a/geode-spark-connector/project/Dependencies.scala b/geode-spark-connector/project/Dependencies.scala
index b2931dd..c77c158 100644
--- a/geode-spark-connector/project/Dependencies.scala
+++ b/geode-spark-connector/project/Dependencies.scala
@@ -22,7 +22,7 @@ object Dependencies {
   object Compile {
     val sparkStreaming = "org.apache.spark" %% "spark-streaming" % "1.3.0" 
     val sparkSql = "org.apache.spark" %% "spark-sql" % "1.3.0"
-    val geode = "org.apache.geode" % "geode-core" % "1.0.0-incubating.M2-SNAPSHOT" excludeAll(ExclusionRule(organization = "org.jboss.netty") )
+    val gemfire = "org.apache.geode" % "geode-core" % "1.0.0-incubating.M2-SNAPSHOT" excludeAll(ExclusionRule(organization = "org.jboss.netty") )
   }
 
   object Test {
@@ -37,9 +37,9 @@ object Dependencies {
 
   val unitTests = Seq(scalaTest, mockito, junit, novoCode)
 
-  val connector = unitTests ++ Seq(sparkStreaming, sparkSql, geode)
+  val connector = unitTests ++ Seq(sparkStreaming, sparkSql, gemfire)
 
-  val functions = Seq(geode, junit)
+  val functions = Seq(gemfire, junit)
  
-  val demos = Seq(sparkStreaming, sparkSql, geode)
+  val demos = Seq(sparkStreaming, sparkSql, gemfire)
 }

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ddee87fe/geode-spark-connector/project/GemFireSparkBuild.scala
----------------------------------------------------------------------
diff --git a/geode-spark-connector/project/GemFireSparkBuild.scala b/geode-spark-connector/project/GemFireSparkBuild.scala
new file mode 100644
index 0000000..89d8e0b
--- /dev/null
+++ b/geode-spark-connector/project/GemFireSparkBuild.scala
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import sbt._
+import sbt.Keys._
+import scoverage.ScoverageSbtPlugin._
+import scoverage.ScoverageSbtPlugin
+
+object GemFireSparkConnectorBuild extends Build {
+  import Settings._
+  import Dependencies._ 
+
+  lazy val root = Project(
+    id = "root", 
+    base = file("."),
+    aggregate = Seq(gemfireFunctions, gemfireSparkConnector, demos),
+    settings = commonSettings ++ Seq( 
+     name := "GemFire Connector for Apache Spark",
+     publishArtifact :=  false,
+     publishLocal := { },
+     publish := { }
+    )
+  )
+ 
+  lazy val gemfireFunctions = Project(
+    id = "gemfire-functions",
+    base = file("gemfire-functions"),
+    settings = commonSettings ++ Seq(libraryDependencies ++= Dependencies.functions,
+      resolvers ++= gfcResolvers,
+      description := "Required GemFire Functions to be deployed onto the GemFire Cluster before using the GemFire Spark Connector"
+    )
+  ).configs(IntegrationTest)
+  
+  lazy val gemfireSparkConnector = Project(
+    id = "gemfire-spark-connector",
+    base = file("gemfire-spark-connector"),
+    settings = gfcSettings ++ Seq(libraryDependencies ++= Dependencies.connector,
+      resolvers ++= gfcResolvers,
+      description := "A library that exposes GemFire regions as Spark RDDs, writes Spark RDDs to GemFire regions, and executes OQL queries from Spark Applications to GemFire"
+    )
+  ).dependsOn(gemfireFunctions).configs(IntegrationTest)
+
+ 
+  /******** Demo Project Definitions ********/ 
+  lazy val demoPath = file("gemfire-spark-demos")
+
+  lazy val demos = Project ( 
+    id = "gemfire-spark-demos",
+    base = demoPath,
+    settings = demoSettings,
+    aggregate = Seq(basicDemos)
+  )
+ 
+  lazy val basicDemos = Project (
+    id = "basic-demos",
+    base = demoPath / "basic-demos",
+    settings = demoSettings ++ Seq(libraryDependencies ++= Dependencies.demos,
+      resolvers ++= gfcResolvers,
+      description := "Sample applications that demonstrates functionality of the GemFire Spark Connector"
+    )
+  ).dependsOn(gemfireSparkConnector)
+}
+

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ddee87fe/geode-spark-connector/project/GeodeSparkBuild.scala
----------------------------------------------------------------------
diff --git a/geode-spark-connector/project/GeodeSparkBuild.scala b/geode-spark-connector/project/GeodeSparkBuild.scala
deleted file mode 100644
index 07cae51..0000000
--- a/geode-spark-connector/project/GeodeSparkBuild.scala
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-import sbt._
-import sbt.Keys._
-import scoverage.ScoverageSbtPlugin._
-import scoverage.ScoverageSbtPlugin
-
-object GeodeSparkConnectorBuild extends Build {
-  import Settings._
-  import Dependencies._ 
-
-  lazy val root = Project(
-    id = "root", 
-    base = file("."),
-    aggregate = Seq(geodeFunctions, geodeSparkConnector, demos),
-    settings = commonSettings ++ Seq( 
-     name := "Geode Connector for Apache Spark",
-     publishArtifact :=  false,
-     publishLocal := { },
-     publish := { }
-    )
-  )
- 
-  lazy val geodeFunctions = Project(
-    id = "geode-functions",
-    base = file("geode-functions"),
-    settings = commonSettings ++ Seq(libraryDependencies ++= Dependencies.functions,
-      resolvers ++= gfcResolvers,
-      description := "Required Geode Functions to be deployed onto the Geode Cluster before using the Geode Spark Connector"
-    )
-  ).configs(IntegrationTest)
-  
-  lazy val geodeSparkConnector = Project(
-    id = "geode-spark-connector",
-    base = file("geode-spark-connector"),
-    settings = gfcSettings ++ Seq(libraryDependencies ++= Dependencies.connector,
-      resolvers ++= gfcResolvers,
-      description := "A library that exposes Geode regions as Spark RDDs, writes Spark RDDs to Geode regions, and executes OQL queries from Spark Applications to Geode"
-    )
-  ).dependsOn(geodeFunctions).configs(IntegrationTest)
-
- 
-  /******** Demo Project Definitions ********/ 
-  lazy val demoPath = file("geode-spark-demos")
-
-  lazy val demos = Project ( 
-    id = "geode-spark-demos",
-    base = demoPath,
-    settings = demoSettings,
-    aggregate = Seq(basicDemos)
-  )
- 
-  lazy val basicDemos = Project (
-    id = "basic-demos",
-    base = demoPath / "basic-demos",
-    settings = demoSettings ++ Seq(libraryDependencies ++= Dependencies.demos,
-      resolvers ++= gfcResolvers,
-      description := "Sample applications that demonstrates functionality of the Geode Spark Connector"
-    )
-  ).dependsOn(geodeSparkConnector)
-}
-

http://git-wip-us.apache.org/repos/asf/incubator-geode/blob/ddee87fe/geode-spark-connector/project/Settings.scala
----------------------------------------------------------------------
diff --git a/geode-spark-connector/project/Settings.scala b/geode-spark-connector/project/Settings.scala
index 9aefa9b..796541c 100644
--- a/geode-spark-connector/project/Settings.scala
+++ b/geode-spark-connector/project/Settings.scala
@@ -24,7 +24,7 @@ object Settings extends Build {
     organization := "io.pivotal",
     version := "0.5.0",
     scalaVersion := "2.10.4",
-    organization := "io.pivotal.geode.spark",
+    organization := "io.pivotal.gemfire.spark",
     organizationHomepage := Some(url("http://www.pivotal.io/"))
   ) 
 
@@ -43,7 +43,7 @@ object Settings extends Build {
   val gfcITSettings = inConfig(IntegrationTest)(Defaults.itSettings) ++
     Seq(parallelExecution in IntegrationTest := false, fork in IntegrationTest := true)
 
-  val gfcCompileSettings = inConfig(Compile)(Defaults.compileSettings) ++ Seq(unmanagedSourceDirectories in Compile += baseDirectory.value /"../geode-functions/src")
+  val gfcCompileSettings = inConfig(Compile)(Defaults.compileSettings) ++ Seq(unmanagedSourceDirectories in Compile += baseDirectory.value /"../gemfire-functions/src")
 
   val gfcSettings = commonSettings ++ gfcITSettings ++ gfcCompileSettings