You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by bu...@apache.org on 2017/06/19 21:35:45 UTC
[1/2] hbase git commit: HBASE-17546 Correct scala syntax to match
Apache Spark examples
Repository: hbase
Updated Branches:
refs/heads/branch-2 d3ba357c7 -> 79607fda8
refs/heads/master 226319036 -> 7b6eb90ac
HBASE-17546 Correct scala syntax to match Apache Spark examples
Signed-off-by: Sean Busbey <bu...@apache.org>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/7b6eb90a
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/7b6eb90a
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/7b6eb90a
Branch: refs/heads/master
Commit: 7b6eb90ac9d6f10324f5633baa741d49638064f9
Parents: 2263190
Author: dskskv <ck...@gmail.com>
Authored: Thu Jan 26 20:11:05 2017 +0530
Committer: Sean Busbey <bu...@apache.org>
Committed: Mon Jun 19 16:14:21 2017 -0500
----------------------------------------------------------------------
.../spark/example/datasources/AvroSource.scala | 12 ++++++------
.../spark/example/datasources/DataType.scala | 18 +++++++++---------
.../spark/example/datasources/HBaseSource.scala | 2 +-
3 files changed, 16 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/7b6eb90a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala
----------------------------------------------------------------------
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala
index 2880c5d..c09e99d 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala
@@ -119,14 +119,14 @@ object AvroSource {
.save()
val df = withCatalog(catalog)
- df.show
+ df.show()
df.printSchema()
df.registerTempTable("ExampleAvrotable")
val c = sqlContext.sql("select count(1) from ExampleAvrotable")
- c.show
+ c.show()
val filtered = df.select($"col0", $"col1.favorite_array").where($"col0" === "name001")
- filtered.show
+ filtered.show()
val collected = filtered.collect()
if (collected(0).getSeq[String](1)(0) != "number1") {
throw new UserCustomizedSampleException("value invalid")
@@ -141,7 +141,7 @@ object AvroSource {
.format("org.apache.hadoop.hbase.spark")
.save()
val newDF = withCatalog(avroCatalogInsert)
- newDF.show
+ newDF.show()
newDF.printSchema()
if(newDF.count() != 256) {
throw new UserCustomizedSampleException("value invalid")
@@ -149,10 +149,10 @@ object AvroSource {
df.filter($"col1.name" === "name005" || $"col1.name" <= "name005")
.select("col0", "col1.favorite_color", "col1.favorite_number")
- .show
+ .show()
df.filter($"col1.name" <= "name005" || $"col1.name".contains("name007"))
.select("col0", "col1.favorite_color", "col1.favorite_number")
- .show
+ .show()
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/7b6eb90a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala
----------------------------------------------------------------------
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala
index 5839bf7..96c6d6e 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala
@@ -100,56 +100,56 @@ object DataType {
// test less than 0
val df = withCatalog(cat)
val s = df.filter($"col0" < 0)
- s.show
+ s.show()
if(s.count() != 16){
throw new UserCustomizedSampleException("value invalid")
}
//test less or equal than -10. The number of results is 11
val num1 = df.filter($"col0" <= -10)
- num1.show
+ num1.show()
val c1 = num1.count()
println(s"test result count should be 11: $c1")
//test less or equal than -9. The number of results is 12
val num2 = df.filter($"col0" <= -9)
- num2.show
+ num2.show()
val c2 = num2.count()
println(s"test result count should be 12: $c2")
//test greater or equal than -9. The number of results is 21
val num3 = df.filter($"col0" >= -9)
- num3.show
+ num3.show()
val c3 = num3.count()
println(s"test result count should be 21: $c3")
//test greater or equal than 0. The number of results is 16
val num4 = df.filter($"col0" >= 0)
- num4.show
+ num4.show()
val c4 = num4.count()
println(s"test result count should be 16: $c4")
//test greater than 10. The number of results is 10
val num5 = df.filter($"col0" > 10)
- num5.show
+ num5.show()
val c5 = num5.count()
println(s"test result count should be 10: $c5")
// test "and". The number of results is 11
val num6 = df.filter($"col0" > -10 && $"col0" <= 10)
- num6.show
+ num6.show()
val c6 = num6.count()
println(s"test result count should be 11: $c6")
//test "or". The number of results is 21
val num7 = df.filter($"col0" <= -10 || $"col0" > 10)
- num7.show
+ num7.show()
val c7 = num7.count()
println(s"test result count should be 21: $c7")
//test "all". The number of results is 32
val num8 = df.filter($"col0" >= -100)
- num8.show
+ num8.show()
val c8 = num8.count()
println(s"test result count should be 32: $c8")
http://git-wip-us.apache.org/repos/asf/hbase/blob/7b6eb90a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala
----------------------------------------------------------------------
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala
index ed23990..056c071 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala
@@ -89,7 +89,7 @@ object HBaseSource {
.save()
val df = withCatalog(cat)
- df.show
+ df.show()
df.filter($"col0" <= "row005")
.select($"col0", $"col1").show
df.filter($"col0" === "row005" || $"col0" <= "row005")
[2/2] hbase git commit: HBASE-17546 Correct scala syntax to match
Apache Spark examples
Posted by bu...@apache.org.
HBASE-17546 Correct scala syntax to match Apache Spark examples
Signed-off-by: Sean Busbey <bu...@apache.org>
Project: http://git-wip-us.apache.org/repos/asf/hbase/repo
Commit: http://git-wip-us.apache.org/repos/asf/hbase/commit/79607fda
Tree: http://git-wip-us.apache.org/repos/asf/hbase/tree/79607fda
Diff: http://git-wip-us.apache.org/repos/asf/hbase/diff/79607fda
Branch: refs/heads/branch-2
Commit: 79607fda85b58fc0ba9f9045fd6990d2841e276c
Parents: d3ba357
Author: dskskv <ck...@gmail.com>
Authored: Thu Jan 26 20:11:05 2017 +0530
Committer: Sean Busbey <bu...@apache.org>
Committed: Mon Jun 19 16:27:00 2017 -0500
----------------------------------------------------------------------
.../spark/example/datasources/AvroSource.scala | 12 ++++++------
.../spark/example/datasources/DataType.scala | 18 +++++++++---------
.../spark/example/datasources/HBaseSource.scala | 2 +-
3 files changed, 16 insertions(+), 16 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hbase/blob/79607fda/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala
----------------------------------------------------------------------
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala
index 2880c5d..c09e99d 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/AvroSource.scala
@@ -119,14 +119,14 @@ object AvroSource {
.save()
val df = withCatalog(catalog)
- df.show
+ df.show()
df.printSchema()
df.registerTempTable("ExampleAvrotable")
val c = sqlContext.sql("select count(1) from ExampleAvrotable")
- c.show
+ c.show()
val filtered = df.select($"col0", $"col1.favorite_array").where($"col0" === "name001")
- filtered.show
+ filtered.show()
val collected = filtered.collect()
if (collected(0).getSeq[String](1)(0) != "number1") {
throw new UserCustomizedSampleException("value invalid")
@@ -141,7 +141,7 @@ object AvroSource {
.format("org.apache.hadoop.hbase.spark")
.save()
val newDF = withCatalog(avroCatalogInsert)
- newDF.show
+ newDF.show()
newDF.printSchema()
if(newDF.count() != 256) {
throw new UserCustomizedSampleException("value invalid")
@@ -149,10 +149,10 @@ object AvroSource {
df.filter($"col1.name" === "name005" || $"col1.name" <= "name005")
.select("col0", "col1.favorite_color", "col1.favorite_number")
- .show
+ .show()
df.filter($"col1.name" <= "name005" || $"col1.name".contains("name007"))
.select("col0", "col1.favorite_color", "col1.favorite_number")
- .show
+ .show()
}
}
http://git-wip-us.apache.org/repos/asf/hbase/blob/79607fda/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala
----------------------------------------------------------------------
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala
index 5839bf7..96c6d6e 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/DataType.scala
@@ -100,56 +100,56 @@ object DataType {
// test less than 0
val df = withCatalog(cat)
val s = df.filter($"col0" < 0)
- s.show
+ s.show()
if(s.count() != 16){
throw new UserCustomizedSampleException("value invalid")
}
//test less or equal than -10. The number of results is 11
val num1 = df.filter($"col0" <= -10)
- num1.show
+ num1.show()
val c1 = num1.count()
println(s"test result count should be 11: $c1")
//test less or equal than -9. The number of results is 12
val num2 = df.filter($"col0" <= -9)
- num2.show
+ num2.show()
val c2 = num2.count()
println(s"test result count should be 12: $c2")
//test greater or equal than -9. The number of results is 21
val num3 = df.filter($"col0" >= -9)
- num3.show
+ num3.show()
val c3 = num3.count()
println(s"test result count should be 21: $c3")
//test greater or equal than 0. The number of results is 16
val num4 = df.filter($"col0" >= 0)
- num4.show
+ num4.show()
val c4 = num4.count()
println(s"test result count should be 16: $c4")
//test greater than 10. The number of results is 10
val num5 = df.filter($"col0" > 10)
- num5.show
+ num5.show()
val c5 = num5.count()
println(s"test result count should be 10: $c5")
// test "and". The number of results is 11
val num6 = df.filter($"col0" > -10 && $"col0" <= 10)
- num6.show
+ num6.show()
val c6 = num6.count()
println(s"test result count should be 11: $c6")
//test "or". The number of results is 21
val num7 = df.filter($"col0" <= -10 || $"col0" > 10)
- num7.show
+ num7.show()
val c7 = num7.count()
println(s"test result count should be 21: $c7")
//test "all". The number of results is 32
val num8 = df.filter($"col0" >= -100)
- num8.show
+ num8.show()
val c8 = num8.count()
println(s"test result count should be 32: $c8")
http://git-wip-us.apache.org/repos/asf/hbase/blob/79607fda/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala
----------------------------------------------------------------------
diff --git a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala
index ed23990..056c071 100644
--- a/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala
+++ b/hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/example/datasources/HBaseSource.scala
@@ -89,7 +89,7 @@ object HBaseSource {
.save()
val df = withCatalog(cat)
- df.show
+ df.show()
df.filter($"col0" <= "row005")
.select($"col0", $"col1").show
df.filter($"col0" === "row005" || $"col0" <= "row005")