Posted to commits@spark.apache.org by gu...@apache.org on 2019/01/01 06:11:32 UTC
[spark] branch master updated: [SPARK-26499][SQL] JdbcUtils.makeGetter does not handle ByteType
This is an automated email from the ASF dual-hosted git repository.
gurwls223 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 5f0ddd2 [SPARK-26499][SQL] JdbcUtils.makeGetter does not handle ByteType
5f0ddd2 is described below
commit 5f0ddd2d6e2fdebf549207bbc4b13ca709eee3c4
Author: Thomas D'Silva <td...@apache.org>
AuthorDate: Tue Jan 1 14:11:14 2019 +0800
[SPARK-26499][SQL] JdbcUtils.makeGetter does not handle ByteType
## What changes were proposed in this pull request?
Modified JdbcUtils.makeGetter to handle ByteType.
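For reference, makeGetter builds one small closure per column that copies the JDBC value into the target InternalRow. A minimal sketch of the new ByteType case in isolation (the JDBCValueGetter alias is spelled out here only for illustration; JdbcUtils keeps a private alias of the same shape):

```scala
import java.sql.ResultSet

import org.apache.spark.sql.catalyst.InternalRow

// Shape of the per-column getters produced by JdbcUtils.makeGetter
// (written out here for illustration; in JdbcUtils this is a private type alias).
type JDBCValueGetter = (ResultSet, InternalRow, Int) => Unit

// The new ByteType case: JDBC column indexes are 1-based, so the value stored at
// row ordinal `pos` is read from ResultSet column `pos + 1`.
val byteGetter: JDBCValueGetter =
  (rs: ResultSet, row: InternalRow, pos: Int) =>
    row.update(pos, rs.getByte(pos + 1))
```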
## How was this patch tested?
Added a new test to JDBCSuite that maps ```TINYINT``` to ```ByteType```.
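End to end, the fix lets a custom dialect surface ```TINYINT``` columns as ```ByteType```. A minimal usage sketch, mirroring the new test (assumes a running SparkSession named `spark` and an H2 table `test.inttypes` containing a `TINYINT` column; the URL and table name are illustrative):

```scala
import java.util.Properties

import org.apache.spark.sql.jdbc.{JdbcDialect, JdbcDialects}
import org.apache.spark.sql.types.{ByteType, DataType, MetadataBuilder}

// Illustrative dialect that maps JDBC TINYINT to Catalyst ByteType,
// the same idea as the testH2DialectTinyInt dialect added below.
val tinyIntDialect = new JdbcDialect {
  override def canHandle(url: String): Boolean = url.startsWith("jdbc:h2")
  override def getCatalystType(
      sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] =
    if (sqlType == java.sql.Types.TINYINT) Some(ByteType) else None
}

JdbcDialects.registerDialect(tinyIntDialect)
// Before this patch, reading a TINYINT column through such a dialect failed in
// JdbcUtils.makeGetter; with it, the column comes back as ByteType.
val df = spark.read.jdbc("jdbc:h2:mem:testdb0", "test.inttypes", new Properties())
df.printSchema()
JdbcDialects.unregisterDialect(tinyIntDialect)
```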
Closes #23400 from twdsilva/tiny_int_support.
Authored-by: Thomas D'Silva <td...@apache.org>
Signed-off-by: Hyukjin Kwon <gu...@apache.org>
---
.../sql/execution/datasources/jdbc/JdbcUtils.scala | 4 ++++
.../org/apache/spark/sql/jdbc/JDBCSuite.scala | 25 ++++++++++++++++++++++
2 files changed, 29 insertions(+)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala
index edea549..922bef2 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JdbcUtils.scala
@@ -438,6 +438,10 @@ object JdbcUtils extends Logging {
      (rs: ResultSet, row: InternalRow, pos: Int) =>
        row.setShort(pos, rs.getShort(pos + 1))

+    case ByteType =>
+      (rs: ResultSet, row: InternalRow, pos: Int) =>
+        row.update(pos, rs.getByte(pos + 1))
+
    case StringType =>
      (rs: ResultSet, row: InternalRow, pos: Int) =>
        // TODO(davies): use getBytes for better performance, if the encoding is UTF-8
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
index 71e8376..e464163 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCSuite.scala
@@ -56,6 +56,20 @@ class JDBCSuite extends QueryTest
      Some(StringType)
  }

+  val testH2DialectTinyInt = new JdbcDialect {
+    override def canHandle(url: String): Boolean = url.startsWith("jdbc:h2")
+    override def getCatalystType(
+        sqlType: Int,
+        typeName: String,
+        size: Int,
+        md: MetadataBuilder): Option[DataType] = {
+      sqlType match {
+        case java.sql.Types.TINYINT => Some(ByteType)
+        case _ => None
+      }
+    }
+  }
+
  before {
    Utils.classForName("org.h2.Driver")
    // Extra properties that will be specified for our database. We need these to test
@@ -693,6 +707,17 @@ class JDBCSuite extends QueryTest
    JdbcDialects.unregisterDialect(testH2Dialect)
  }

+  test("Map TINYINT to ByteType via JdbcDialects") {
+    JdbcDialects.registerDialect(testH2DialectTinyInt)
+    val df = spark.read.jdbc(urlWithUserAndPass, "test.inttypes", new Properties())
+    val rows = df.collect()
+    assert(rows.length === 2)
+    assert(rows(0).get(2).isInstanceOf[Byte])
+    assert(rows(0).getByte(2) === 3)
+    assert(rows(1).isNullAt(2))
+    JdbcDialects.unregisterDialect(testH2DialectTinyInt)
+  }
+
  test("Default jdbc dialect registration") {
    assert(JdbcDialects.get("jdbc:mysql://127.0.0.1/db") == MySQLDialect)
    assert(JdbcDialects.get("jdbc:postgresql://127.0.0.1/db") == PostgresDialect)