You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@spark.apache.org by rx...@apache.org on 2015/05/27 09:27:43 UTC
spark git commit: [SPARK-7697][SQL] Use LongType for unsigned int in JDBCRDD
Repository: spark
Updated Branches:
refs/heads/master 6dd645870 -> 4f98d7a7f
[SPARK-7697][SQL] Use LongType for unsigned int in JDBCRDD
JIRA: https://issues.apache.org/jira/browse/SPARK-7697
The reported problem case is MySQL. But the H2 database has no unsigned int type, so it is not possible to add a corresponding test.
Author: Liang-Chi Hsieh <vi...@gmail.com>
Closes #6229 from viirya/unsignedint_as_long and squashes the following commits:
dc4b5d8 [Liang-Chi Hsieh] Merge remote-tracking branch 'upstream/master' into unsignedint_as_long
608695b [Liang-Chi Hsieh] Use LongType for unsigned int in JDBCRDD.
Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/4f98d7a7
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/4f98d7a7
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/4f98d7a7
Branch: refs/heads/master
Commit: 4f98d7a7f1715273bc91f1903bb7e0f287cc7394
Parents: 6dd6458
Author: Liang-Chi Hsieh <vi...@gmail.com>
Authored: Wed May 27 00:27:39 2015 -0700
Committer: Reynold Xin <rx...@databricks.com>
Committed: Wed May 27 00:27:39 2015 -0700
----------------------------------------------------------------------
.../main/scala/org/apache/spark/sql/jdbc/JDBCRDD.scala | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/spark/blob/4f98d7a7/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JDBCRDD.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JDBCRDD.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JDBCRDD.scala
index be03a23..244bd3e 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JDBCRDD.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JDBCRDD.scala
@@ -46,7 +46,11 @@ private[sql] object JDBCRDD extends Logging {
* @param sqlType - A field of java.sql.Types
* @return The Catalyst type corresponding to sqlType.
*/
- private def getCatalystType(sqlType: Int, precision: Int, scale: Int): DataType = {
+ private def getCatalystType(
+ sqlType: Int,
+ precision: Int,
+ scale: Int,
+ signed: Boolean): DataType = {
val answer = sqlType match {
case java.sql.Types.ARRAY => null
case java.sql.Types.BIGINT => LongType
@@ -64,7 +68,7 @@ private[sql] object JDBCRDD extends Logging {
case java.sql.Types.DISTINCT => null
case java.sql.Types.DOUBLE => DoubleType
case java.sql.Types.FLOAT => FloatType
- case java.sql.Types.INTEGER => IntegerType
+ case java.sql.Types.INTEGER => if (signed) { IntegerType } else { LongType }
case java.sql.Types.JAVA_OBJECT => null
case java.sql.Types.LONGNVARCHAR => StringType
case java.sql.Types.LONGVARBINARY => BinaryType
@@ -123,11 +127,12 @@ private[sql] object JDBCRDD extends Logging {
val typeName = rsmd.getColumnTypeName(i + 1)
val fieldSize = rsmd.getPrecision(i + 1)
val fieldScale = rsmd.getScale(i + 1)
+ val isSigned = rsmd.isSigned(i + 1)
val nullable = rsmd.isNullable(i + 1) != ResultSetMetaData.columnNoNulls
val metadata = new MetadataBuilder().putString("name", columnName)
val columnType =
dialect.getCatalystType(dataType, typeName, fieldSize, metadata).getOrElse(
- getCatalystType(dataType, fieldSize, fieldScale))
+ getCatalystType(dataType, fieldSize, fieldScale, isSigned))
fields(i) = StructField(columnName, columnType, nullable, metadata.build())
i = i + 1
}
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org