You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@livy.apache.org by mg...@apache.org on 2019/08/29 12:44:53 UTC

[incubator-livy] branch master updated: [LIVY-650][THRIFT] Remove schema from ResultSet

This is an automated email from the ASF dual-hosted git repository.

mgaido pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-livy.git


The following commit(s) were added to refs/heads/master by this push:
     new 9c34750  [LIVY-650][THRIFT] Remove schema from ResultSet
9c34750 is described below

commit 9c34750230acb3c3f72b66c82bea86ca2e33fe57
Author: Marco Gaido <mg...@apache.org>
AuthorDate: Thu Aug 29 14:44:41 2019 +0200

    [LIVY-650][THRIFT] Remove schema from ResultSet
    
    ## What changes were proposed in this pull request?
    
    The class `ResultSet` is serialized and sent over the wire. Currently, this class contains a JSON string representation of the Spark schema, which is never used. Hence, the PR removes it in order to avoid serializing it unnecessarily.
    
    ## How was this patch tested?
    
    existing UTs
    
    Author: Marco Gaido <mg...@apache.org>
    
    Closes #213 from mgaido91/LIVY-650.
---
 .../org/apache/livy/thriftserver/session/FetchResultJob.java     | 2 +-
 .../java/org/apache/livy/thriftserver/session/ResultSet.java     | 9 +--------
 .../main/java/org/apache/livy/thriftserver/session/SqlJob.java   | 3 ---
 .../org/apache/livy/thriftserver/session/StatementState.java     | 1 -
 .../org/apache/livy/thriftserver/session/ThriftSessionState.java | 1 -
 .../org/apache/livy/thriftserver/session/ColumnBufferTest.java   | 2 +-
 6 files changed, 3 insertions(+), 15 deletions(-)

diff --git a/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/FetchResultJob.java b/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/FetchResultJob.java
index 09b69c9..450fd86 100644
--- a/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/FetchResultJob.java
+++ b/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/FetchResultJob.java
@@ -49,7 +49,7 @@ public class FetchResultJob implements Job<ResultSet> {
     StatementState st = session.findStatement(statementId);
     Iterator<Row> iter = st.iter;
 
-    ResultSet rs = new ResultSet(st.types, st.schema);
+    ResultSet rs = new ResultSet(st.types);
     int count = 0;
     while (iter.hasNext() && count < maxRows) {
       Row row = iter.next();
diff --git a/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/ResultSet.java b/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/ResultSet.java
index 317c4e6..cd0f71d 100644
--- a/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/ResultSet.java
+++ b/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/ResultSet.java
@@ -37,16 +37,13 @@ import org.apache.spark.sql.types.StructField;
  */
 public class ResultSet {
 
-  private final String schema;
   private final ColumnBuffer[] columns;
 
   public ResultSet() {
-    this.schema = null;
     this.columns = null;
   }
 
-  public ResultSet(DataType[] types, String schema) {
-    this.schema = schema;
+  public ResultSet(DataType[] types) {
     this.columns = new ColumnBuffer[types.length];
     for (int i = 0; i < columns.length; i++) {
       columns[i] = new ColumnBuffer(types[i]);
@@ -69,10 +66,6 @@ public class ResultSet {
     }
   }
 
-  public String getSchema() {
-    return schema;
-  }
-
   public ColumnBuffer[] getColumns() {
     return columns;
   }
diff --git a/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/SqlJob.java b/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/SqlJob.java
index a0b9c85..849c057 100644
--- a/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/SqlJob.java
+++ b/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/SqlJob.java
@@ -18,9 +18,7 @@
 package org.apache.livy.thriftserver.session;
 
 import java.util.Iterator;
-import java.util.List;
 
-import org.apache.spark.rdd.RDD;
 import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.Row;
 import org.apache.spark.sql.SparkSession;
@@ -88,5 +86,4 @@ public class SqlJob implements Job<Void> {
     // has been executed.
     session.registerStatement(statementId, schema, iter);
   }
-
 }
diff --git a/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/StatementState.java b/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/StatementState.java
index 5238845..7a300e9 100644
--- a/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/StatementState.java
+++ b/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/StatementState.java
@@ -36,5 +36,4 @@ class StatementState {
     this.iter = iter;
     this.types = SparkUtils.translateSchema(schema);
   }
-
 }
diff --git a/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/ThriftSessionState.java b/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/ThriftSessionState.java
index 5378270..9111d94 100644
--- a/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/ThriftSessionState.java
+++ b/thriftserver/session/src/main/java/org/apache/livy/thriftserver/session/ThriftSessionState.java
@@ -157,5 +157,4 @@ class ThriftSessionState {
     return new NoSuchElementException(
         String.format("Catalog job %s not found in session %s.", jobId, sessionId));
   }
-
 }
diff --git a/thriftserver/session/src/test/java/org/apache/livy/thriftserver/session/ColumnBufferTest.java b/thriftserver/session/src/test/java/org/apache/livy/thriftserver/session/ColumnBufferTest.java
index 518146a..71ed020 100644
--- a/thriftserver/session/src/test/java/org/apache/livy/thriftserver/session/ColumnBufferTest.java
+++ b/thriftserver/session/src/test/java/org/apache/livy/thriftserver/session/ColumnBufferTest.java
@@ -79,7 +79,7 @@ public class ColumnBufferTest {
 
       ds.write().format("parquet").saveAsTable("types_test");
 
-      ResultSet rs = new ResultSet(SparkUtils.translateSchema(ds.schema()), ds.schema().json());
+      ResultSet rs = new ResultSet(SparkUtils.translateSchema(ds.schema()));
       for (Row r : spark.table("types_test").collectAsList()) {
         Object[] cols = new Object[r.length()];
         for (int i = 0; i < cols.length; i++) {