Posted to commits@camel.apache.org by da...@apache.org on 2019/12/07 20:54:23 UTC

[camel] 07/08: CAMEL-14263: camel-spark-rest should use source code generated configurer to avoid reflection configuration.

This is an automated email from the ASF dual-hosted git repository.

davsclaus pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/camel.git

commit c4f93f29789686971e7f65abcb4d3a4728b6dae1
Author: Claus Ibsen <cl...@gmail.com>
AuthorDate: Sat Dec 7 21:35:08 2019 +0100

    CAMEL-14263: camel-spark-rest should use source code generated configurer to avoid reflection configuration.
---
 .../camel/component/sparkrest/SparkComponent.java  |   2 -
 .../endpoint/dsl/SparkEndpointBuilderFactory.java  | 347 +++++++++++++++------
 2 files changed, 243 insertions(+), 106 deletions(-)

diff --git a/components/camel-spark-rest/src/main/java/org/apache/camel/component/sparkrest/SparkComponent.java b/components/camel-spark-rest/src/main/java/org/apache/camel/component/sparkrest/SparkComponent.java
index 6446d1c..be537a4 100644
--- a/components/camel-spark-rest/src/main/java/org/apache/camel/component/sparkrest/SparkComponent.java
+++ b/components/camel-spark-rest/src/main/java/org/apache/camel/component/sparkrest/SparkComponent.java
@@ -204,8 +204,6 @@ public class SparkComponent extends DefaultComponent implements RestConsumerFact
     @Override
     protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
         SparkConfiguration config = getSparkConfiguration().copy();
-        //TODO: we need to remove the usage of setProperties for populating the copy of the configuration
-        setProperties(config, parameters);
 
         SparkEndpoint answer = new SparkEndpoint(uri, this);
         answer.setSparkConfiguration(config);
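
For context, the reflection-free configuration this commit moves toward relies on
camel-package-maven-plugin generating a configurer class per endpoint that
dispatches on the option name and invokes the typed setter directly, instead of
discovering setters reflectively through setProperties. The sketch below is a
rough illustration of that pattern only, not the generated source: the class
name, the method signature, and the setter names on SparkConfiguration are
assumptions here.

    import org.apache.camel.component.sparkrest.SparkEndpoint;

    // Illustrative sketch only; the real configurer is generated by
    // camel-package-maven-plugin and its shape may differ.
    public class SparkEndpointConfigurerSketch {

        // Instead of reflectively locating setters at runtime, a generated
        // configurer switches on the option name and calls the typed setter.
        // Returns true when the option was recognised and applied.
        public boolean configure(Object target, String name, Object value) {
            SparkEndpoint endpoint = (SparkEndpoint) target;
            switch (name) {
                case "mapHeaders": // assumed setter name on SparkConfiguration
                    endpoint.getSparkConfiguration().setMapHeaders((Boolean) value);
                    return true;
                case "urlDecodeHeaders": // assumed setter name
                    endpoint.getSparkConfiguration().setUrlDecodeHeaders((Boolean) value);
                    return true;
                default:
                    return false; // unknown option: caller can fall back or fail
            }
        }
    }
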
diff --git a/core/camel-endpointdsl/src/main/java/org/apache/camel/builder/endpoint/dsl/SparkEndpointBuilderFactory.java b/core/camel-endpointdsl/src/main/java/org/apache/camel/builder/endpoint/dsl/SparkEndpointBuilderFactory.java
index 1c0237d..c3eea0f 100644
--- a/core/camel-endpointdsl/src/main/java/org/apache/camel/builder/endpoint/dsl/SparkEndpointBuilderFactory.java
+++ b/core/camel-endpointdsl/src/main/java/org/apache/camel/builder/endpoint/dsl/SparkEndpointBuilderFactory.java
@@ -17,13 +17,15 @@
 package org.apache.camel.builder.endpoint.dsl;
 
 import javax.annotation.Generated;
+import org.apache.camel.ExchangePattern;
 import org.apache.camel.builder.EndpointConsumerBuilder;
 import org.apache.camel.builder.EndpointProducerBuilder;
 import org.apache.camel.builder.endpoint.AbstractEndpointBuilder;
+import org.apache.camel.spi.ExceptionHandler;
 
 /**
- * The spark component can be used to send RDD or DataFrame jobs to Apache Spark
- * cluster.
+ * The spark-rest component is used for hosting REST services which have been
+ * defined using the Camel rest-dsl.
  * 
  * Generated by camel-package-maven-plugin - do not edit this file!
  */
@@ -32,179 +34,262 @@ public interface SparkEndpointBuilderFactory {
 
 
     /**
-     * Builder for endpoint for the Spark component.
+     * Builder for endpoint for the Spark Rest component.
      */
-    public interface SparkEndpointBuilder extends EndpointProducerBuilder {
+    public interface SparkEndpointBuilder extends EndpointConsumerBuilder {
         default AdvancedSparkEndpointBuilder advanced() {
             return (AdvancedSparkEndpointBuilder) this;
         }
         /**
-         * Indicates if results should be collected or counted.
+         * Accept type, such as 'text/xml' or 'application/json'. By default
+         * we accept all kinds of types.
+         * 
+         * The option is a: <code>java.lang.String</code> type.
+         * 
+         * Group: consumer
+         */
+        default SparkEndpointBuilder accept(String accept) {
+            doSetProperty("accept", accept);
+            return this;
+        }
+        /**
+         * Allows for bridging the consumer to the Camel routing Error Handler,
+         * which means any exceptions that occur while the consumer is trying
+         * to pick up incoming messages, or the like, will now be processed as
+         * a message and handled by the routing Error Handler. By default the
+         * consumer will use the org.apache.camel.spi.ExceptionHandler to deal
+         * with exceptions, which will be logged at WARN or ERROR level and
+         * ignored.
          * 
          * The option is a: <code>boolean</code> type.
          * 
-         * Group: producer
+         * Group: consumer
          */
-        default SparkEndpointBuilder collect(boolean collect) {
-            doSetProperty("collect", collect);
+        default SparkEndpointBuilder bridgeErrorHandler(
+                boolean bridgeErrorHandler) {
+            doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
             return this;
         }
         /**
-         * Indicates if results should be collected or counted.
+         * Allows for bridging the consumer to the Camel routing Error Handler,
+         * which means any exceptions that occur while the consumer is trying
+         * to pick up incoming messages, or the like, will now be processed as
+         * a message and handled by the routing Error Handler. By default the
+         * consumer will use the org.apache.camel.spi.ExceptionHandler to deal
+         * with exceptions, which will be logged at WARN or ERROR level and
+         * ignored.
          * 
          * The option will be converted to a <code>boolean</code> type.
          * 
-         * Group: producer
+         * Group: consumer
          */
-        default SparkEndpointBuilder collect(String collect) {
-            doSetProperty("collect", collect);
+        default SparkEndpointBuilder bridgeErrorHandler(
+                String bridgeErrorHandler) {
+            doSetProperty("bridgeErrorHandler", bridgeErrorHandler);
             return this;
         }
         /**
-         * DataFrame to compute against.
+         * Determines whether or not the raw input stream from Spark
+         * HttpRequest#getContent() is cached (Camel will read the stream into
+         * an in-memory, light-weight stream cache). By default Camel will
+         * cache the Netty input stream to support reading it multiple times,
+         * to ensure Camel can retrieve all data from the stream. However you
+         * can set this option to true when you, for example, need to access
+         * the raw stream, such as streaming it directly to a file or other
+         * persistent store. Mind that if you enable this option, then you
+         * cannot read the Netty stream multiple times out of the box, and you
+         * would need to manually reset the reader index on the Spark raw
+         * stream.
          * 
-         * The option is a:
-         * <code>org.apache.spark.sql.Dataset&lt;org.apache.spark.sql.Row&gt;</code> type.
+         * The option is a: <code>boolean</code> type.
          * 
-         * Group: producer
+         * Group: consumer
          */
-        default SparkEndpointBuilder dataFrame(Object dataFrame) {
-            doSetProperty("dataFrame", dataFrame);
+        default SparkEndpointBuilder disableStreamCache(
+                boolean disableStreamCache) {
+            doSetProperty("disableStreamCache", disableStreamCache);
             return this;
         }
         /**
-         * DataFrame to compute against.
+         * Determines whether or not the raw input stream from Spark
+         * HttpRequest#getContent() is cached (Camel will read the stream into
+         * an in-memory, light-weight stream cache). By default Camel will
+         * cache the Netty input stream to support reading it multiple times,
+         * to ensure Camel can retrieve all data from the stream. However you
+         * can set this option to true when you, for example, need to access
+         * the raw stream, such as streaming it directly to a file or other
+         * persistent store. Mind that if you enable this option, then you
+         * cannot read the Netty stream multiple times out of the box, and you
+         * would need to manually reset the reader index on the Spark raw
+         * stream.
          * 
-         * The option will be converted to a
-         * <code>org.apache.spark.sql.Dataset&lt;org.apache.spark.sql.Row&gt;</code> type.
+         * The option will be converted to a <code>boolean</code> type.
          * 
-         * Group: producer
+         * Group: consumer
          */
-        default SparkEndpointBuilder dataFrame(String dataFrame) {
-            doSetProperty("dataFrame", dataFrame);
+        default SparkEndpointBuilder disableStreamCache(
+                String disableStreamCache) {
+            doSetProperty("disableStreamCache", disableStreamCache);
             return this;
         }
         /**
-         * Function performing action against an DataFrame.
+         * If this option is enabled, then during binding from Spark to the
+         * Camel Message the headers will be mapped as well (eg added as
+         * headers on the Camel Message). You can turn this option off to
+         * disable this. The headers can still be accessed from the
+         * org.apache.camel.component.sparkrest.SparkMessage message with the
+         * method getRequest() that returns the Spark HTTP request instance.
          * 
-         * The option is a:
-         * <code>org.apache.camel.component.spark.DataFrameCallback</code> type.
+         * The option is a: <code>boolean</code> type.
          * 
-         * Group: producer
+         * Group: consumer
          */
-        default SparkEndpointBuilder dataFrameCallback(Object dataFrameCallback) {
-            doSetProperty("dataFrameCallback", dataFrameCallback);
+        default SparkEndpointBuilder mapHeaders(boolean mapHeaders) {
+            doSetProperty("mapHeaders", mapHeaders);
             return this;
         }
         /**
-         * Function performing action against an DataFrame.
+         * If this option is enabled, then during binding from Spark to the
+         * Camel Message the headers will be mapped as well (eg added as
+         * headers on the Camel Message). You can turn this option off to
+         * disable this. The headers can still be accessed from the
+         * org.apache.camel.component.sparkrest.SparkMessage message with the
+         * method getRequest() that returns the Spark HTTP request instance.
          * 
-         * The option will be converted to a
-         * <code>org.apache.camel.component.spark.DataFrameCallback</code> type.
+         * The option will be converted to a <code>boolean</code> type.
+         * 
+         * Group: consumer
+         */
+        default SparkEndpointBuilder mapHeaders(String mapHeaders) {
+            doSetProperty("mapHeaders", mapHeaders);
+            return this;
+        }
+        /**
+         * If enabled, and an Exchange failed processing on the consumer side,
+         * then the caused Exception will be sent back serialized in the
+         * response as an application/x-java-serialized-object content type.
+         * This is turned off by default. If you enable this, then be aware
+         * that Java will deserialize the incoming data from the request and
+         * that can be a potential security risk.
+         * 
+         * The option is a: <code>boolean</code> type.
+         * 
+         * Group: consumer
+         */
+        default SparkEndpointBuilder transferException(boolean transferException) {
+            doSetProperty("transferException", transferException);
+            return this;
+        }
+        /**
+         * If enabled, and an Exchange failed processing on the consumer side,
+         * then the caused Exception will be sent back serialized in the
+         * response as an application/x-java-serialized-object content type.
+         * This is turned off by default. If you enable this, then be aware
+         * that Java will deserialize the incoming data from the request and
+         * that can be a potential security risk.
+         * 
+         * The option will be converted to a <code>boolean</code> type.
          * 
-         * Group: producer
+         * Group: consumer
          */
-        default SparkEndpointBuilder dataFrameCallback(String dataFrameCallback) {
-            doSetProperty("dataFrameCallback", dataFrameCallback);
+        default SparkEndpointBuilder transferException(String transferException) {
+            doSetProperty("transferException", transferException);
             return this;
         }
         /**
-         * Whether the producer should be started lazy (on the first message).
-         * By starting lazy you can use this to allow CamelContext and routes to
-         * startup in situations where a producer may otherwise fail during
-         * starting and cause the route to fail being started. By deferring this
-         * startup to be lazy then the startup failure can be handled during
-         * routing messages via Camel's routing error handlers. Beware that when
-         * the first message is processed then creating and starting the
-         * producer may take a little time and prolong the total processing time
-         * of the processing.
+         * If this option is enabled, then during binding from Spark to the
+         * Camel Message the header values will be URL decoded (eg %20 becomes
+         * a space character).
          * 
          * The option is a: <code>boolean</code> type.
          * 
-         * Group: producer
+         * Group: consumer
          */
-        default SparkEndpointBuilder lazyStartProducer(boolean lazyStartProducer) {
-            doSetProperty("lazyStartProducer", lazyStartProducer);
+        default SparkEndpointBuilder urlDecodeHeaders(boolean urlDecodeHeaders) {
+            doSetProperty("urlDecodeHeaders", urlDecodeHeaders);
             return this;
         }
         /**
-         * Whether the producer should be started lazy (on the first message).
-         * By starting lazy you can use this to allow CamelContext and routes to
-         * startup in situations where a producer may otherwise fail during
-         * starting and cause the route to fail being started. By deferring this
-         * startup to be lazy then the startup failure can be handled during
-         * routing messages via Camel's routing error handlers. Beware that when
-         * the first message is processed then creating and starting the
-         * producer may take a little time and prolong the total processing time
-         * of the processing.
+         * If this option is enabled, then during binding from Spark to the
+         * Camel Message the header values will be URL decoded (eg %20 becomes
+         * a space character).
          * 
          * The option will be converted to a <code>boolean</code> type.
          * 
-         * Group: producer
+         * Group: consumer
          */
-        default SparkEndpointBuilder lazyStartProducer(String lazyStartProducer) {
-            doSetProperty("lazyStartProducer", lazyStartProducer);
+        default SparkEndpointBuilder urlDecodeHeaders(String urlDecodeHeaders) {
+            doSetProperty("urlDecodeHeaders", urlDecodeHeaders);
             return this;
         }
+    }
+
+    /**
+     * Advanced builder for endpoint for the Spark Rest component.
+     */
+    public interface AdvancedSparkEndpointBuilder
+            extends
+                EndpointConsumerBuilder {
+        default SparkEndpointBuilder basic() {
+            return (SparkEndpointBuilder) this;
+        }
         /**
-         * RDD to compute against.
+         * To let the consumer use a custom ExceptionHandler. Notice that if
+         * the option bridgeErrorHandler is enabled then this option is not in
+         * use. By default the consumer will deal with exceptions, which will
+         * be logged at WARN or ERROR level and ignored.
          * 
-         * The option is a: <code>org.apache.spark.api.java.JavaRDDLike</code>
+         * The option is a: <code>org.apache.camel.spi.ExceptionHandler</code>
          * type.
          * 
-         * Group: producer
+         * Group: consumer (advanced)
          */
-        default SparkEndpointBuilder rdd(Object rdd) {
-            doSetProperty("rdd", rdd);
+        default AdvancedSparkEndpointBuilder exceptionHandler(
+                ExceptionHandler exceptionHandler) {
+            doSetProperty("exceptionHandler", exceptionHandler);
             return this;
         }
         /**
-         * RDD to compute against.
+         * To let the consumer use a custom ExceptionHandler. Notice that if
+         * the option bridgeErrorHandler is enabled then this option is not in
+         * use. By default the consumer will deal with exceptions, which will
+         * be logged at WARN or ERROR level and ignored.
          * 
          * The option will be converted to a
-         * <code>org.apache.spark.api.java.JavaRDDLike</code> type.
+         * <code>org.apache.camel.spi.ExceptionHandler</code> type.
          * 
-         * Group: producer
+         * Group: consumer (advanced)
          */
-        default SparkEndpointBuilder rdd(String rdd) {
-            doSetProperty("rdd", rdd);
+        default AdvancedSparkEndpointBuilder exceptionHandler(
+                String exceptionHandler) {
+            doSetProperty("exceptionHandler", exceptionHandler);
             return this;
         }
         /**
-         * Function performing action against an RDD.
+         * Sets the exchange pattern when the consumer creates an exchange.
          * 
-         * The option is a:
-         * <code>org.apache.camel.component.spark.RddCallback</code> type.
+         * The option is a: <code>org.apache.camel.ExchangePattern</code> type.
          * 
-         * Group: producer
+         * Group: consumer (advanced)
          */
-        default SparkEndpointBuilder rddCallback(Object rddCallback) {
-            doSetProperty("rddCallback", rddCallback);
+        default AdvancedSparkEndpointBuilder exchangePattern(
+                ExchangePattern exchangePattern) {
+            doSetProperty("exchangePattern", exchangePattern);
             return this;
         }
         /**
-         * Function performing action against an RDD.
+         * Sets the exchange pattern when the consumer creates an exchange.
          * 
          * The option will be converted to a
-         * <code>org.apache.camel.component.spark.RddCallback</code> type.
+         * <code>org.apache.camel.ExchangePattern</code> type.
          * 
-         * Group: producer
+         * Group: consumer (advanced)
          */
-        default SparkEndpointBuilder rddCallback(String rddCallback) {
-            doSetProperty("rddCallback", rddCallback);
+        default AdvancedSparkEndpointBuilder exchangePattern(
+                String exchangePattern) {
+            doSetProperty("exchangePattern", exchangePattern);
             return this;
         }
-    }
-
-    /**
-     * Advanced builder for endpoint for the Spark component.
-     */
-    public interface AdvancedSparkEndpointBuilder
-            extends
-                EndpointProducerBuilder {
-        default SparkEndpointBuilder basic() {
-            return (SparkEndpointBuilder) this;
-        }
         /**
          * Whether the endpoint should use basic property binding (Camel 2.x) or
          * the newer property binding with additional capabilities.
@@ -232,6 +317,56 @@ public interface SparkEndpointBuilderFactory {
             return this;
         }
         /**
+         * Whether or not the consumer should try to find a target consumer by
+         * matching the URI prefix if no exact match is found.
+         * 
+         * The option is a: <code>boolean</code> type.
+         * 
+         * Group: advanced
+         */
+        default AdvancedSparkEndpointBuilder matchOnUriPrefix(
+                boolean matchOnUriPrefix) {
+            doSetProperty("matchOnUriPrefix", matchOnUriPrefix);
+            return this;
+        }
+        /**
+         * Whether or not the consumer should try to find a target consumer by
+         * matching the URI prefix if no exact match is found.
+         * 
+         * The option will be converted to a <code>boolean</code> type.
+         * 
+         * Group: advanced
+         */
+        default AdvancedSparkEndpointBuilder matchOnUriPrefix(
+                String matchOnUriPrefix) {
+            doSetProperty("matchOnUriPrefix", matchOnUriPrefix);
+            return this;
+        }
+        /**
+         * To use a custom SparkBinding to map to/from the Camel message.
+         * 
+         * The option is a:
+         * <code>org.apache.camel.component.sparkrest.SparkBinding</code> type.
+         * 
+         * Group: advanced
+         */
+        default AdvancedSparkEndpointBuilder sparkBinding(Object sparkBinding) {
+            doSetProperty("sparkBinding", sparkBinding);
+            return this;
+        }
+        /**
+         * To use a custom SparkBinding to map to/from the Camel message.
+         * 
+         * The option will be converted to a
+         * <code>org.apache.camel.component.sparkrest.SparkBinding</code> type.
+         * 
+         * Group: advanced
+         */
+        default AdvancedSparkEndpointBuilder sparkBinding(String sparkBinding) {
+            doSetProperty("sparkBinding", sparkBinding);
+            return this;
+        }
+        /**
          * Sets whether synchronous processing should be strictly used, or Camel
          * is allowed to use asynchronous processing (if supported).
          * 
@@ -257,24 +392,28 @@ public interface SparkEndpointBuilderFactory {
         }
     }
     /**
-     * Spark (camel-spark)
-     * The spark component can be used to send RDD or DataFrame jobs to Apache
-     * Spark cluster.
+     * Spark Rest (camel-spark-rest)
+     * The spark-rest component is used for hosting REST services which have
+     * been defined using the Camel rest-dsl.
+     * 
+     * Category: rest
+     * Since: 2.14
+     * Maven coordinates: org.apache.camel:camel-spark-rest
      * 
-     * Category: bigdata,iot
-     * Since: 2.17
-     * Maven coordinates: org.apache.camel:camel-spark
+     * Syntax: <code>spark-rest:verb:path</code>
      * 
-     * Syntax: <code>spark:endpointType</code>
+     * Path parameter: verb (required)
+     * get, post, put, patch, delete, head, trace, connect, or options.
+     * The value can be one of: get, post, put, patch, delete, head, trace,
+     * connect, options
      * 
-     * Path parameter: endpointType (required)
-     * Type of the endpoint (rdd, dataframe, hive).
-     * The value can be one of: rdd, dataframe, hive
+     * Path parameter: path (required)
+     * The content path, which supports Spark syntax.
      */
-    default SparkEndpointBuilder spark(String path) {
+    default SparkEndpointBuilder sparkRest(String path) {
         class SparkEndpointBuilderImpl extends AbstractEndpointBuilder implements SparkEndpointBuilder, AdvancedSparkEndpointBuilder {
             public SparkEndpointBuilderImpl(String path) {
-                super("spark", path);
+                super("spark-rest", path);
             }
         }
         return new SparkEndpointBuilderImpl(path);
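
With the regenerated DSL above, a route can consume from the renamed
sparkRest(...) builder. The following is a minimal usage sketch, assuming an
EndpointRouteBuilder from camel-endpointdsl; the verb:path value and the option
settings are illustrative only, not part of this commit.

    import org.apache.camel.builder.endpoint.EndpointRouteBuilder;

    public class SparkRestHelloRoute extends EndpointRouteBuilder {
        @Override
        public void configure() throws Exception {
            // spark-rest:verb:path syntax, per the regenerated javadoc above
            from(sparkRest("get:/hello")
                    .accept("application/json") // only accept JSON requests
                    .mapHeaders(true)           // copy Spark headers onto the Camel Message
                    .advanced()
                    .matchOnUriPrefix(false))   // require an exact URI match
                .transform().constant("Hello World");
        }
    }

Because SparkEndpointBuilder now extends EndpointConsumerBuilder rather than
EndpointProducerBuilder, the builder can only be passed to from(...), matching
the consumer-only nature of a REST service host.
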