Posted to commits@nifi.apache.org by ma...@apache.org on 2021/12/10 22:00:11 UTC

[nifi] branch main updated: NIFI-9259: Adding GeohashRecord Processor (#5476)

This is an automated email from the ASF dual-hosted git repository.

markap14 pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/nifi.git


The following commit(s) were added to refs/heads/main by this push:
     new 24422c4  NIFI-9259: Adding GeohashRecord Processor (#5476)
24422c4 is described below

commit 24422c4ec8910aa3360d5521a744c87901f3635e
Author: Mikayla Yang <64...@users.noreply.github.com>
AuthorDate: Fri Dec 10 13:57:23 2021 -0800

    NIFI-9259: Adding GeohashRecord Processor (#5476)
---
 nifi-assembly/NOTICE                               |   5 +
 nifi-assembly/pom.xml                              |   6 +
 .../nifi-geohash-bundle/nifi-geohash-nar/pom.xml   |  46 ++
 .../src/main/resources/META-INF/LICENSE            | 209 ++++++++
 .../nifi-geohash-processors/pom.xml                | 100 ++++
 .../nifi/processors/geohash/GeohashRecord.java     | 542 +++++++++++++++++++++
 .../services/org.apache.nifi.processor.Processor   |  15 +
 .../additionalDetails.html                         |  55 +++
 .../nifi/processors/geohash/GeohashRecordTest.java | 191 ++++++++
 .../src/test/resources/decode-record.json          |  10 +
 .../encode-records-with-illegal-arguments.json     |  17 +
 .../encode-records-with-incorrect-format.json      |  10 +
 .../src/test/resources/record_schema.avsc          |  21 +
 nifi-nar-bundles/nifi-geohash-bundle/pom.xml       |  33 ++
 nifi-nar-bundles/pom.xml                           |   1 +
 15 files changed, 1261 insertions(+)
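
In practice, the new processor enriches each record in place. As a purely
hypothetical illustration (BASE32 format, precision level 5, and the record
paths /latitude, /longitude, and /geohash used by the tests below), an ENCODE
run would turn

    {"latitude": 42.605, "longitude": -5.603}

into

    {"latitude": 42.605, "longitude": -5.603, "geohash": "ezs42"}

while DECODE performs the reverse, deriving the coordinate fields from the
geohash field.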

diff --git a/nifi-assembly/NOTICE b/nifi-assembly/NOTICE
index baa8552..0b79862 100644
--- a/nifi-assembly/NOTICE
+++ b/nifi-assembly/NOTICE
@@ -981,6 +981,11 @@ The following binary components are provided under the Apache Software License v
       GeoIP2 Java API
       This software is Copyright (c) 2013 by MaxMind, Inc.
 
+  (ASLv2) Geohash Java
+    The following NOTICE information applies:
+      Geohash Java
+      Copyright 2016 Silvio Heuberger and contributors
+
     (ASLv2) Apache XMLBeans
       The following NOTICE information applies:
         Portions of this software were originally based on the following:
diff --git a/nifi-assembly/pom.xml b/nifi-assembly/pom.xml
index dded8e4..2141897 100644
--- a/nifi-assembly/pom.xml
+++ b/nifi-assembly/pom.xml
@@ -406,6 +406,12 @@ language governing permissions and limitations under the License. -->
         </dependency>
         <dependency>
             <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-geohash-nar</artifactId>
+            <version>1.16.0-SNAPSHOT</version>
+            <type>nar</type>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
             <artifactId>nifi-aws-nar</artifactId>
             <version>1.16.0-SNAPSHOT</version>
             <type>nar</type>
diff --git a/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-nar/pom.xml b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-nar/pom.xml
new file mode 100644
index 0000000..a278395
--- /dev/null
+++ b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-nar/pom.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements. See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License. You may obtain a copy of the License at
+  http://www.apache.org/licenses/LICENSE-2.0
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.nifi</groupId>
+        <artifactId>nifi-geohash-bundle</artifactId>
+        <version>1.16.0-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>nifi-geohash-nar</artifactId>
+    <packaging>nar</packaging>
+    <properties>
+        <maven.javadoc.skip>true</maven.javadoc.skip>
+        <source.skip>true</source.skip>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-standard-services-api-nar</artifactId>
+            <version>${project.version}</version>
+            <type>nar</type>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-geohash-processors</artifactId>
+            <version>1.16.0-SNAPSHOT</version>
+        </dependency>
+    </dependencies>
+
+</project>
diff --git a/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-nar/src/main/resources/META-INF/LICENSE b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-nar/src/main/resources/META-INF/LICENSE
new file mode 100644
index 0000000..6effaa8
--- /dev/null
+++ b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-nar/src/main/resources/META-INF/LICENSE
@@ -0,0 +1,209 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+APACHE NIFI SUBCOMPONENTS:
+
+The Apache NiFi project contains subcomponents with separate copyright
+notices and license terms. Your use of the source code for these
+subcomponents is subject to the terms and conditions of the following
+licenses.
\ No newline at end of file
diff --git a/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/pom.xml b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/pom.xml
new file mode 100644
index 0000000..0448241
--- /dev/null
+++ b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/pom.xml
@@ -0,0 +1,100 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements. See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License. You may obtain a copy of the License at
+  http://www.apache.org/licenses/LICENSE-2.0
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.nifi</groupId>
+        <artifactId>nifi-geohash-bundle</artifactId>
+        <version>1.16.0-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>nifi-geohash-processors</artifactId>
+    <packaging>jar</packaging>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-api</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-utils</artifactId>
+            <version>1.16.0-SNAPSHOT</version>
+        </dependency>
+        <dependency>
+            <groupId>ch.hsr</groupId>
+            <artifactId>geohash</artifactId>
+            <version>1.4.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-record-serialization-service-api</artifactId>
+            <scope>provided</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-record-path</artifactId>
+            <version>1.16.0-SNAPSHOT</version>
+            <scope>compile</scope>
+        </dependency>
+        <dependency>
+            <groupId>commons-io</groupId>
+            <artifactId>commons-io</artifactId>
+            <version>2.10.0</version>
+            <scope>compile</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-mock</artifactId>
+            <version>1.16.0-SNAPSHOT</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-mock-record-utils</artifactId>
+            <version>1.16.0-SNAPSHOT</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-record-serialization-services</artifactId>
+            <version>1.16.0-SNAPSHOT</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.nifi</groupId>
+            <artifactId>nifi-schema-registry-service-api</artifactId>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.rat</groupId>
+                <artifactId>apache-rat-plugin</artifactId>
+                <configuration>
+                    <excludes combine.children="append">
+                        <exclude>src/test/resources/record_schema.avsc</exclude>
+                        <exclude>src/test/resources/decode-record.json</exclude>
+                        <exclude>src/test/resources/encode-records-with-illegal-arguments.json</exclude>
+                        <exclude>src/test/resources/encode-records-with-incorrect-format.json</exclude>
+                    </excludes>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/main/java/org/apache/nifi/processors/geohash/GeohashRecord.java b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/main/java/org/apache/nifi/processors/geohash/GeohashRecord.java
new file mode 100644
index 0000000..7d0fd18
--- /dev/null
+++ b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/main/java/org/apache/nifi/processors/geohash/GeohashRecord.java
@@ -0,0 +1,542 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.processors.geohash;
+
+import org.apache.nifi.annotation.behavior.EventDriven;
+import org.apache.nifi.annotation.behavior.SideEffectFree;
+import org.apache.nifi.annotation.behavior.SupportsBatching;
+import org.apache.nifi.annotation.behavior.InputRequirement;
+import org.apache.nifi.annotation.behavior.WritesAttribute;
+import org.apache.nifi.annotation.behavior.WritesAttributes;
+import org.apache.nifi.annotation.documentation.CapabilityDescription;
+import org.apache.nifi.annotation.documentation.Tags;
+import org.apache.nifi.annotation.lifecycle.OnScheduled;
+import org.apache.nifi.components.PropertyDescriptor;
+import org.apache.nifi.expression.ExpressionLanguageScope;
+import org.apache.nifi.flowfile.FlowFile;
+import org.apache.nifi.flowfile.attributes.CoreAttributes;
+import org.apache.nifi.processor.AbstractProcessor;
+import org.apache.nifi.processor.ProcessContext;
+import org.apache.nifi.processor.ProcessSession;
+import org.apache.nifi.processor.ProcessorInitializationContext;
+import org.apache.nifi.processor.Relationship;
+import org.apache.nifi.processor.util.StandardValidators;
+import org.apache.nifi.record.path.FieldValue;
+import org.apache.nifi.record.path.RecordPath;
+import org.apache.nifi.record.path.RecordPathResult;
+import org.apache.nifi.record.path.util.RecordPathCache;
+import org.apache.nifi.record.path.validation.RecordPathValidator;
+
+import org.apache.nifi.schema.access.SchemaNotFoundException;
+import org.apache.nifi.serialization.MalformedRecordException;
+import org.apache.nifi.serialization.RecordReader;
+import org.apache.nifi.serialization.RecordReaderFactory;
+import org.apache.nifi.serialization.RecordSetWriter;
+import org.apache.nifi.serialization.RecordSetWriterFactory;
+import org.apache.nifi.serialization.record.Record;
+import org.apache.nifi.serialization.WriteResult;
+
+import ch.hsr.geohash.GeoHash;
+import ch.hsr.geohash.WGS84Point;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.Optional;
+
+@EventDriven
+@SideEffectFree
+@SupportsBatching
+@Tags({"geo", "geohash", "record"})
+@CapabilityDescription("A record-based processor that encodes and decodes Geohashes from and to latitude/longitude coordinates.")
+@InputRequirement(InputRequirement.Requirement.INPUT_REQUIRED)
+@WritesAttributes({
+        @WritesAttribute(attribute = "mime.type", description = "The MIME type indicated by the record writer"),
+        @WritesAttribute(attribute = "record.count", description = "The number of records in the resulting flow file")
+})
+public class GeohashRecord extends AbstractProcessor {
+
+    public enum ProcessingMode {
+        ENCODE, DECODE
+    }
+
+    public enum GeohashFormat {
+        BASE32, BINARY, LONG
+    }
+
+    public enum RoutingStrategy {
+        SKIP,
+        SPLIT,
+        REQUIRE
+    }
+
+    public static final PropertyDescriptor MODE = new PropertyDescriptor.Builder()
+            .name("mode")
+            .displayName("Mode")
+            .description("Specifies whether to encode latitude/longitude to geohash or decode geohash to latitude/longitude")
+            .required(true)
+            .allowableValues(ProcessingMode.values())
+            .defaultValue(ProcessingMode.ENCODE.name())
+            .build();
+
+    public static final PropertyDescriptor ROUTING_STRATEGY = new PropertyDescriptor.Builder()
+            .name("routing-strategy")
+            .displayName("Routing Strategy")
+            .description("Specifies how to route flowfiles after encoding or decoding being performed. "
+                    + "SKIP will enrich those records that can be enriched and skip the rest. "
+                    + "The SKIP strategy will route a flowfile to failure only if unable to parse the data. "
+                    + "Otherwise, it will route the enriched flowfile to success, and the original input to original. "
+                    + "SPLIT will separate the records that have been enriched from those that have not and send them to matched, while unenriched records will be sent to unmatched; "
+                    + "the original input flowfile will be sent to original. The SPLIT strategy will route a flowfile to failure only if unable to parse the data. "
+                    + "REQUIRE will route a flowfile to success only if all of its records are enriched, and the original input will be sent to original. "
+                    + "The REQUIRE strategy will route the original input flowfile to failure if any of its records cannot be enriched or unable to be parsed")
+            .required(true)
+            .allowableValues(RoutingStrategy.values())
+            .defaultValue(RoutingStrategy.SKIP.name())
+            .build();
+
+    public static final PropertyDescriptor RECORD_READER = new PropertyDescriptor.Builder()
+            .name("record-reader")
+            .displayName("Record Reader")
+            .description("Specifies the record reader service to use for reading incoming data")
+            .required(true)
+            .identifiesControllerService(RecordReaderFactory.class)
+            .build();
+
+    public static final PropertyDescriptor RECORD_WRITER = new PropertyDescriptor.Builder()
+            .name("record-writer")
+            .displayName("Record Writer")
+            .description("Specifies the record writer service to use for writing data")
+            .required(true)
+            .identifiesControllerService(RecordSetWriterFactory.class)
+            .build();
+
+    public static final PropertyDescriptor LATITUDE_RECORD_PATH = new PropertyDescriptor.Builder()
+            .name("latitude-record-path")
+            .displayName("Latitude Record Path")
+            .description("In the ENCODE mode, this property specifies the record path to retrieve the latitude values. "
+                    + "Latitude values should be in the range of [-90, 90]; invalid values will be logged at warn level. "
+                    + "In the DECODE mode, this property specifies the record path to put the latitude value")
+            .required(true)
+            .addValidator(new RecordPathValidator())
+            .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
+            .build();
+
+    public static final PropertyDescriptor LONGITUDE_RECORD_PATH = new PropertyDescriptor.Builder()
+            .name("longitude-record-path")
+            .displayName("Longitude Record Path")
+            .description("In the ENCODE mode, this property specifies the record path to retrieve the longitude values; "
+                    + "Longitude values should be in the range of [-180, 180]; invalid values will be logged at warn level. "
+                    + "In the DECODE mode, this property specifies the record path to put the longitude value")
+            .required(true)
+            .addValidator(new RecordPathValidator())
+            .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
+            .build();
+
+    public static final PropertyDescriptor GEOHASH_RECORD_PATH = new PropertyDescriptor.Builder()
+            .name("geohash-record-path")
+            .displayName("Geohash Record Path")
+            .description("In the ENCODE mode, this property specifies the record path to put the geohash value; "
+                    + "in the DECODE mode, this property specifies the record path to retrieve the geohash value")
+            .required(true)
+            .addValidator(new RecordPathValidator())
+            .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
+            .build();
+
+    public static final PropertyDescriptor GEOHASH_FORMAT = new PropertyDescriptor.Builder()
+            .name("geohash-format")
+            .displayName("Geohash Format")
+            .description("In the ENCODE mode, this property specifies the desired format for encoding geohash; "
+                    + "in the DECODE mode, this property specifies the format of geohash provided")
+            .required(true)
+            .allowableValues(GeohashFormat.values())
+            .defaultValue(GeohashFormat.BASE32.name())
+            .build();
+
+    public static final PropertyDescriptor GEOHASH_LEVEL = new PropertyDescriptor.Builder()
+            .name("geohash-level")
+            .displayName("Geohash Level")
+            .description("The integer precision level(1-12) desired for encoding geohash")
+            .required(true)
+            .addValidator(StandardValidators.createLongValidator(1, 12, true))
+            .expressionLanguageSupported(ExpressionLanguageScope.FLOWFILE_ATTRIBUTES)
+            .dependsOn(MODE, ProcessingMode.ENCODE.name())
+            .build();
+
+    public static final Relationship REL_NOT_MATCHED = new Relationship.Builder()
+            .name("not matched")
+            .description("Using the SPLIT strategy, flowfiles that cannot be encoded or decoded due to the lack of lat/lon or geohashes will be routed to not matched")
+            .build();
+
+    public static final Relationship REL_MATCHED = new Relationship.Builder()
+            .name("matched")
+            .description("Using the SPLIT strategy, flowfiles with lat/lon or geohashes provided that are successfully encoded or decoded will be routed to matched")
+            .build();
+
+    public static final Relationship REL_FAILURE = new Relationship.Builder()
+            .name("failure")
+            .description("Flowfiles that cannot be encoded or decoded will be routed to failure")
+            .build();
+
+    public static final Relationship REL_SUCCESS = new Relationship.Builder()
+            .name("success")
+            .description("Flowfiles that are successfully encoded or decoded will be routed to success")
+            .build();
+
+    public static final Relationship REL_ORIGINAL = new Relationship.Builder()
+            .name("original")
+            .description("The original input flowfile will be sent to this relationship")
+            .build();
+
+    private static final List<PropertyDescriptor> RECORD_PATH_PROPERTIES = Collections.unmodifiableList(Arrays.asList(
+            LATITUDE_RECORD_PATH, LONGITUDE_RECORD_PATH, GEOHASH_RECORD_PATH
+    ));
+
+    private static final Set<Relationship> RELATIONSHIPS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(REL_SUCCESS, REL_ORIGINAL, REL_FAILURE)));
+    private static final Set<Relationship> SPLIT_RELATIONSHIPS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(REL_MATCHED, REL_NOT_MATCHED, REL_ORIGINAL, REL_FAILURE)));
+
+    private RoutingStrategyExecutor routingStrategyExecutor;
+    private static boolean isSplit;
+    private static Integer enrichedCount, unenrichedCount;
+
+    private final RecordPathCache cache = new RecordPathCache(100);
+
+    private List<PropertyDescriptor> descriptors;
+
+    @Override
+    protected void init(final ProcessorInitializationContext context) {
+        descriptors = new ArrayList<>();
+        descriptors.add(MODE);
+        descriptors.add(RECORD_READER);
+        descriptors.add(RECORD_WRITER);
+        descriptors.add(ROUTING_STRATEGY);
+        descriptors.add(LATITUDE_RECORD_PATH);
+        descriptors.add(LONGITUDE_RECORD_PATH);
+        descriptors.add(GEOHASH_RECORD_PATH);
+        descriptors.add(GEOHASH_FORMAT);
+        descriptors.add(GEOHASH_LEVEL);
+        descriptors = Collections.unmodifiableList(descriptors);
+    }
+
+    @Override
+    public void onPropertyModified(final PropertyDescriptor descriptor, final String oldValue, final String newValue) {
+        if (descriptor.equals(ROUTING_STRATEGY)) {
+            isSplit = RoutingStrategy.SPLIT.name().equals(newValue);
+        }
+    }
+
+    @Override
+    public Set<Relationship> getRelationships() {
+        return isSplit ? SPLIT_RELATIONSHIPS : RELATIONSHIPS;
+    }
+
+    @Override
+    public final List<PropertyDescriptor> getSupportedPropertyDescriptors() {
+        return descriptors;
+    }
+
+    @OnScheduled
+    public void setup(ProcessContext context) {
+        final RoutingStrategy routingStrategy = RoutingStrategy.valueOf(context.getProperty(ROUTING_STRATEGY).getValue());
+        switch (routingStrategy) {
+            case REQUIRE:
+                routingStrategyExecutor = new RequireRoutingStrategyExecutor();
+                break;
+            case SKIP:
+                routingStrategyExecutor = new SkipRoutingStrategyExecutor();
+                break;
+            case SPLIT:
+                routingStrategyExecutor = new SplitRoutingStrategyExecutor();
+                break;
+            default:
+                throw new AssertionError();
+        }
+        enrichedCount = 0;
+        unenrichedCount = 0;
+    }
+
+    @Override
+    public void onTrigger(final ProcessContext context, final ProcessSession session) {
+        FlowFile input = session.get();
+        if (input == null) {
+            return;
+        }
+
+        final RecordReaderFactory readerFactory = context.getProperty(RECORD_READER).asControllerService(RecordReaderFactory.class);
+        final RecordSetWriterFactory writerFactory = context.getProperty(RECORD_WRITER).asControllerService(RecordSetWriterFactory.class);
+        final boolean encode = context.getProperty(MODE).getValue().equalsIgnoreCase(ProcessingMode.ENCODE.toString());
+        final RoutingStrategy routingStrategy = RoutingStrategy.valueOf(context.getProperty(ROUTING_STRATEGY).getValue());
+        final GeohashFormat format = GeohashFormat.valueOf(context.getProperty(GEOHASH_FORMAT).getValue());
+
+        FlowFile output = session.create(input);
+        FlowFile notMatched = routingStrategy == RoutingStrategy.SPLIT ? session.create(input) : null;
+
+        try (final InputStream is = session.read(input);
+             final RecordReader reader = readerFactory.createRecordReader(input, is, getLogger());
+             final OutputStream os = session.write(output);
+             final OutputStream osNotFound = routingStrategy == RoutingStrategy.SPLIT ? session.write(notMatched) : null) {
+
+            final RecordSetWriter writer = writerFactory.createWriter(getLogger(), writerFactory.getSchema(input.getAttributes(), reader.getSchema()), os, output);
+            final RecordSetWriter notMatchedWriter = routingStrategy == RoutingStrategy.SPLIT ? writerFactory.createWriter(getLogger(), reader.getSchema(), osNotFound, notMatched) : null;
+
+            Map<PropertyDescriptor, RecordPath> paths = new HashMap<>();
+            for (PropertyDescriptor descriptor : RECORD_PATH_PROPERTIES) {
+                String rawRecordPath = context.getProperty(descriptor).evaluateAttributeExpressions(input).getValue();
+                RecordPath compiled = cache.getCompiled(rawRecordPath);
+                paths.put(descriptor, compiled);
+            }
+
+            Record record;
+
+            writer.beginRecordSet();
+
+            if (notMatchedWriter != null) {
+                notMatchedWriter.beginRecordSet();
+            }
+
+            int level = context.getProperty(GEOHASH_LEVEL).evaluateAttributeExpressions(input).asInteger();
+            final String rawLatitudePath = context.getProperty(LATITUDE_RECORD_PATH).evaluateAttributeExpressions(input).getValue();
+            RecordPath latitudePath = cache.getCompiled(rawLatitudePath);
+            final String rawLongitudePath = context.getProperty(LONGITUDE_RECORD_PATH).evaluateAttributeExpressions(input).getValue();
+            RecordPath longitudePath = cache.getCompiled(rawLongitudePath);
+            final String rawGeohashPath = context.getProperty(GEOHASH_RECORD_PATH).evaluateAttributeExpressions(input).getValue();
+            RecordPath geohashPath = cache.getCompiled(rawGeohashPath);
+
+            while ((record = reader.nextRecord()) != null) {
+                boolean updated = false;
+
+                try {
+                    if (encode) {
+                        Object encodedGeohash = getEncodedGeohash(latitudePath, longitudePath, record, format, level);
+                        updated = updateRecord(GEOHASH_RECORD_PATH, encodedGeohash, record, paths);
+                    } else {
+                        WGS84Point decodedPoint = getDecodedPointFromGeohash(geohashPath, record, format);
+                        if (decodedPoint != null) {
+                            updated = updateRecord(LATITUDE_RECORD_PATH, String.valueOf(decodedPoint.getLatitude()), record, paths)
+                                    && updateRecord(LONGITUDE_RECORD_PATH, String.valueOf(decodedPoint.getLongitude()), record, paths);
+                        }
+                    }
+                } catch (IllegalArgumentException e) {
+                    //lat/lon/geohash values are out of range or not valid
+                    getLogger().warn("Unable to " + (encode ? "encode" : "decode"), e);
+                }
+
+                routingStrategyExecutor.writeFlowFiles(record, writer, notMatchedWriter, updated);
+            }
+
+            final WriteResult writeResult = writer.finishRecordSet();
+            writer.close();
+            output = session.putAllAttributes(output, buildAttributes(writeResult.getRecordCount(), writer.getMimeType(), writeResult));
+
+            WriteResult notMatchedWriterResult;
+
+            if (notMatchedWriter != null) {
+                notMatchedWriterResult = notMatchedWriter.finishRecordSet();
+                notMatchedWriter.close();
+                if (notMatchedWriterResult.getRecordCount() > 0) {
+                    notMatched = session.putAllAttributes(notMatched, buildAttributes(notMatchedWriterResult.getRecordCount(), writer.getMimeType(), notMatchedWriterResult));
+                }
+            }
+        } catch (IOException | SchemaNotFoundException | MalformedRecordException e) {
+            //cannot parse incoming data
+            getLogger().error("Cannot parse the incoming data", e);
+            session.remove(output);
+            if (notMatched != null) {
+                session.remove(notMatched);
+            }
+            session.transfer(input, REL_FAILURE);
+            return;
+        }
+
+        //Transfer flowfiles according to the routing strategy
+        routingStrategyExecutor.transferFlowFiles(session, input, output, notMatched);
+    }
+
+    private interface RoutingStrategyExecutor {
+        void writeFlowFiles(Record record, RecordSetWriter writer, RecordSetWriter notMatchedWriter, boolean updated) throws IOException;
+
+        void transferFlowFiles(final ProcessSession session, FlowFile input, FlowFile output, FlowFile notMatched);
+    }
+
+    private class SkipRoutingStrategyExecutor implements RoutingStrategyExecutor {
+        @Override
+        public void writeFlowFiles(Record record, RecordSetWriter writer, RecordSetWriter notMatchedWriter, boolean updated) throws IOException {
+            writer.write(record);
+        }
+
+        @Override
+        public void transferFlowFiles(final ProcessSession session, FlowFile input, FlowFile output, FlowFile notMatched) {
+            session.transfer(output, REL_SUCCESS);
+            session.transfer(input, REL_ORIGINAL);
+        }
+    }
+
+    private class SplitRoutingStrategyExecutor implements RoutingStrategyExecutor {
+        @Override
+        public void writeFlowFiles(Record record, RecordSetWriter writer, RecordSetWriter notMatchedWriter, boolean updated) throws IOException {
+            if (updated) {
+                enrichedCount++;
+                writer.write(record);
+            } else {
+                unenrichedCount++;
+                notMatchedWriter.write(record);
+            }
+        }
+
+        @Override
+        public void transferFlowFiles(final ProcessSession session, FlowFile input, FlowFile output, FlowFile notMatched) {
+            if (unenrichedCount > 0) {
+                session.transfer(notMatched, REL_NOT_MATCHED);
+            } else {
+                session.remove(notMatched);
+            }
+            if (enrichedCount > 0) {
+                session.transfer(output, REL_MATCHED);
+            } else {
+                session.remove(output);
+            }
+            session.transfer(input, REL_ORIGINAL);
+        }
+    }
+
+    private class RequireRoutingStrategyExecutor implements RoutingStrategyExecutor {
+        @Override
+        public void writeFlowFiles(Record record, RecordSetWriter writer, RecordSetWriter notMatchedWriter, boolean updated) throws IOException {
+            if (updated) {
+                writer.write(record);
+            } else {
+                unenrichedCount++;
+            }
+        }
+
+        @Override
+        public void transferFlowFiles(final ProcessSession session, FlowFile input, FlowFile output, FlowFile notMatched) {
+            if (unenrichedCount > 0) {
+                session.remove(output);
+                getLogger().error("There exists some records that cannot be enriched or parsed. The original input flowfile is routed to failure using the REQUIRE strategy");
+                session.transfer(input, REL_FAILURE);
+            } else {
+                session.transfer(output, REL_SUCCESS);
+                session.transfer(input, REL_ORIGINAL);
+            }
+        }
+    }
+
+    private Object getEncodedGeohash(RecordPath latitudePath, RecordPath longitudePath, Record record, GeohashFormat format, int level) {
+        RecordPathResult latitudeResult = latitudePath.evaluate(record);
+        RecordPathResult longitudeResult = longitudePath.evaluate(record);
+        Optional<FieldValue> latitudeField = latitudeResult.getSelectedFields().findFirst();
+        Optional<FieldValue> longitudeField = longitudeResult.getSelectedFields().findFirst();
+
+        if (!latitudeField.isPresent() || !longitudeField.isPresent()) {
+            return null;
+        }
+
+        FieldValue latitudeValue = latitudeField.get();
+        FieldValue longitudeValue = longitudeField.get();
+        Object latitudeVal = latitudeValue.getValue();
+        Object longitudeVal = longitudeValue.getValue();
+
+        if (latitudeVal == null || longitudeVal == null) {
+            return null;
+        }
+
+        double realLatValue = Double.parseDouble(latitudeVal.toString());
+        double realLongValue = Double.parseDouble(longitudeVal.toString());
+        GeoHash gh = GeoHash.withCharacterPrecision(realLatValue, realLongValue, level);
+
+        switch (format) {
+            case BINARY:
+                return gh.toBinaryString();
+            case LONG:
+                return gh.longValue();
+            default:
+                return gh.toBase32();
+        }
+    }
+
+    private WGS84Point getDecodedPointFromGeohash(RecordPath geohashPath, Record record, GeohashFormat format) {
+        RecordPathResult geohashResult = geohashPath.evaluate(record);
+        Optional<FieldValue> geohashField = geohashResult.getSelectedFields().findFirst();
+
+        if (!geohashField.isPresent()) {
+            return null;
+        }
+
+        FieldValue geohashFieldValue = geohashField.get();
+        Object geohashVal = geohashFieldValue.getValue();
+        if (geohashVal == null) {
+            return null;
+        }
+
+        String geohashString = geohashVal.toString();
+        GeoHash decodedHash;
+
+        switch (format) {
+            case BINARY:
+                decodedHash = GeoHash.fromBinaryString(geohashString);
+                break;
+            case LONG:
+                String binaryString = Long.toBinaryString(Long.parseLong(geohashString));
+                decodedHash = GeoHash.fromBinaryString(binaryString);
+                break;
+            default:
+                decodedHash = GeoHash.fromGeohashString(geohashString);
+        }
+
+        return decodedHash.getBoundingBoxCenter();
+    }
+
+    private boolean updateRecord(PropertyDescriptor descriptor, Object newValue, Record record, Map<PropertyDescriptor, RecordPath> cached) {
+        if (!cached.containsKey(descriptor) || newValue == null) {
+            return false;
+        }
+        RecordPath path = cached.get(descriptor);
+        RecordPathResult result = path.evaluate(record);
+
+        final Optional<FieldValue> fieldValueOption = result.getSelectedFields().findFirst();
+        if (!fieldValueOption.isPresent()) {
+            return false;
+        }
+
+        final FieldValue fieldValue = fieldValueOption.get();
+
+        if (!fieldValue.getParent().isPresent() || fieldValue.getParent().get().getValue() == null) {
+            return false;
+        }
+
+        fieldValue.updateValue(newValue);
+        return true;
+    }
+
+    private Map<String, String> buildAttributes(int recordCount, String mimeType, WriteResult writeResult) {
+        Map<String, String> retVal = new HashMap<>();
+        retVal.put(CoreAttributes.MIME_TYPE.key(), mimeType);
+        retVal.put("record.count", String.valueOf(recordCount));
+        retVal.putAll(writeResult.getAttributes());
+        return retVal;
+    }
+}
\ No newline at end of file
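
For quick reference, the encode and decode paths above boil down to a handful
of calls on the ch.hsr.geohash library. A minimal standalone sketch of the
same logic (the coordinates, precision level, and expected outputs are
illustrative only):

    import ch.hsr.geohash.GeoHash;
    import ch.hsr.geohash.WGS84Point;

    public class GeohashRoundTrip {
        public static void main(String[] args) {
            // ENCODE: latitude/longitude -> geohash, as in getEncodedGeohash()
            GeoHash gh = GeoHash.withCharacterPrecision(42.605, -5.603, 5);
            System.out.println(gh.toBase32());        // BASE32, e.g. "ezs42"
            System.out.println(gh.toBinaryString());  // BINARY: interleaved lat/lon bits
            System.out.println(gh.longValue());       // LONG: 64-bit representation

            // DECODE: geohash -> latitude/longitude, as in getDecodedPointFromGeohash()
            WGS84Point center = GeoHash.fromGeohashString("ezs42").getBoundingBoxCenter();
            // Prints approximately 42.605, -5.603 (the center of the hash's bounding box)
            System.out.println(center.getLatitude() + ", " + center.getLongitude());
        }
    }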
diff --git a/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/main/resources/META-INF/services/org.apache.nifi.processor.Processor b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/main/resources/META-INF/services/org.apache.nifi.processor.Processor
new file mode 100644
index 0000000..1ff4ff7
--- /dev/null
+++ b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/main/resources/META-INF/services/org.apache.nifi.processor.Processor
@@ -0,0 +1,15 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+org.apache.nifi.processors.geohash.GeohashRecord
\ No newline at end of file
diff --git a/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/main/resources/docs/org.apache.nifi.processors.geohash.GeohashRecord/additionalDetails.html b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/main/resources/docs/org.apache.nifi.processors.geohash.GeohashRecord/additionalDetails.html
new file mode 100644
index 0000000..410e7d2
--- /dev/null
+++ b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/main/resources/docs/org.apache.nifi.processors.geohash.GeohashRecord/additionalDetails.html
@@ -0,0 +1,55 @@
+<!DOCTYPE html>
+<html lang="en">
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+      http://www.apache.org/licenses/LICENSE-2.0
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<head>
+    <meta charset="utf-8"/>
+    <title>GeohashRecord</title>
+    <link rel="stylesheet" href="../../../../../css/component-usage.css" type="text/css"/>
+</head>
+
+<body>
+<h1>Overview</h1>
+<p>
+    A Geohash value corresponds to a specific area with pre-defined granularity and is widely used for identifying,
+    representing, and indexing geospatial objects.
+    The GeohashRecord processor provides the ability to encode and decode Geohashes in the desired format and at the desired precision.
+</p>
+<h3>Formats supported</h3>
+<p>
+<ul>
+    <li>BASE32: The most commonly used alphanumeric version. It is compact and more human-readable because it discards
+        some letters (such as "a" and "o", "i" and "l") that might cause confusion.
+    </li>
+    <li>BINARY: This format is generated by directly interleaving latitude and longitude binary strings.
+        The even bits in the binary string correspond to the longitude, while the odd bits correspond to the latitude.
+    </li>
+    <li>LONG: Although this 64-bit number format is not human-readable, it can be computed very quickly and is
+        more efficient.
+    </li>
+</ul>
+</p>
+<h3>Precision supported</h3>
+
+<p>
+    In <b>ENCODE</b> mode, users specify the desired precision level, which should be an integer between 1 and
+    12. A greater level generates a longer Geohash with higher precision.
+</p>
+<p>
+    In <b>DECODE</b> mode, users are not asked to provide a precision level because this information is implied by the length
+    of the given Geohash values.
+</p>
+</body>
+</html>
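
To make the precision discussion above concrete, the following sketch
(illustrative coordinates) prints the BASE32 Geohash of a single point at
every supported level; each additional character narrows the area the hash
denotes:

    import ch.hsr.geohash.GeoHash;

    public class GeohashPrecisionDemo {
        public static void main(String[] args) {
            double lat = 42.605, lon = -5.603; // illustrative coordinates
            for (int level = 1; level <= 12; level++) {
                // A level-n hash is an n-character BASE32 string
                System.out.println(level + " -> "
                        + GeoHash.withCharacterPrecision(lat, lon, level).toBase32());
            }
        }
    }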
diff --git a/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/test/java/org/apache/nifi/processors/geohash/GeohashRecordTest.java b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/test/java/org/apache/nifi/processors/geohash/GeohashRecordTest.java
new file mode 100644
index 0000000..ef607e2
--- /dev/null
+++ b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/test/java/org/apache/nifi/processors/geohash/GeohashRecordTest.java
@@ -0,0 +1,191 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.nifi.processors.geohash;
+
+import org.apache.avro.Schema;
+import org.apache.commons.io.IOUtils;
+import org.apache.nifi.avro.AvroTypeUtil;
+import org.apache.nifi.controller.ControllerService;
+import org.apache.nifi.json.JsonRecordSetWriter;
+import org.apache.nifi.json.JsonTreeReader;
+import org.apache.nifi.reporting.InitializationException;
+import org.apache.nifi.schema.access.SchemaAccessUtils;
+import org.apache.nifi.serialization.record.MockSchemaRegistry;
+import org.apache.nifi.serialization.record.RecordSchema;
+import org.apache.nifi.util.MockFlowFile;
+import org.apache.nifi.util.TestRunner;
+import org.apache.nifi.util.TestRunners;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class GeohashRecordTest {
+
+    private TestRunner runner;
+
+    @BeforeEach
+    public void setUp() throws InitializationException {
+        ControllerService reader = new JsonTreeReader();
+        ControllerService writer = new JsonRecordSetWriter();
+        ControllerService registry = new MockSchemaRegistry();
+        runner = TestRunners.newTestRunner(GeohashRecord.class);
+        runner.addControllerService("reader", reader);
+        runner.addControllerService("writer", writer);
+        runner.addControllerService("registry", registry);
+
+        try (InputStream is = getClass().getResourceAsStream("/record_schema.avsc")) {
+            String raw = IOUtils.toString(is, "UTF-8");
+            RecordSchema parsed = AvroTypeUtil.createSchema(new Schema.Parser().parse(raw));
+            ((MockSchemaRegistry) registry).addSchema("record", parsed);
+
+        } catch (Exception ex) {
+            throw new RuntimeException(ex);
+        }
+
+        runner.setProperty(reader, SchemaAccessUtils.SCHEMA_REGISTRY, "registry");
+        runner.setProperty(writer, SchemaAccessUtils.SCHEMA_ACCESS_STRATEGY, SchemaAccessUtils.SCHEMA_NAME_PROPERTY);
+        runner.setProperty(writer, SchemaAccessUtils.SCHEMA_REGISTRY, "registry");
+
+        runner.setProperty(GeohashRecord.RECORD_READER, "reader");
+        runner.setProperty(GeohashRecord.RECORD_WRITER, "writer");
+        runner.enableControllerService(registry);
+        runner.enableControllerService(reader);
+        runner.enableControllerService(writer);
+
+        runner.setProperty(GeohashRecord.LATITUDE_RECORD_PATH, "/latitude");
+        runner.setProperty(GeohashRecord.LONGITUDE_RECORD_PATH, "/longitude");
+        runner.setProperty(GeohashRecord.GEOHASH_RECORD_PATH, "/geohash");
+        runner.setProperty(GeohashRecord.GEOHASH_FORMAT, GeohashRecord.GeohashFormat.BASE32.toString());
+        runner.setProperty(GeohashRecord.GEOHASH_LEVEL, "12");
+    }
+
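+    // Enqueues the given test resource with schema.name set, runs the processor once,
+    // and asserts the expected transfer count for each relationship.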
+    private void assertTransfers(String path, int failure, int success, int matched, int notMatched, int original) {
+        Map<String, String> attrs = new HashMap<>();
+        attrs.put("schema.name", "record");
+        runner.enqueue(getClass().getResourceAsStream(path), attrs);
+        runner.run();
+
+        runner.assertTransferCount(GeohashRecord.REL_FAILURE, failure);
+        runner.assertTransferCount(GeohashRecord.REL_SUCCESS, success);
+        runner.assertTransferCount(GeohashRecord.REL_MATCHED, matched);
+        runner.assertTransferCount(GeohashRecord.REL_NOT_MATCHED, notMatched);
+        runner.assertTransferCount(GeohashRecord.REL_ORIGINAL, original);
+    }
+
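+    // ENCODE with SKIP routing: the record set routes to success as a single flowfile
+    // even though two of the three records carry un-encodable latitude values.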
+    @Test
+    public void testSkipUnEnrichedEncodeIllegalLatLon() throws Exception {
+        runner.setProperty(GeohashRecord.MODE, GeohashRecord.ProcessingMode.ENCODE.toString());
+        runner.setProperty(GeohashRecord.ROUTING_STRATEGY, GeohashRecord.RoutingStrategy.SKIP.toString());
+        runner.assertValid();
+
+        assertTransfers("/encode-records-with-illegal-arguments.json", 0, 1, 0, 0, 1);
+
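+        // Read the success flowfile back with Jackson (jackson-databind is assumed to be
+        // available on the test classpath) and verify that all three records were kept
+        // and that the valid record was enriched with a geohash.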
+        MockFlowFile outSuccess = runner.getFlowFilesForRelationship(GeohashRecord.REL_SUCCESS).get(0);
+        byte[] raw = runner.getContentAsByteArray(outSuccess);
+        String content = new String(raw);
+        ObjectMapper mapper = new ObjectMapper();
+        List<Map<String, Object>> result = (List<Map<String, Object>>) mapper.readValue(content, List.class);
+
+        assertNotNull(result);
+        assertEquals(3, result.size());
+
+        Map<String, Object> element = result.get(0);
+        String geohash = (String) element.get("geohash");
+        assertNotNull(geohash);
+    }
+
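+    // ENCODE with SKIP routing: input that cannot be parsed at all routes the whole
+    // flowfile to failure.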
+    @Test
+    public void testSkipUnEnrichedEncodeParseFailure() {
+        runner.setProperty(GeohashRecord.MODE, GeohashRecord.ProcessingMode.ENCODE.toString());
+        runner.setProperty(GeohashRecord.ROUTING_STRATEGY, GeohashRecord.RoutingStrategy.SKIP.toString());
+        runner.assertValid();
+
+        assertTransfers("/encode-records-with-incorrect-format.json", 1, 0, 0, 0, 0);
+    }
+
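+    // ENCODE with SPLIT routing: records that could be enriched are written to matched,
+    // the rest to not-matched, and the untouched input is routed to original.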
+    @Test
+    public void testSplitEncodeIllegalLatLon() throws IOException {
+        runner.setProperty(GeohashRecord.MODE, GeohashRecord.ProcessingMode.ENCODE.toString());
+        runner.setProperty(GeohashRecord.ROUTING_STRATEGY, GeohashRecord.RoutingStrategy.SPLIT.toString());
+        runner.assertValid();
+
+        assertTransfers("/encode-records-with-illegal-arguments.json", 0, 0, 1, 1, 1);
+
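+        // The two records with illegal latitude values should land in not-matched without
+        // a geohash, while the single valid record lands in matched with one.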
+        final MockFlowFile outNotMatched = runner.getFlowFilesForRelationship(GeohashRecord.REL_NOT_MATCHED).get(0);
+        final MockFlowFile outMatched = runner.getFlowFilesForRelationship(GeohashRecord.REL_MATCHED).get(0);
+
+        byte[] rawNotMatched = runner.getContentAsByteArray(outNotMatched);
+        byte[] rawMatched = runner.getContentAsByteArray(outMatched);
+        String contentNotMatched = new String(rawNotMatched);
+        String contentMatched = new String(rawMatched);
+        ObjectMapper mapper = new ObjectMapper();
+        List<Map<String, Object>> resultNotMatched = (List<Map<String, Object>>) mapper.readValue(contentNotMatched, List.class);
+        List<Map<String, Object>> resultMatched = (List<Map<String, Object>>) mapper.readValue(contentMatched, List.class);
+
+        assertNotNull(resultNotMatched);
+        assertNotNull(resultMatched);
+        assertEquals(2, resultNotMatched.size());
+        assertEquals(1, resultMatched.size());
+
+        for (Map<String, Object> elementNotMatched : resultNotMatched) {
+            String geohashNotMatched = (String) elementNotMatched.get("geohash");
+            assertNull(geohashNotMatched);
+        }
+
+        Map<String, Object> elementMatched = resultMatched.get(0);
+        String geohashMatched = (String) elementMatched.get("geohash");
+        assertNotNull(geohashMatched);
+    }
+
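+    // DECODE with SPLIT routing: both geohashes decode, so only a matched flowfile is
+    // emitted and the empty not-matched flowfile is dropped.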
+    @Test
+    public void testSplitRemoveEmptyFlowFiles() {
+        runner.setProperty(GeohashRecord.MODE, GeohashRecord.ProcessingMode.DECODE.toString());
+        runner.setProperty(GeohashRecord.ROUTING_STRATEGY, GeohashRecord.RoutingStrategy.SPLIT.toString());
+        runner.assertValid();
+        assertTransfers("/decode-record.json", 0, 0, 1, 0, 1);
+    }
+
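+    // DECODE with REQUIRE routing: every record decodes successfully, so the flowfile
+    // routes to success.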
+    @Test
+    public void testRequireAllEnrichedSendToSuccess() {
+        runner.setProperty(GeohashRecord.MODE, GeohashRecord.ProcessingMode.DECODE.toString());
+        runner.setProperty(GeohashRecord.ROUTING_STRATEGY, GeohashRecord.RoutingStrategy.REQUIRE.toString());
+        runner.assertValid();
+
+        assertTransfers("/decode-record.json", 0, 1, 0, 0, 1);
+    }
+
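+    // ENCODE with REQUIRE routing: at least one record cannot be encoded, so the entire
+    // flowfile routes to failure.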
+    @Test
+    public void testRequireAllEnrichedSendToFailure() {
+        runner.setProperty(GeohashRecord.MODE, GeohashRecord.ProcessingMode.ENCODE.toString());
+        runner.setProperty(GeohashRecord.ROUTING_STRATEGY, GeohashRecord.RoutingStrategy.REQUIRE.toString());
+        runner.assertValid();
+
+        assertTransfers("/encode-records-with-illegal-arguments.json", 1, 0, 0, 0, 0);
+    }
+
+}
\ No newline at end of file
diff --git a/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/test/resources/decode-record.json b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/test/resources/decode-record.json
new file mode 100644
index 0000000..566d242
--- /dev/null
+++ b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/test/resources/decode-record.json
@@ -0,0 +1,10 @@
+[
+  {
+    "id": "0",
+    "geohash": "sp2j1z6205km"
+  },
+  {
+    "id": "1",
+    "geohash": "sp2"
+  }
+]
\ No newline at end of file
diff --git a/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/test/resources/encode-records-with-illegal-arguments.json b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/test/resources/encode-records-with-illegal-arguments.json
new file mode 100644
index 0000000..88d3ed3
--- /dev/null
+++ b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/test/resources/encode-records-with-illegal-arguments.json
@@ -0,0 +1,17 @@
+[
+  {
+    "id": "0",
+    "latitude": 41.7,
+    "longitude": 0.08
+  },
+  {
+    "id": "1",
+    "latitude": 441.7,
+    "longitude": 0.08
+  },
+  {
+    "id": "2",
+    "latitude": "hello",
+    "longitude": 0.08
+  }
+]
\ No newline at end of file
diff --git a/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/test/resources/encode-records-with-incorrect-format.json b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/test/resources/encode-records-with-incorrect-format.json
new file mode 100644
index 0000000..9e5caa3
--- /dev/null
+++ b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/test/resources/encode-records-with-incorrect-format.json
@@ -0,0 +1,10 @@
+[
+  {
+    "id": "0",
+    "latitude": 41.7,
+    "longitude": 0.08
+  },
+  {
+    "id": "1",
+  }
+]
\ No newline at end of file
diff --git a/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/test/resources/record_schema.avsc b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/test/resources/record_schema.avsc
new file mode 100644
index 0000000..6600d28
--- /dev/null
+++ b/nifi-nar-bundles/nifi-geohash-bundle/nifi-geohash-processors/src/test/resources/record_schema.avsc
@@ -0,0 +1,21 @@
+{
+  "type": "record",
+  "name": "TestSchema",
+  "fields": [{
+    "name": "id",
+    "type": "string"
+  },
+    {
+      "name": "latitude",
+      "type": ["string", "double", "null"]
+    },
+    {
+      "name": "longitude",
+      "type": ["string", "double", "null"]
+    },
+    {
+      "name": "geohash",
+      "type": ["string", "long", "null"]
+    }
+  ]
+}
\ No newline at end of file
diff --git a/nifi-nar-bundles/nifi-geohash-bundle/pom.xml b/nifi-nar-bundles/nifi-geohash-bundle/pom.xml
new file mode 100644
index 0000000..b4f3527
--- /dev/null
+++ b/nifi-nar-bundles/nifi-geohash-bundle/pom.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements. See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License. You may obtain a copy of the License at
+  http://www.apache.org/licenses/LICENSE-2.0
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.nifi</groupId>
+        <artifactId>nifi-nar-bundles</artifactId>
+        <version>1.16.0-SNAPSHOT</version>
+    </parent>
+
+    <artifactId>nifi-geohash-bundle</artifactId>
+    <packaging>pom</packaging>
+
+    <modules>
+        <module>nifi-geohash-processors</module>
+        <module>nifi-geohash-nar</module>
+    </modules>
+
+</project>
diff --git a/nifi-nar-bundles/pom.xml b/nifi-nar-bundles/pom.xml
index a1b6299..fc82b7a 100755
--- a/nifi-nar-bundles/pom.xml
+++ b/nifi-nar-bundles/pom.xml
@@ -108,6 +108,7 @@
         <module>nifi-pgp-bundle</module>
         <module>nifi-hashicorp-vault-bundle</module>
         <module>nifi-stateless-processor-bundle</module>
+        <module>nifi-geohash-bundle</module>
     </modules>
 
     <build>