Posted to commits@hugegraph.apache.org by ji...@apache.org on 2024/04/04 09:37:23 UTC

(incubator-hugegraph) branch master updated (483bca52e -> 3a1618faa)

This is an automated email from the ASF dual-hosted git repository.

jin pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph.git


    from 483bca52e chore: remove required ci with java8 (#2503)
     new 37e4405c0 refact: prepare for integrating pd modules
     new a560a6efe feat(pd): integrate `pd-grpc` submodule
     new b5d9dd2f0 feat(pd): integrate `pd-common` submodule
     new bd1d9db77 feat(pd): integrate `pd-test` submodule
     new 3a1618faa feat(pd): integrate `pd-client` submodule

The 5 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .github/workflows/check-dependencies.yml           |    3 +-
 .github/workflows/ci.yml                           |    2 +-
 .github/workflows/pd-store.yml                     |   51 +
 hugegraph-pd/.gitignore                            |    2 +
 hugegraph-pd/README.md                             |    9 +-
 .../hg-pd-client}/pom.xml                          |   49 +-
 .../apache/hugegraph/pd/client/AbstractClient.java |  265 ++++
 .../pd/client/AbstractClientStubProxy.java         |   72 ++
 .../org/apache/hugegraph/pd/client/Channels.java   |   35 +-
 .../apache/hugegraph/pd/client/ClientCache.java    |  338 +++++
 .../apache/hugegraph/pd/client/Discoverable.java   |   15 +-
 .../hugegraph/pd/client/DiscoveryClient.java       |  221 ++++
 .../hugegraph/pd/client/DiscoveryClientImpl.java   |  137 ++
 .../org/apache/hugegraph/pd/client/KvClient.java   |  343 +++++
 .../apache/hugegraph/pd/client/LicenseClient.java  |   71 ++
 .../org/apache/hugegraph/pd/client/PDClient.java   | 1347 ++++++++++++++++++++
 .../org/apache/hugegraph/pd/client/PDConfig.java   |   83 ++
 .../org/apache/hugegraph/pd/client/PDPulse.java    |  154 +++
 .../apache/hugegraph/pd/client/PDPulseImpl.java    |  197 +++
 .../org/apache/hugegraph/pd/client/PDWatch.java    |  140 ++
 .../apache/hugegraph/pd/client/PDWatchImpl.java    |  204 +++
 .../apache/hugegraph/pd/pulse/PartitionNotice.java |   34 +-
 .../hugegraph/pd/pulse/PulseServerNotice.java      |   21 +-
 .../org/apache/hugegraph/pd/watch/NodeEvent.java   |  100 ++
 .../org/apache/hugegraph/pd/watch/PDWatcher.java   |    7 +-
 .../apache/hugegraph/pd/watch/PartitionEvent.java  |   94 ++
 .../org/apache/hugegraph/pd/watch/WatchType.java   |   13 +-
 .../hg-pd-common}/pom.xml                          |   32 +-
 .../org/apache/hugegraph/pd/common/GraphCache.java |   62 +
 .../org/apache/hugegraph/pd/common/HgAssert.java   |  117 ++
 .../org/apache/hugegraph/pd/common/KVPair.java     |  132 ++
 .../apache/hugegraph/pd/common/PDException.java    |   37 +-
 .../hugegraph/pd/common/PDRuntimeException.java    |   35 +-
 .../apache/hugegraph/pd/common/PartitionCache.java |  458 +++++++
 .../apache/hugegraph/pd/common/PartitionUtils.java |   36 +-
 hugegraph-pd/hg-pd-grpc/pom.xml                    |  138 ++
 .../hg-pd-grpc/src/main/proto/discovery.proto      |   71 ++
 hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto    |  143 +++
 .../hg-pd-grpc/src/main/proto/metaTask.proto       |   64 +
 .../hg-pd-grpc/src/main/proto/metapb.proto         |  394 ++++++
 .../hg-pd-grpc/src/main/proto/pd_common.proto      |   46 +-
 .../hg-pd-grpc/src/main/proto/pd_pulse.proto       |  172 +++
 .../hg-pd-grpc/src/main/proto/pd_watch.proto       |  103 ++
 hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto  |  607 +++++++++
 hugegraph-pd/hg-pd-test/pom.xml                    |  259 ++++
 .../apache/hugegraph/pd/common/BaseCommonTest.java |   17 +-
 .../hugegraph/pd/common/CommonSuiteTest.java       |   20 +-
 .../apache/hugegraph/pd/common/HgAssertTest.java   |  132 ++
 .../org/apache/hugegraph/pd/common/KVPairTest.java |   72 ++
 .../hugegraph/pd/common/PartitionCacheTest.java    |  388 ++++++
 .../hugegraph/pd/common/PartitionUtilsTest.java    |   54 +
 .../hg-pd-test/src/main/resources}/log4j2.xml      |  137 +-
 hugegraph-pd/pom.xml                               |  184 +++
 pom.xml                                            |   23 +-
 54 files changed, 7705 insertions(+), 235 deletions(-)
 create mode 100644 .github/workflows/pd-store.yml
 create mode 100644 hugegraph-pd/.gitignore
 copy {hugegraph-server/hugegraph-palo => hugegraph-pd/hg-pd-client}/pom.xml (55%)
 create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java
 create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java
 copy hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/MetaDispatcher.java => hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java (51%)
 create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java
 copy hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/Transaction.java => hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java (74%)
 create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java
 create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java
 create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java
 create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java
 create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
 create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java
 create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java
 create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java
 create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java
 create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java
 copy hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/job/computer/TriangleCountComputer.java => hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java (54%)
 copy hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/auth/AuthConstant.java => hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java (72%)
 create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java
 copy hugegraph-server/hugegraph-dist/docker/scripts/remote-connect.groovy => hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java (92%)
 create mode 100644 hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java
 copy hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/define/Checkable.java => hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java (82%)
 copy {hugegraph-server/hugegraph-palo => hugegraph-pd/hg-pd-common}/pom.xml (67%)
 create mode 100644 hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java
 create mode 100644 hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java
 create mode 100644 hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java
 copy hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/schema/builder/SchemaBuilder.java => hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java (58%)
 copy hugegraph-server/hugegraph-core/src/main/java/org/apache/hugegraph/backend/store/BackendAction.java => hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java (54%)
 create mode 100644 hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java
 copy hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/api/filter/RedirectFilterDynamicFeature.java => hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java (53%)
 create mode 100644 hugegraph-pd/hg-pd-grpc/pom.xml
 create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/discovery.proto
 create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto
 create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/metaTask.proto
 create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/metapb.proto
 copy hugegraph-server/hugegraph-cassandra/src/main/java/org/apache/hugegraph/backend/store/cassandra/CassandraBackendEntry.java => hugegraph-pd/hg-pd-grpc/src/main/proto/pd_common.proto (53%)
 create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/pd_pulse.proto
 create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/pd_watch.proto
 create mode 100644 hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto
 create mode 100644 hugegraph-pd/hg-pd-test/pom.xml
 copy hugegraph-server/hugegraph-api/src/main/java/org/apache/hugegraph/define/Checkable.java => hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java (77%)
 copy hugegraph-server/hugegraph-test/src/main/java/org/apache/hugegraph/tinkerpop/StructureStandardTest.java => hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java (72%)
 create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java
 create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java
 create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java
 create mode 100644 hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java
 copy {hugegraph-server/hugegraph-dist/src/assembly/static/conf => hugegraph-pd/hg-pd-test/src/main/resources}/log4j2.xml (58%)
 create mode 100644 hugegraph-pd/pom.xml


(incubator-hugegraph) 03/05: feat(pd): integrate `pd-common` submodule

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph.git

commit b5d9dd2f0201847a771fe886611af3ace1729488
Author: VGalaxies <vg...@apache.org>
AuthorDate: Thu Apr 4 00:00:09 2024 +0800

    feat(pd): integrate `pd-common` submodule
---
 hugegraph-pd/hg-pd-common/pom.xml                  |  54 +++
 .../org/apache/hugegraph/pd/common/GraphCache.java |  62 +++
 .../org/apache/hugegraph/pd/common/HgAssert.java   | 117 ++++++
 .../org/apache/hugegraph/pd/common/KVPair.java     | 132 ++++++
 .../apache/hugegraph/pd/common/PDException.java    |  47 +++
 .../hugegraph/pd/common/PDRuntimeException.java    |  49 +++
 .../apache/hugegraph/pd/common/PartitionCache.java | 458 +++++++++++++++++++++
 .../apache/hugegraph/pd/common/PartitionUtils.java |  47 +++
 8 files changed, 966 insertions(+)

diff --git a/hugegraph-pd/hg-pd-common/pom.xml b/hugegraph-pd/hg-pd-common/pom.xml
new file mode 100644
index 000000000..918c8deab
--- /dev/null
+++ b/hugegraph-pd/hg-pd-common/pom.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xmlns="http://maven.apache.org/POM/4.0.0"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+    <parent>
+        <groupId>org.apache.hugegraph</groupId>
+        <artifactId>hugegraph-pd</artifactId>
+        <version>${revision}</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+    <artifactId>hg-pd-common</artifactId>
+
+    <properties>
+        <maven.compiler.source>11</maven.compiler.source>
+        <maven.compiler.target>11</maven.compiler.target>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-grpc</artifactId>
+            <version>${revision}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.projectlombok</groupId>
+            <artifactId>lombok</artifactId>
+            <version>1.18.24</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-collections4</artifactId>
+            <version>4.4</version>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java
new file mode 100644
index 000000000..07c7c332d
--- /dev/null
+++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/GraphCache.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.hugegraph.pd.grpc.Metapb.Graph;
+import org.apache.hugegraph.pd.grpc.Metapb.Partition;
+
+import com.google.common.collect.RangeMap;
+import com.google.common.collect.TreeRangeMap;
+
+import lombok.Data;
+
+@Data
+public class GraphCache {
+
+    private Graph graph;
+    private AtomicBoolean initialized = new AtomicBoolean(false);
+    private AtomicBoolean writing = new AtomicBoolean(false);
+    private ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    private Map<Integer, AtomicBoolean> state = new ConcurrentHashMap<>();
+    private Map<Integer, Partition> partitions = new ConcurrentHashMap<>();
+    private RangeMap<Long, Integer> range = TreeRangeMap.create();
+
+    public GraphCache(Graph graph) {
+        this.graph = graph;
+    }
+
+    public GraphCache() {
+    }
+
+    public Partition getPartition(Integer id) {
+        return partitions.get(id);
+    }
+
+    public Partition addPartition(Integer id, Partition p) {
+        return partitions.put(id, p);
+    }
+
+    public Partition removePartition(Integer id) {
+        return partitions.remove(id);
+    }
+}
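
A minimal usage sketch of the GraphCache above. It assumes the generated
Metapb classes from hg-pd-grpc are on the classpath; the graph name and
partition range are illustrative values, not part of this commit:

    // Sketch: populate and query a GraphCache
    GraphCache cache = new GraphCache();
    Metapb.Partition p0 = Metapb.Partition.newBuilder()
                                          .setId(0)
                                          .setGraphName("graph-demo")
                                          .setStartKey(0L)
                                          .setEndKey(32768L)
                                          .build();
    cache.addPartition(0, p0);                    // ConcurrentHashMap.put under the hood
    Metapb.Partition hit = cache.getPartition(0); // returns p0
    cache.removePartition(0);                     // evicts the entry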
diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java
new file mode 100644
index 000000000..710f96f28
--- /dev/null
+++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/HgAssert.java
@@ -0,0 +1,117 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+import java.util.Collection;
+import java.util.Map;
+
+public final class HgAssert {
+
+    public static void isTrue(boolean expression, String message) {
+        if (message == null) {
+            throw new IllegalArgumentException("message is null");
+        }
+
+        if (!expression) {
+            throw new IllegalArgumentException(message);
+        }
+    }
+
+    public static void isFalse(boolean expression, String message) {
+        isTrue(!expression, message);
+    }
+
+    public static void isArgumentValid(byte[] bytes, String parameter) {
+        isFalse(isInvalid(bytes), "The argument is invalid: " + parameter);
+    }
+
+    public static void isArgumentValid(String str, String parameter) {
+        isFalse(isInvalid(str), "The argument is invalid: " + parameter);
+    }
+
+    public static void isArgumentNotNull(Object obj, String parameter) {
+        isTrue(obj != null, "The argument is null: " + parameter);
+    }
+
+    public static void isValid(byte[] bytes, String msg) {
+        isFalse(isInvalid(bytes), msg);
+    }
+
+    public static void isValid(String str, String msg) {
+        isFalse(isInvalid(str), msg);
+    }
+
+    public static void isNotNull(Object obj, String msg) {
+        isTrue(obj != null, msg);
+    }
+
+    public static boolean isContains(Object[] objs, Object obj) {
+        if (objs == null || objs.length == 0 || obj == null) {
+            return false;
+        }
+        for (Object item : objs) {
+            if (obj.equals(item)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    public static boolean isInvalid(String... strs) {
+        if (strs == null || strs.length == 0) {
+            return true;
+        }
+        for (String item : strs) {
+            if (item == null || "".equals(item.trim())) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    public static boolean isInvalid(byte[] bytes) {
+        return bytes == null || bytes.length == 0;
+    }
+
+    public static boolean isInvalid(Map<?, ?> map) {
+        return map == null || map.isEmpty();
+    }
+
+    public static boolean isInvalid(Collection<?> list) {
+        return list == null || list.isEmpty();
+    }
+
+    public static <T> boolean isContains(Collection<T> list, T item) {
+        if (list == null || item == null) {
+            return false;
+        }
+        return list.contains(item);
+    }
+
+    public static boolean isNull(Object... objs) {
+        if (objs == null) {
+            return true;
+        }
+        for (Object item : objs) {
+            if (item == null) {
+                return true;
+            }
+        }
+        return false;
+    }
+}
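
A brief sketch of how these assertions are typically used as guard clauses
at API boundaries (the method and argument names are illustrative):

    // Sketch: validate arguments with HgAssert before doing work
    public void putKey(String graphName, byte[] key) {
        HgAssert.isArgumentValid(graphName, "graphName"); // rejects null/blank strings
        HgAssert.isArgumentValid(key, "key");             // rejects null/empty byte arrays
        // ... proceed with validated arguments
    }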
diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java
new file mode 100644
index 000000000..b5e916c48
--- /dev/null
+++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/KVPair.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+import java.io.Serializable;
+import java.util.Objects;
+
+public class KVPair<K, V> implements Serializable {
+
+    /**
+     * Key of this <code>Pair</code>.
+     */
+    private K key;
+    /**
+     * Value of this <code>Pair</code>.
+     */
+    private V value;
+
+    /**
+     * Creates a new pair
+     *
+     * @param key   The key for this pair
+     * @param value The value to use for this pair
+     */
+    public KVPair(K key, V value) {
+        this.key = key;
+        this.value = value;
+    }
+
+    /**
+     * Gets the key for this pair.
+     *
+     * @return key for this pair
+     */
+    public K getKey() {
+        return key;
+    }
+
+    public void setKey(K key) {
+        this.key = key;
+    }
+
+    /**
+     * Gets the value for this pair.
+     *
+     * @return value for this pair
+     */
+    public V getValue() {
+        return value;
+    }
+
+    public void setValue(V value) {
+        this.value = value;
+    }
+
+    /**
+     * <p><code>String</code> representation of this
+     * <code>Pair</code>.</p>
+     *
+     * <p>The default name/value delimiter '=' is always used.</p>
+     *
+     * @return <code>String</code> representation of this <code>Pair</code>
+     */
+    @Override
+    public String toString() {
+        return key + "=" + value;
+    }
+
+    /**
+     * <p>Generate a hash code for this <code>Pair</code>.</p>
+     *
+     * <p>The hash code is calculated using both the name and
+     * the value of the <code>Pair</code>.</p>
+     *
+     * @return hash code for this <code>Pair</code>
+     */
+    @Override
+    public int hashCode() {
+        // the key's hashCode is multiplied by an arbitrary prime number (13)
+        // in order to make sure there is a difference in the hashCode between
+        // these two pairs:
+        //  key: a  value: aa
+        //  key: aa value: a
+        return (key == null ? 0 : key.hashCode()) * 13 + (value == null ? 0 : value.hashCode());
+    }
+
+    /**
+     * <p>Test this <code>Pair</code> for equality with another
+     * <code>Object</code>.</p>
+     *
+     * <p>If the <code>Object</code> to be tested is not a
+     * <code>Pair</code> or is <code>null</code>, then this method
+     * returns <code>false</code>.</p>
+     *
+     * <p>Two <code>Pair</code>s are considered equal if and only if
+     * both the names and values are equal.</p>
+     *
+     * @param o the <code>Object</code> to test for
+     *          equality with this <code>Pair</code>
+     * @return <code>true</code> if the given <code>Object</code> is
+     * equal to this <code>Pair</code> else <code>false</code>
+     */
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o instanceof KVPair) {
+            KVPair pair = (KVPair) o;
+            if (!Objects.equals(key, pair.key)) {
+                return false;
+            }
+            return Objects.equals(value, pair.value);
+        }
+        return false;
+    }
+}
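
A quick sketch of KVPair semantics, exercising the value-based equals,
hashCode, and toString defined above:

    // Sketch: KVPair is a simple serializable value pair
    KVPair<String, Integer> a = new KVPair<>("part", 1);
    KVPair<String, Integer> b = new KVPair<>("part", 1);
    assert a.equals(b);                   // equal by key and value
    assert a.hashCode() == b.hashCode();  // consistent with equals
    assert "part=1".equals(a.toString()); // '=' delimited representation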
diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java
new file mode 100644
index 000000000..b398137e8
--- /dev/null
+++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDException.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+public class PDException extends Exception {
+
+    private final int errorCode;
+
+    public PDException(int error) {
+        super(String.format("Error code = %d", error));
+        this.errorCode = error;
+    }
+
+    public PDException(int error, String msg) {
+        super(msg);
+        this.errorCode = error;
+    }
+
+    public PDException(int error, Throwable e) {
+        super(e);
+        this.errorCode = error;
+    }
+
+    public PDException(int error, String msg, Throwable e) {
+        super(msg, e);
+        this.errorCode = error;
+    }
+
+    public int getErrorCode() {
+        return errorCode;
+    }
+}
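
A small sketch of how the checked PDException carries a numeric error code
to callers (the code value below is illustrative, not a defined constant):

    // Sketch: raise and inspect a PDException
    try {
        throw new PDException(103, "partition not found");
    } catch (PDException e) {
        int code = e.getErrorCode(); // 103
        String msg = e.getMessage(); // "partition not found"
    }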
diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java
new file mode 100644
index 000000000..0bd90241d
--- /dev/null
+++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PDRuntimeException.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+public class PDRuntimeException extends RuntimeException {
+
+    // public static final int LICENSE_ERROR = -11;
+
+    private int errorCode = 0;
+
+    public PDRuntimeException(int error) {
+        super(String.format("Error code = %d", error));
+        this.errorCode = error;
+    }
+
+    public PDRuntimeException(int error, String msg) {
+        super(msg);
+        this.errorCode = error;
+    }
+
+    public PDRuntimeException(int error, Throwable e) {
+        super(e);
+        this.errorCode = error;
+    }
+
+    public PDRuntimeException(int error, String msg, Throwable e) {
+        super(msg, e);
+        this.errorCode = error;
+    }
+
+    public int getErrorCode() {
+        return errorCode;
+    }
+}
diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java
new file mode 100644
index 000000000..9bd233fd2
--- /dev/null
+++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionCache.java
@@ -0,0 +1,458 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+
+import org.apache.hugegraph.pd.grpc.Metapb;
+
+import com.google.common.collect.Range;
+import com.google.common.collect.RangeMap;
+import com.google.common.collect.TreeRangeMap;
+
+/**
+ * Abandons the copy-on-write approach:
+ * 1. when the graph * partition count is very large, efficiency degrades severely, making it unusable
+ */
+public class PartitionCache {
+
+    // read-write lock
+    private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+    private final Map<String, AtomicBoolean> locks = new HashMap<>();
+    Lock writeLock = readWriteLock.writeLock();
+    // one cache per graph
+    private volatile Map<String, RangeMap<Long, Integer>> keyToPartIdCache;
+    // keyed by graphName + PartitionID
+    private volatile Map<String, Map<Integer, Metapb.Partition>> partitionCache;
+    private volatile Map<Integer, Metapb.ShardGroup> shardGroupCache;
+    private volatile Map<Long, Metapb.Store> storeCache;
+    private volatile Map<String, Metapb.Graph> graphCache;
+
+    public PartitionCache() {
+        keyToPartIdCache = new HashMap<>();
+        partitionCache = new HashMap<>();
+        shardGroupCache = new ConcurrentHashMap<>();
+        storeCache = new ConcurrentHashMap<>();
+        graphCache = new ConcurrentHashMap<>();
+    }
+
+    private AtomicBoolean getOrCreateGraphLock(String graphName) {
+        var lock = this.locks.get(graphName);
+        if (lock == null) {
+            try {
+                writeLock.lock();
+                if ((lock = this.locks.get(graphName)) == null) {
+                    lock = new AtomicBoolean();
+                    locks.put(graphName, lock);
+                }
+            } finally {
+                writeLock.unlock();
+            }
+        }
+        return lock;
+    }
+
+    public void waitGraphLock(String graphName) {
+        var lock = getOrCreateGraphLock(graphName);
+        while (lock.get()) {
+            Thread.onSpinWait();
+        }
+    }
+
+    public void lockGraph(String graphName) {
+        var lock = getOrCreateGraphLock(graphName);
+        while (!lock.compareAndSet(false, true)) {
+            Thread.onSpinWait();
+        }
+    }
+
+    public void unlockGraph(String graphName) {
+        var lock = getOrCreateGraphLock(graphName);
+        lock.set(false);
+    }
+
+    /**
+     * Returns the partition info for the given partition id.
+     *
+     * @param graphName graph name
+     * @param partId    partition id
+     * @return the partition and its leader shard, or null if absent
+     */
+    public KVPair<Metapb.Partition, Metapb.Shard> getPartitionById(String graphName, int partId) {
+        waitGraphLock(graphName);
+        var graphs = partitionCache.get(graphName);
+        if (graphs != null) {
+            var partition = graphs.get(partId);
+            if (partition != null) {
+                return new KVPair<>(partition, getLeaderShard(partId));
+            }
+        }
+
+        return null;
+    }
+
+    /**
+     * Returns the partition info that the given key falls into.
+     *
+     * @param key raw key bytes
+     * @return the partition and its leader shard, or null if absent
+     */
+    public KVPair<Metapb.Partition, Metapb.Shard> getPartitionByKey(String graphName, byte[] key) {
+        int code = PartitionUtils.calcHashcode(key);
+        return getPartitionByCode(graphName, code);
+    }
+
+    /**
+     * Returns the partition info for the given hash code of a key.
+     *
+     * @param graphName graph name
+     * @param code      hash code computed from a key
+     * @return the partition and its leader shard, or null if absent
+     */
+    public KVPair<Metapb.Partition, Metapb.Shard> getPartitionByCode(String graphName, long code) {
+        waitGraphLock(graphName);
+        RangeMap<Long, Integer> rangeMap = keyToPartIdCache.get(graphName);
+        if (rangeMap != null) {
+            Integer partId = rangeMap.get(code);
+            if (partId != null) {
+                return getPartitionById(graphName, partId);
+            }
+        }
+        return null;
+    }
+
+    public List<Metapb.Partition> getPartitions(String graphName) {
+        waitGraphLock(graphName);
+
+        List<Metapb.Partition> partitions = new ArrayList<>();
+        if (!partitionCache.containsKey(graphName)) {
+            return partitions;
+        }
+        partitionCache.get(graphName).forEach((k, v) -> {
+            partitions.add(v);
+        });
+
+        return partitions;
+    }
+
+    public boolean addPartition(String graphName, int partId, Metapb.Partition partition) {
+        waitGraphLock(graphName);
+        Metapb.Partition old = null;
+
+        if (partitionCache.containsKey(graphName)) {
+            old = partitionCache.get(graphName).get(partId);
+        }
+
+        if (old != null && old.equals(partition)) {
+            return false;
+        }
+        try {
+
+            lockGraph(graphName);
+
+            partitionCache.computeIfAbsent(graphName, k -> new HashMap<>()).put(partId, partition);
+
+            if (old != null) {
+                // e.g. old [1-3) was overwritten by [2-3); when [1-3) becomes [1-2), do not remove the original [1-3)
+                // only remove the old range when both its start and end still map to this partition (i.e. not yet overwritten)
+                var graphRange = keyToPartIdCache.get(graphName);
+                if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) &&
+                    Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) {
+                    graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey());
+                }
+            }
+
+            keyToPartIdCache.computeIfAbsent(graphName, k -> TreeRangeMap.create())
+                            .put(Range.closedOpen(partition.getStartKey(),
+                                                  partition.getEndKey()), partId);
+        } finally {
+            unlockGraph(graphName);
+        }
+        return true;
+    }
+
+    public void updatePartition(String graphName, int partId, Metapb.Partition partition) {
+        try {
+            lockGraph(graphName);
+            Metapb.Partition old = null;
+            var graphs = partitionCache.get(graphName);
+            if (graphs != null) {
+                old = graphs.get(partId);
+            }
+
+            if (old != null) {
+                var graphRange = keyToPartIdCache.get(graphName);
+                if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) &&
+                    Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) {
+                    graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey());
+                }
+            }
+
+            partitionCache.computeIfAbsent(graphName, k -> new HashMap<>()).put(partId, partition);
+            keyToPartIdCache.computeIfAbsent(graphName, k -> TreeRangeMap.create())
+                            .put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()),
+                                 partId);
+        } finally {
+            unlockGraph(graphName);
+        }
+    }
+
+    public boolean updatePartition(Metapb.Partition partition) {
+
+        var graphName = partition.getGraphName();
+        var partitionId = partition.getId();
+
+        var old = getPartitionById(graphName, partitionId);
+        if (old != null && Objects.equals(partition, old.getKey())) {
+            return false;
+        }
+
+        updatePartition(graphName, partitionId, partition);
+        return true;
+    }
+
+    public void removePartition(String graphName, int partId) {
+        try {
+            lockGraph(graphName);
+            var partition = partitionCache.get(graphName).remove(partId);
+            if (partition != null) {
+                var graphRange = keyToPartIdCache.get(graphName);
+
+                if (Objects.equals(partition.getId(), graphRange.get(partition.getStartKey())) &&
+                    Objects.equals(partition.getId(), graphRange.get(partition.getEndKey() - 1))) {
+                    graphRange.remove(graphRange.getEntry(partition.getStartKey()).getKey());
+                }
+            }
+        } finally {
+            unlockGraph(graphName);
+        }
+    }
+
+    /**
+     * Removes the partition with the given id from the graph's cache.
+     *
+     * @param graphName graph name
+     * @param id        partition id
+     */
+    public void remove(String graphName, int id) {
+        removePartition(graphName, id);
+    }
+
+    /**
+     * Removes all cached partitions across all graphs.
+     */
+    public void removePartitions() {
+        writeLock.lock();
+        try {
+            partitionCache = new HashMap<>();
+            keyToPartIdCache = new HashMap<>();
+            locks.clear();
+        } finally {
+            writeLock.unlock();
+        }
+    }
+
+    /**
+     * Removes all cached partition data for the given graph.
+     *
+     * @param graphName graph name
+     */
+    public void removeAll(String graphName) {
+        try {
+            lockGraph(graphName);
+            partitionCache.remove(graphName);
+            keyToPartIdCache.remove(graphName);
+            locks.remove(graphName);
+        } finally {
+            unlockGraph(graphName);
+        }
+    }
+
+    private String makePartitionKey(String graphName, int partId) {
+        return graphName + "/" + partId;
+    }
+
+    public boolean updateShardGroup(Metapb.ShardGroup shardGroup) {
+        Metapb.ShardGroup oldShardGroup = shardGroupCache.get(shardGroup.getId());
+        if (oldShardGroup != null && oldShardGroup.equals(shardGroup)) {
+            return false;
+        }
+        shardGroupCache.put(shardGroup.getId(), shardGroup);
+        return true;
+    }
+
+    public void deleteShardGroup(int shardGroupId) {
+        shardGroupCache.remove(shardGroupId);
+    }
+
+    public Metapb.ShardGroup getShardGroup(int groupId) {
+        return shardGroupCache.get(groupId);
+    }
+
+    public boolean addStore(Long storeId, Metapb.Store store) {
+        Metapb.Store oldStore = storeCache.get(storeId);
+        if (oldStore != null && oldStore.equals(store)) {
+            return false;
+        }
+        storeCache.put(storeId, store);
+        return true;
+    }
+
+    public Metapb.Store getStoreById(Long storeId) {
+        return storeCache.get(storeId);
+    }
+
+    public void removeStore(Long storeId) {
+        storeCache.remove(storeId);
+    }
+
+    public boolean hasGraph(String graphName) {
+        return !getPartitions(graphName).isEmpty();
+    }
+
+    public void updateGraph(Metapb.Graph graph) {
+        if (Objects.equals(graph, getGraph(graph.getGraphName()))) {
+            return;
+        }
+        graphCache.put(graph.getGraphName(), graph);
+    }
+
+    public Metapb.Graph getGraph(String graphName) {
+        return graphCache.get(graphName);
+    }
+
+    public List<Metapb.Graph> getGraphs() {
+        List<Metapb.Graph> graphs = new ArrayList<>();
+        graphCache.forEach((k, v) -> {
+            graphs.add(v);
+        });
+        return graphs;
+    }
+
+    public void reset() {
+        writeLock.lock();
+        try {
+            partitionCache = new HashMap<>();
+            keyToPartIdCache = new HashMap<>();
+            shardGroupCache = new ConcurrentHashMap<>();
+            storeCache = new ConcurrentHashMap<>();
+            graphCache = new ConcurrentHashMap<>();
+            locks.clear();
+        } finally {
+            writeLock.unlock();
+        }
+    }
+
+    public void clear() {
+        reset();
+    }
+
+    public String debugCacheByGraphName(String graphName) {
+        StringBuilder builder = new StringBuilder();
+        builder.append("Graph:").append(graphName).append(", cache info: range info: {");
+        var rangeMap = keyToPartIdCache.get(graphName);
+        builder.append(rangeMap == null ? "" : rangeMap).append("}");
+
+        if (rangeMap != null) {
+            builder.append(", partition info : {");
+            rangeMap.asMapOfRanges().forEach((k, v) -> {
+                var partition = partitionCache.get(graphName).get(v);
+                builder.append("[part_id:").append(v);
+                if (partition != null) {
+                    builder.append(", start_key:").append(partition.getStartKey())
+                           .append(", end_key:").append(partition.getEndKey())
+                           .append(", state:").append(partition.getState().name());
+                }
+                builder.append("], ");
+            });
+            builder.append("}");
+        }
+
+        builder.append(", graph info:{");
+        var graph = graphCache.get(graphName);
+        if (graph != null) {
+            builder.append("partition_count:").append(graph.getPartitionCount())
+                   .append(", state:").append(graph.getState().name());
+        }
+        builder.append("}]");
+        return builder.toString();
+    }
+
+    public Metapb.Shard getLeaderShard(int partitionId) {
+        var shardGroup = shardGroupCache.get(partitionId);
+        if (shardGroup != null) {
+            for (Metapb.Shard shard : shardGroup.getShardsList()) {
+                if (shard.getRole() == Metapb.ShardRole.Leader) {
+                    return shard;
+                }
+            }
+        }
+
+        return null;
+    }
+
+    public void updateShardGroupLeader(int partitionId, Metapb.Shard leader) {
+        if (shardGroupCache.containsKey(partitionId) && leader != null) {
+            if (!Objects.equals(getLeaderShard(partitionId), leader)) {
+                var shardGroup = shardGroupCache.get(partitionId);
+                var builder = Metapb.ShardGroup.newBuilder(shardGroup).clearShards();
+                for (var shard : shardGroup.getShardsList()) {
+                    builder.addShards(
+                            Metapb.Shard.newBuilder()
+                                        .setStoreId(shard.getStoreId())
+                                        .setRole(shard.getStoreId() == leader.getStoreId() ?
+                                                 Metapb.ShardRole.Leader :
+                                                 Metapb.ShardRole.Follower)
+                                        .build()
+                    );
+                }
+                shardGroupCache.put(partitionId, builder.build());
+            }
+        }
+    }
+
+    public String debugShardGroup() {
+        StringBuilder builder = new StringBuilder();
+        builder.append("shard group cache:{");
+        shardGroupCache.forEach((partitionId, shardGroup) -> {
+            builder.append(partitionId).append("::{")
+                   .append("version:").append(shardGroup.getVersion())
+                   .append(", conf_version:").append(shardGroup.getConfVer())
+                   .append(", state:").append(shardGroup.getState().name())
+                   .append(", shards:[");
+
+            for (var shard : shardGroup.getShardsList()) {
+                builder.append("{store_id:").append(shard.getStoreId())
+                       .append(", role:").append(shard.getRole().name())
+                       .append("},");
+            }
+            builder.append("], ");
+        });
+        builder.append("}");
+        return builder.toString();
+    }
+}
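
A usage sketch of the PartitionCache above: writers take the per-graph spin
lock and keep the partition map and the RangeMap in step, while readers
resolve a key to its partition and leader shard. The graph name and range
below are illustrative:

    // Sketch: register a partition, then resolve a key to it
    PartitionCache cache = new PartitionCache();
    Metapb.Partition part = Metapb.Partition.newBuilder()
                                            .setId(0)
                                            .setGraphName("graph-demo")
                                            .setStartKey(0L)
                                            .setEndKey(65535L) // covers all hash codes
                                            .build();
    cache.addPartition("graph-demo", 0, part); // locks the graph, updates both maps
    KVPair<Metapb.Partition, Metapb.Shard> hit =
            cache.getPartitionByKey("graph-demo", "some-key".getBytes());
    // hit.getKey() is the partition; hit.getValue() is the leader shard, which
    // stays null until updateShardGroup()/updateShardGroupLeader() populate it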
diff --git a/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java
new file mode 100644
index 000000000..0e35cc555
--- /dev/null
+++ b/hugegraph-pd/hg-pd-common/src/main/java/org/apache/hugegraph/pd/common/PartitionUtils.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+public class PartitionUtils {
+
+    public static final int MAX_VALUE = 0xffff;
+
+    /**
+     * Computes the hash code of the key (an FNV-1 variant bounded to [0, MAX_VALUE)).
+     *
+     * @param key raw key bytes
+     * @return hash code in the range [0, MAX_VALUE)
+     */
+    public static int calcHashcode(byte[] key) {
+        final int p = 16777619;
+        int hash = (int) 2166136261L;
+        for (byte element : key) {
+            hash = (hash ^ element) * p;
+        }
+        hash += hash << 13;
+        hash ^= hash >> 7;
+        hash += hash << 3;
+        hash ^= hash >> 17;
+        hash += hash << 5;
+        hash = hash & PartitionUtils.MAX_VALUE;
+        if (hash == PartitionUtils.MAX_VALUE) {
+            hash = PartitionUtils.MAX_VALUE - 1;
+        }
+        return hash;
+    }
+}
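
A worked example of the hash above: calcHashcode is an FNV-1-style hash with
extra bit mixing, masked into [0, MAX_VALUE), so every key maps
deterministically to a code that partition ranges over [startKey, endKey)
can cover:

    // Sketch: the partition code of a key is stable and bounded
    int code = PartitionUtils.calcHashcode("vertex:1".getBytes());
    assert 0 <= code && code < PartitionUtils.MAX_VALUE; // always below 0xffff
    int again = PartitionUtils.calcHashcode("vertex:1".getBytes());
    assert code == again; // same key, same partition code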


(incubator-hugegraph) 05/05: feat(pd): integrate `pd-client` submodule

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph.git

commit 3a1618faa28b271da6ff8d262f69b7a67d364a86
Author: VGalaxies <vg...@apache.org>
AuthorDate: Thu Apr 4 00:00:57 2024 +0800

    feat(pd): integrate `pd-client` submodule
---
 hugegraph-pd/hg-pd-client/pom.xml                  |   73 ++
 .../apache/hugegraph/pd/client/AbstractClient.java |  265 ++++
 .../pd/client/AbstractClientStubProxy.java         |   72 ++
 .../org/apache/hugegraph/pd/client/Channels.java   |   44 +
 .../apache/hugegraph/pd/client/ClientCache.java    |  338 +++++
 .../apache/hugegraph/pd/client/Discoverable.java   |   30 +
 .../hugegraph/pd/client/DiscoveryClient.java       |  221 ++++
 .../hugegraph/pd/client/DiscoveryClientImpl.java   |  137 ++
 .../org/apache/hugegraph/pd/client/KvClient.java   |  343 +++++
 .../apache/hugegraph/pd/client/LicenseClient.java  |   71 ++
 .../org/apache/hugegraph/pd/client/PDClient.java   | 1347 ++++++++++++++++++++
 .../org/apache/hugegraph/pd/client/PDConfig.java   |   83 ++
 .../org/apache/hugegraph/pd/client/PDPulse.java    |  154 +++
 .../apache/hugegraph/pd/client/PDPulseImpl.java    |  197 +++
 .../org/apache/hugegraph/pd/client/PDWatch.java    |  140 ++
 .../apache/hugegraph/pd/client/PDWatchImpl.java    |  204 +++
 .../apache/hugegraph/pd/pulse/PartitionNotice.java |   50 +
 .../hugegraph/pd/pulse/PulseServerNotice.java      |   36 +
 .../org/apache/hugegraph/pd/watch/NodeEvent.java   |  100 ++
 .../org/apache/hugegraph/pd/watch/PDWatcher.java   |   22 +
 .../apache/hugegraph/pd/watch/PartitionEvent.java  |   94 ++
 .../org/apache/hugegraph/pd/watch/WatchType.java   |   30 +
 22 files changed, 4051 insertions(+)

diff --git a/hugegraph-pd/hg-pd-client/pom.xml b/hugegraph-pd/hg-pd-client/pom.xml
new file mode 100644
index 000000000..a64756fe9
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/pom.xml
@@ -0,0 +1,73 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xmlns="http://maven.apache.org/POM/4.0.0"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.hugegraph</groupId>
+        <artifactId>hugegraph-pd</artifactId>
+        <version>${revision}</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+    <artifactId>hg-pd-client</artifactId>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.projectlombok</groupId>
+            <artifactId>lombok</artifactId>
+            <version>1.18.20</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-slf4j-impl</artifactId>
+            <version>2.17.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-grpc</artifactId>
+            <version>${revision}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-common</artifactId>
+            <version>${revision}</version>
+            <scope>compile</scope>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.13.2</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>commons-io</groupId>
+            <artifactId>commons-io</artifactId>
+            <version>2.8.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.yaml</groupId>
+            <artifactId>snakeyaml</artifactId>
+            <version>1.28</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+</project>
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java
new file mode 100644
index 000000000..874ef6f67
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClient.java
@@ -0,0 +1,265 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.io.Closeable;
+import java.util.LinkedList;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Predicate;
+import java.util.stream.Stream;
+
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.PDGrpc;
+import org.apache.hugegraph.pd.grpc.PDGrpc.PDBlockingStub;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetMembersRequest;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetMembersResponse;
+
+import io.grpc.Channel;
+import io.grpc.ClientCall;
+import io.grpc.ManagedChannel;
+import io.grpc.ManagedChannelBuilder;
+import io.grpc.MethodDescriptor;
+import io.grpc.StatusRuntimeException;
+import io.grpc.stub.AbstractBlockingStub;
+import io.grpc.stub.AbstractStub;
+import io.grpc.stub.ClientCalls;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public abstract class AbstractClient implements Closeable {
+
+    private static final ConcurrentHashMap<String, ManagedChannel> chs = new ConcurrentHashMap<>();
+    public static Pdpb.ResponseHeader okHeader = Pdpb.ResponseHeader.newBuilder().setError(
+            Pdpb.Error.newBuilder().setType(Pdpb.ErrorType.OK)).build();
+    protected final Pdpb.RequestHeader header;
+    protected final AbstractClientStubProxy stubProxy;
+    protected final PDConfig config;
+    protected ManagedChannel channel = null;
+    protected volatile ConcurrentMap<String, AbstractBlockingStub> stubs = null;
+
+    protected AbstractClient(PDConfig config) {
+        String[] hosts = config.getServerHost().split(",");
+        this.stubProxy = new AbstractClientStubProxy(hosts);
+        this.header = Pdpb.RequestHeader.getDefaultInstance();
+        this.config = config;
+    }
+
+    public static Pdpb.ResponseHeader newErrorHeader(int errorCode, String errorMsg) {
+        Pdpb.ResponseHeader header = Pdpb.ResponseHeader.newBuilder().setError(
+                Pdpb.Error.newBuilder().setTypeValue(errorCode).setMessage(errorMsg)).build();
+        return header;
+    }
+
+    protected static void handleErrors(Pdpb.ResponseHeader header) throws PDException {
+        if (header.hasError() && header.getError().getType() != Pdpb.ErrorType.OK) {
+            throw new PDException(header.getError().getTypeValue(),
+                                  String.format("PD request error, error code = %d, msg = %s",
+                                                header.getError().getTypeValue(),
+                                                header.getError().getMessage()));
+        }
+    }
+
+    protected AbstractBlockingStub getBlockingStub() throws PDException {
+        if (stubProxy.getBlockingStub() == null) {
+            synchronized (this) {
+                if (stubProxy.getBlockingStub() == null) {
+                    String host = resetStub();
+                    if (host.isEmpty()) {
+                        throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE,
+                                              "PD unreachable, pd.peers=" +
+                                              config.getServerHost());
+                    }
+                }
+            }
+        }
+        return (AbstractBlockingStub) stubProxy.getBlockingStub()
+                                               .withDeadlineAfter(config.getGrpcTimeOut(),
+                                                                  TimeUnit.MILLISECONDS);
+    }
+
+    protected AbstractStub getStub() throws PDException {
+        if (stubProxy.getStub() == null) {
+            synchronized (this) {
+                if (stubProxy.getStub() == null) {
+                    String host = resetStub();
+                    if (host.isEmpty()) {
+                        throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE,
+                                              "PD unreachable, pd.peers=" +
+                                              config.getServerHost());
+                    }
+                }
+            }
+        }
+        return stubProxy.getStub();
+    }
+
+    protected abstract AbstractStub createStub();
+
+    protected abstract AbstractBlockingStub createBlockingStub();
+
+    private String resetStub() {
+        String leaderHost = "";
+        for (int i = 0; i < stubProxy.getHostCount(); i++) {
+            String host = stubProxy.nextHost();
+            channel = ManagedChannelBuilder.forTarget(host).usePlaintext().build();
+            PDBlockingStub blockingStub = PDGrpc.newBlockingStub(channel)
+                                                .withDeadlineAfter(config.getGrpcTimeOut(),
+                                                                   TimeUnit.MILLISECONDS);
+            try {
+                GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder()
+                                                                  .setHeader(header).build();
+                GetMembersResponse members = blockingStub.getMembers(request);
+                Metapb.Member leader = members.getLeader();
+                leaderHost = leader.getGrpcUrl();
+                close();
+                channel = ManagedChannelBuilder.forTarget(leaderHost).usePlaintext().build();
+                stubProxy.setBlockingStub(createBlockingStub());
+                stubProxy.setStub(createStub());
+                log.info("PDClient connect to host = {} success", leaderHost);
+                break;
+            } catch (Exception e) {
+                log.error("PDClient connect to {} exception {}, {}", host, e.getMessage(),
+                          e.getCause() != null ? e.getCause().getMessage() : "");
+            }
+        }
+        return leaderHost;
+    }
+
+    protected <ReqT, RespT, StubT extends AbstractBlockingStub<StubT>> RespT blockingUnaryCall(
+            MethodDescriptor<ReqT, RespT> method, ReqT req) throws PDException {
+        return blockingUnaryCall(method, req, 5);
+    }
+
+    protected <ReqT, RespT, StubT extends AbstractBlockingStub<StubT>> RespT blockingUnaryCall(
+            MethodDescriptor<ReqT, RespT> method, ReqT req, int retry) throws PDException {
+        AbstractBlockingStub stub = getBlockingStub();
+        try {
+            RespT resp =
+                    ClientCalls.blockingUnaryCall(stub.getChannel(), method, stub.getCallOptions(),
+                                                  req);
+            return resp;
+        } catch (Exception e) {
+            log.error("{} exception: {}", method.getFullMethodName(), e.getMessage());
+            if (e instanceof StatusRuntimeException) {
+                if (retry < stubProxy.getHostCount()) {
+                    // Network unreachable: drop the current stub and reconnect to the next host
+                    synchronized (this) {
+                        stubProxy.setBlockingStub(null);
+                    }
+                    return blockingUnaryCall(method, req, ++retry);
+                }
+            }
+        }
+        return null;
+    }
+
+    // Lazily caches one blocking stub per host, used by concurrentBlockingUnaryCall()
+    private AbstractBlockingStub getConcurrentBlockingStub(String address) {
+        AbstractBlockingStub stub = stubs.get(address);
+        if (stub != null) {
+            return stub;
+        }
+        Channel ch = ManagedChannelBuilder.forTarget(address).usePlaintext().build();
+        PDBlockingStub blockingStub =
+                PDGrpc.newBlockingStub(ch).withDeadlineAfter(config.getGrpcTimeOut(),
+                                                             TimeUnit.MILLISECONDS);
+        stubs.put(address, blockingStub);
+        return blockingStub;
+
+    }
+
+    protected <ReqT, RespT> KVPair<Boolean, RespT> concurrentBlockingUnaryCall(
+            MethodDescriptor<ReqT, RespT> method, ReqT req, Predicate<RespT> predicate) {
+        LinkedList<String> hostList = this.stubProxy.getHostList();
+        if (this.stubs == null) {
+            synchronized (this) {
+                if (this.stubs == null) {
+                    this.stubs = new ConcurrentHashMap<>(hostList.size());
+                }
+            }
+        }
+        Stream<RespT> respTStream = hostList.parallelStream().map((address) -> {
+            AbstractBlockingStub stub = getConcurrentBlockingStub(address);
+            RespT resp = ClientCalls.blockingUnaryCall(stub.getChannel(),
+                                                       method, stub.getCallOptions(), req);
+            return resp;
+        });
+        KVPair<Boolean, RespT> pair;
+        AtomicReference<RespT> response = new AtomicReference<>();
+        boolean result = respTStream.anyMatch((r) -> {
+            response.set(r);
+            return predicate.test(r);
+        });
+        if (result) {
+            pair = new KVPair<>(true, null);
+        } else {
+            pair = new KVPair<>(false, response.get());
+        }
+        return pair;
+    }
+
+    protected <ReqT, RespT> void streamingCall(MethodDescriptor<ReqT, RespT> method, ReqT request,
+                                               StreamObserver<RespT> responseObserver,
+                                               int retry) throws PDException {
+        AbstractStub stub = getStub();
+        try {
+            ClientCall<ReqT, RespT> call = stub.getChannel().newCall(method, stub.getCallOptions());
+            ClientCalls.asyncServerStreamingCall(call, request, responseObserver);
+        } catch (Exception e) {
+            if (e instanceof StatusRuntimeException) {
+                if (retry < stubProxy.getHostCount()) {
+                    synchronized (this) {
+                        stubProxy.setStub(null);
+                    }
+                    streamingCall(method, request, responseObserver, ++retry);
+                    return;
+                }
+            }
+            log.error("rpc call with exception, {}", e.getMessage());
+        }
+    }
+
+    @Override
+    public void close() {
+        closeChannel(channel);
+        if (stubs != null) {
+            for (AbstractBlockingStub stub : stubs.values()) {
+                closeChannel((ManagedChannel) stub.getChannel());
+            }
+        }
+
+    }
+
+    private void closeChannel(ManagedChannel channel) {
+        try {
+            while (channel != null &&
+                   !channel.shutdownNow().awaitTermination(100, TimeUnit.MILLISECONDS)) {
+                continue;
+            }
+        } catch (Exception e) {
+            log.info("Close channel with error : ", e);
+        }
+    }
+}
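
For reference, a concrete client only has to supply the two stub factories;
leader discovery, retry and channel management are inherited from
AbstractClient. A minimal sketch mirroring the LicenseClient below (the
subclass name is illustrative):

    import io.grpc.stub.AbstractBlockingStub;
    import io.grpc.stub.AbstractStub;
    import org.apache.hugegraph.pd.grpc.PDGrpc;

    public class SimplePdClient extends AbstractClient {

        public SimplePdClient(PDConfig config) {
            super(config);
        }

        @Override
        protected AbstractStub createStub() {
            // async stub bound to the current leader channel
            return PDGrpc.newStub(channel);
        }

        @Override
        protected AbstractBlockingStub createBlockingStub() {
            // blocking stub used by blockingUnaryCall(...)
            return PDGrpc.newBlockingStub(channel);
        }
    }
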
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java
new file mode 100644
index 000000000..6ee3fcb62
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/AbstractClientStubProxy.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.util.LinkedList;
+
+import io.grpc.stub.AbstractBlockingStub;
+import io.grpc.stub.AbstractStub;
+
+public class AbstractClientStubProxy {
+
+    private final LinkedList<String> hostList = new LinkedList<>();
+    private AbstractBlockingStub blockingStub;
+    private AbstractStub stub;
+
+    public AbstractClientStubProxy(String[] hosts) {
+        for (String host : hosts) {
+            if (!host.isEmpty()) {
+                hostList.offer(host);
+            }
+        }
+    }
+
+    public LinkedList<String> getHostList() {
+        return hostList;
+    }
+
+    public String nextHost() {
+        String host = hostList.poll();
+        hostList.offer(host);   // move it to the tail (round-robin)
+        return host;
+    }
+
+    public AbstractBlockingStub getBlockingStub() {
+        return this.blockingStub;
+    }
+
+    public void setBlockingStub(AbstractBlockingStub stub) {
+        this.blockingStub = stub;
+    }
+
+    public String getHost() {
+        return hostList.peek();
+    }
+
+    public int getHostCount() {
+        return hostList.size();
+    }
+
+    public AbstractStub getStub() {
+        return stub;
+    }
+
+    public void setStub(AbstractStub stub) {
+        this.stub = stub;
+    }
+}
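
The proxy keeps the peers in a LinkedList and rotates the head to the tail on
every call, so nextHost() walks the addresses round-robin while getHost()
peeks at the current head. A small sketch (the addresses are illustrative):

    AbstractClientStubProxy proxy =
            new AbstractClientStubProxy(new String[]{"pd1:8686", "pd2:8686"});
    String h1 = proxy.nextHost();  // "pd1:8686", now moved to the tail
    String h2 = proxy.nextHost();  // "pd2:8686"
    String h3 = proxy.nextHost();  // "pd1:8686" again after a full rotation
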
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java
new file mode 100644
index 000000000..34616e637
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Channels.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.util.concurrent.ConcurrentHashMap;
+
+import io.grpc.ManagedChannel;
+import io.grpc.ManagedChannelBuilder;
+
+public class Channels {
+
+    private static final ConcurrentHashMap<String, ManagedChannel> chs = new ConcurrentHashMap<>();
+
+    public static ManagedChannel getChannel(String target) {
+
+        ManagedChannel channel;
+        if ((channel = chs.get(target)) == null || channel.isShutdown() || channel.isTerminated()) {
+            synchronized (chs) {
+                if ((channel = chs.get(target)) == null || channel.isShutdown() ||
+                    channel.isTerminated()) {
+                    channel = ManagedChannelBuilder.forTarget(target).usePlaintext().build();
+                    chs.put(target, channel);
+                }
+            }
+        }
+
+        return channel;
+    }
+}
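
Channels caches one ManagedChannel per target and only rebuilds it once the
old one is shut down or terminated, so callers can request the same address
repeatedly without leaking connections. A sketch (the address is
illustrative):

    import io.grpc.ManagedChannel;

    ManagedChannel c1 = Channels.getChannel("127.0.0.1:8686");
    ManagedChannel c2 = Channels.getChannel("127.0.0.1:8686");
    // c1 == c2: the live channel is reused
    c1.shutdownNow();
    ManagedChannel c3 = Channels.getChannel("127.0.0.1:8686");
    // c3 != c1: a fresh channel replaces the shut-down one
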
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java
new file mode 100644
index 000000000..d4fd50ffe
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/ClientCache.java
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Objects;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hugegraph.pd.common.GraphCache;
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.PartitionUtils;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Metapb.Partition;
+import org.apache.hugegraph.pd.grpc.Metapb.Shard;
+import org.apache.hugegraph.pd.grpc.Metapb.ShardGroup;
+import org.apache.hugegraph.pd.grpc.Pdpb.CachePartitionResponse;
+import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse;
+
+import com.google.common.collect.Range;
+import com.google.common.collect.RangeMap;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class ClientCache {
+
+    private final AtomicBoolean initialized = new AtomicBoolean(false);
+    private final org.apache.hugegraph.pd.client.PDClient client;
+    private volatile Map<Integer, KVPair<ShardGroup, Shard>> groups;
+    private volatile Map<Long, Metapb.Store> stores;
+    private volatile Map<String, GraphCache> caches = new ConcurrentHashMap<>();
+
+    public ClientCache(org.apache.hugegraph.pd.client.PDClient pdClient) {
+        groups = new ConcurrentHashMap<>();
+        stores = new ConcurrentHashMap<>();
+        client = pdClient;
+    }
+
+    private GraphCache getGraphCache(String graphName) {
+        GraphCache graph;
+        if ((graph = caches.get(graphName)) == null) {
+            synchronized (caches) {
+                if ((graph = caches.get(graphName)) == null) {
+                    graph = new GraphCache();
+                    caches.put(graphName, graph);
+                }
+            }
+        }
+        return graph;
+    }
+
+    public KVPair<Partition, Shard> getPartitionById(String graphName, int partId) {
+        try {
+            GraphCache graph = initGraph(graphName);
+            Partition partition = graph.getPartition(partId);
+            Shard shard = groups.get(partId).getValue();
+            if (partition == null || shard == null) {
+                return null;
+            }
+            return new KVPair<>(partition, shard);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private KVPair<Partition, Shard> getPair(int partId, GraphCache graph) {
+        Partition p = graph.getPartition(partId);
+        KVPair<ShardGroup, Shard> pair = groups.get(partId);
+        if (p != null && pair != null) {
+            Shard s = pair.getValue();
+            if (s == null) {
+                pair.setValue(getLeader(partId));
+                return new KVPair<>(p, pair.getValue());
+            } else {
+                return new KVPair<>(p, s);
+            }
+        }
+        return null;
+    }
+
+    /**
+     * Return the partition info by the hash code of the key
+     *
+     * @param graphName the graph name
+     * @param code      the hash code of the key
+     * @return the partition and its leader shard, or null if no range covers the code
+     */
+    public KVPair<Partition, Shard> getPartitionByCode(String graphName, long code) {
+        try {
+            GraphCache graph = initGraph(graphName);
+            RangeMap<Long, Integer> range = graph.getRange();
+            Integer pId = range.get(code);
+            if (pId != null) {
+                return getPair(pId, graph);
+            }
+            return null;
+        } catch (PDException e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private GraphCache initGraph(String graphName) throws PDException {
+        initCache();
+        GraphCache graph = getGraphCache(graphName);
+        if (!graph.getInitialized().get()) {
+            synchronized (graph) {
+                if (!graph.getInitialized().get()) {
+                    CachePartitionResponse pc = client.getPartitionCache(graphName);
+                    RangeMap<Long, Integer> range = graph.getRange();
+                    List<Partition> ps = pc.getPartitionsList();
+                    HashMap<Integer, Partition> gps = new HashMap<>(ps.size(), 1);
+                    for (Partition p : ps) {
+                        gps.put(p.getId(), p);
+                        range.put(Range.closedOpen(p.getStartKey(), p.getEndKey()), p.getId());
+                    }
+                    graph.setPartitions(gps);
+                    graph.getInitialized().set(true);
+                }
+            }
+        }
+        return graph;
+    }
+
+    private void initCache() throws PDException {
+        if (!initialized.get()) {
+            synchronized (this) {
+                if (!initialized.get()) {
+                    CacheResponse cache = client.getClientCache();
+                    List<ShardGroup> shardGroups = cache.getShardsList();
+                    for (ShardGroup s : shardGroups) {
+                        this.groups.put(s.getId(), new KVPair<>(s, getLeader(s.getId())));
+                    }
+                    List<Metapb.Store> stores = cache.getStoresList();
+                    for (Metapb.Store store : stores) {
+                        this.stores.put(store.getId(), store);
+                    }
+                    List<Metapb.Graph> graphs = cache.getGraphsList();
+                    for (Metapb.Graph g : graphs) {
+                        GraphCache c = new GraphCache(g);
+                        caches.put(g.getGraphName(), c);
+                    }
+                    initialized.set(true);
+                }
+            }
+        }
+    }
+
+    /**
+     * Return the partition info that contains the given key
+     *
+     * @param graphName the graph name
+     * @param key       the raw key bytes
+     * @return the partition and its leader shard, or null if not found
+     */
+    public KVPair<Partition, Shard> getPartitionByKey(String graphName, byte[] key) {
+        int code = PartitionUtils.calcHashcode(key);
+        return getPartitionByCode(graphName, code);
+    }
+
+    public boolean update(String graphName, int partId, Partition partition) {
+        GraphCache graph = getGraphCache(graphName);
+        try {
+            Partition p = graph.getPartition(partId);
+            if (p != null && p.equals(partition)) {
+                return false;
+            }
+            RangeMap<Long, Integer> range = graph.getRange();
+            graph.addPartition(partId, partition);
+            if (p != null) {
+                // E.g. the old range [1,3) may already be covered by [2,3); when [1,3)
+                // shrinks to [1,2), the original [1,3) entry must not be removed.
+                // Only remove the old entry when both its start and end still map to
+                // this partition, i.e. it has not been overwritten yet.
+                if (Objects.equals(partition.getId(), range.get(partition.getStartKey())) &&
+                    Objects.equals(partition.getId(), range.get(partition.getEndKey() - 1))) {
+                    range.remove(range.getEntry(partition.getStartKey()).getKey());
+                }
+            }
+            range.put(Range.closedOpen(partition.getStartKey(), partition.getEndKey()), partId);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+        return true;
+    }
+
+    public void removePartition(String graphName, int partId) {
+        GraphCache graph = getGraphCache(graphName);
+        Partition p = graph.removePartition(partId);
+        if (p != null) {
+            RangeMap<Long, Integer> range = graph.getRange();
+            if (Objects.equals(p.getId(), range.get(p.getStartKey())) &&
+                Objects.equals(p.getId(), range.get(p.getEndKey() - 1))) {
+                range.remove(range.getEntry(p.getStartKey()).getKey());
+            }
+        }
+    }
+
+    /**
+     * remove all partitions
+     */
+    public void removePartitions() {
+        for (Entry<String, GraphCache> entry : caches.entrySet()) {
+            removePartitions(entry.getValue());
+        }
+    }
+
+    private void removePartitions(GraphCache graph) {
+        graph.getState().clear();
+        graph.getRange().clear();
+    }
+
+    /**
+     * Remove the partition cache of the given graph
+     *
+     * @param graphName the graph name
+     */
+    public void removeAll(String graphName) {
+        GraphCache graph = caches.get(graphName);
+        if (graph != null) {
+            removePartitions(graph);
+        }
+    }
+
+    public boolean updateShardGroup(ShardGroup shardGroup) {
+        KVPair<ShardGroup, Shard> old = groups.get(shardGroup.getId());
+        Shard leader = getLeader(shardGroup);
+        if (old != null) {
+            old.setKey(shardGroup);
+            old.setValue(leader);
+            return false;
+        }
+        groups.put(shardGroup.getId(), new KVPair<>(shardGroup, leader));
+        return true;
+    }
+
+    public void deleteShardGroup(int shardGroupId) {
+        groups.remove(shardGroupId);
+    }
+
+    public ShardGroup getShardGroup(int groupId) {
+        KVPair<ShardGroup, Shard> pair = groups.get(groupId);
+        if (pair != null) {
+            return pair.getKey();
+        }
+        return null;
+    }
+
+    public boolean addStore(Long storeId, Metapb.Store store) {
+        Metapb.Store oldStore = stores.get(storeId);
+        if (oldStore != null && oldStore.equals(store)) {
+            return false;
+        }
+        stores.put(storeId, store);
+        return true;
+    }
+
+    public Metapb.Store getStoreById(Long storeId) {
+        return stores.get(storeId);
+    }
+
+    public void removeStore(Long storeId) {
+        stores.remove(storeId);
+    }
+
+    public void reset() {
+        groups = new ConcurrentHashMap<>();
+        stores = new ConcurrentHashMap<>();
+        caches = new ConcurrentHashMap<>();
+    }
+
+    public Shard getLeader(int partitionId) {
+        KVPair<ShardGroup, Shard> pair = groups.get(partitionId);
+        if (pair != null) {
+            if (pair.getValue() != null) {
+                return pair.getValue();
+            }
+            for (Shard shard : pair.getKey().getShardsList()) {
+                if (shard.getRole() == Metapb.ShardRole.Leader) {
+                    pair.setValue(shard);
+                    return shard;
+                }
+            }
+        }
+
+        return null;
+    }
+
+    public Shard getLeader(ShardGroup shardGroup) {
+        if (shardGroup != null) {
+            for (Shard shard : shardGroup.getShardsList()) {
+                if (shard.getRole() == Metapb.ShardRole.Leader) {
+                    return shard;
+                }
+            }
+        }
+
+        return null;
+    }
+
+    public void updateLeader(int partitionId, Shard leader) {
+        KVPair<ShardGroup, Shard> pair = groups.get(partitionId);
+        if (pair != null && leader != null) {
+            Shard l = getLeader(partitionId);
+            if (l == null || leader.getStoreId() != l.getStoreId()) {
+                ShardGroup shardGroup = pair.getKey();
+                ShardGroup.Builder builder = ShardGroup.newBuilder(shardGroup).clearShards();
+                for (var shard : shardGroup.getShardsList()) {
+                    builder.addShards(
+                            Shard.newBuilder()
+                                 .setStoreId(shard.getStoreId())
+                                 .setRole(shard.getStoreId() == leader.getStoreId() ?
+                                          Metapb.ShardRole.Leader : Metapb.ShardRole.Follower)
+                                 .build()
+                    );
+                }
+                pair.setKey(builder.build());
+                pair.setValue(leader);
+            }
+        }
+    }
+}
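
The lookup path through ClientCache is two steps: hash the key into the
partition key space, then resolve the [startKey, endKey) range that covers
the code together with the current leader shard. Callers normally go through
PDClient, but the flow is equivalent to this sketch (graph name and key are
illustrative; cache is a ClientCache instance):

    import java.nio.charset.StandardCharsets;
    import org.apache.hugegraph.pd.common.KVPair;
    import org.apache.hugegraph.pd.common.PartitionUtils;
    import org.apache.hugegraph.pd.grpc.Metapb;

    byte[] key = "vertex:1".getBytes(StandardCharsets.UTF_8);
    // 1. hash the key into the partition key space
    int code = PartitionUtils.calcHashcode(key);
    // 2. find the partition whose range covers the code, plus its leader
    KVPair<Metapb.Partition, Metapb.Shard> pair =
            cache.getPartitionByCode("hugegraph", code);
    // one-shot equivalent:
    pair = cache.getPartitionByKey("hugegraph", key);
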
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java
new file mode 100644
index 000000000..abdcac414
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/Discoverable.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfos;
+import org.apache.hugegraph.pd.grpc.discovery.Query;
+
+public interface Discoverable {
+
+    NodeInfos getNodeInfos(Query query);
+
+    void scheduleTask();
+
+    void cancelTask();
+}
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java
new file mode 100644
index 000000000..7a9f28c01
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClient.java
@@ -0,0 +1,221 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.io.Closeable;
+import java.util.LinkedList;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.function.Consumer;
+import java.util.function.Function;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.discovery.DiscoveryServiceGrpc;
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfos;
+import org.apache.hugegraph.pd.grpc.discovery.Query;
+import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo;
+
+import io.grpc.ManagedChannel;
+import io.grpc.ManagedChannelBuilder;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public abstract class DiscoveryClient implements Closeable, Discoverable {
+
+    private final Timer timer = new Timer("serverHeartbeat", true);
+    private final AtomicBoolean requireResetStub = new AtomicBoolean(false);
+    protected int period; // heartbeat interval in milliseconds
+    LinkedList<String> pdAddresses = new LinkedList<>();
+    ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock();
+    private volatile int currentIndex; // index of the PD address currently in use
+    private int maxTime = 6; // max attempts for a discovery call
+    private ManagedChannel channel = null;
+    private DiscoveryServiceGrpc.DiscoveryServiceBlockingStub registerStub;
+    private DiscoveryServiceGrpc.DiscoveryServiceBlockingStub blockingStub;
+
+    public DiscoveryClient(String centerAddress, int delay) {
+        String[] addresses = centerAddress.split(",");
+        for (int i = 0; i < addresses.length; i++) {
+            String singleAddress = addresses[i];
+            if (singleAddress == null || singleAddress.length() <= 0) {
+                continue;
+            }
+            pdAddresses.add(addresses[i]);
+        }
+        this.period = delay;
+        if (maxTime < addresses.length) {
+            maxTime = addresses.length;
+        }
+    }
+
+    private <V, R> R tryWithTimes(Function<V, R> function, V v) {
+        R r;
+        Exception ex = null;
+        for (int i = 0; i < maxTime; i++) {
+            try {
+                r = function.apply(v);
+                return r;
+            } catch (Exception e) {
+                requireResetStub.set(true);
+                resetStub();
+                ex = e;
+            }
+        }
+        if (ex != null) {
+            log.error("Try discovery method with error: {}", ex.getMessage());
+        }
+        return null;
+    }
+
+    /**
+     * Reset the stubs by walking through the PD address list
+     */
+    private void resetStub() {
+        String errLog = null;
+        for (int i = currentIndex + 1; i <= pdAddresses.size() + currentIndex; i++) {
+            currentIndex = i % pdAddresses.size();
+            String singleAddress = pdAddresses.get(currentIndex);
+            try {
+                if (requireResetStub.get()) {
+                    resetChannel(singleAddress);
+                }
+                errLog = null;
+                break;
+            } catch (Exception e) {
+                requireResetStub.set(true);
+                if (errLog == null) {
+                    errLog = e.getMessage();
+                }
+                continue;
+            }
+        }
+        if (errLog != null) {
+            log.error(errLog);
+        }
+    }
+
+    /**
+     * Reset the channel and stubs against the given PD address
+     *
+     * @param singleAddress the PD address to connect to
+     * @throws PDException if the channel cannot be rebuilt
+     */
+    private void resetChannel(String singleAddress) throws PDException {
+
+        readWriteLock.writeLock().lock();
+        try {
+            if (requireResetStub.get()) {
+                while (channel != null && !channel.shutdownNow().awaitTermination(
+                        100, TimeUnit.MILLISECONDS)) {
+                    continue;
+                }
+                channel = ManagedChannelBuilder.forTarget(
+                        singleAddress).usePlaintext().build();
+                this.registerStub = DiscoveryServiceGrpc.newBlockingStub(
+                        channel);
+                this.blockingStub = DiscoveryServiceGrpc.newBlockingStub(
+                        channel);
+                requireResetStub.set(false);
+            }
+        } catch (Exception e) {
+            throw new PDException(-1, String.format(
+                    "Reset channel with error : %s.", e.getMessage()));
+        } finally {
+            readWriteLock.writeLock().unlock();
+        }
+    }
+
+    /**
+     * Get the info of the registered nodes
+     *
+     * @param query the filter for the node lookup
+     * @return the matched nodes, or null if all retries failed
+     */
+    @Override
+    public NodeInfos getNodeInfos(Query query) {
+        return tryWithTimes((q) -> {
+            this.readWriteLock.readLock().lock();
+            NodeInfos nodes;
+            try {
+                nodes = this.blockingStub.getNodes(q);
+            } finally {
+                this.readWriteLock.readLock().unlock();
+            }
+            return nodes;
+        }, query);
+    }
+
+    /**
+     * Start the heartbeat task that registers this node periodically
+     */
+    @Override
+    public void scheduleTask() {
+        timer.schedule(new TimerTask() {
+            @Override
+            public void run() {
+                NodeInfo nodeInfo = getRegisterNode();
+                tryWithTimes((t) -> {
+                    RegisterInfo register;
+                    readWriteLock.readLock().lock();
+                    try {
+                        register = registerStub.register(t);
+                        log.debug("Discovery Client work done.");
+                        Consumer<RegisterInfo> consumer = getRegisterConsumer();
+                        if (consumer != null) {
+                            consumer.accept(register);
+                        }
+                    } finally {
+                        readWriteLock.readLock().unlock();
+                    }
+                    return register;
+                }, nodeInfo);
+            }
+        }, 0, period);
+    }
+
+    abstract NodeInfo getRegisterNode();
+
+    abstract Consumer<RegisterInfo> getRegisterConsumer();
+
+    @Override
+    public void cancelTask() {
+        this.timer.cancel();
+    }
+
+    @Override
+    public void close() {
+        this.timer.cancel();
+        readWriteLock.writeLock().lock();
+        try {
+            while (channel != null && !channel.shutdownNow().awaitTermination(
+                    100, TimeUnit.MILLISECONDS)) {
+                continue;
+            }
+        } catch (Exception e) {
+            log.info("Close channel with error : {}.", e);
+        } finally {
+            readWriteLock.writeLock().unlock();
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java
new file mode 100644
index 000000000..0ded328c1
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/DiscoveryClientImpl.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.util.Map;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
+import org.apache.hugegraph.pd.grpc.discovery.RegisterType;
+
+public class DiscoveryClientImpl extends DiscoveryClient {
+
+    private final String id;
+    private final RegisterType type; // heartbeat type, reserved for future use
+    private final String version;
+    private final String appName;
+    private final int times; // heartbeat expiry count, reserved for future use
+    private final String address;
+    private final Map labels;
+    private final Consumer registerConsumer;
+
+    private DiscoveryClientImpl(Builder builder) {
+        super(builder.centerAddress, builder.delay);
+        period = builder.delay;
+        id = builder.id;
+        type = builder.type;
+        version = builder.version;
+        appName = builder.appName;
+        times = builder.times;
+        address = builder.address;
+        labels = builder.labels;
+        registerConsumer = builder.registerConsumer;
+    }
+
+    public static Builder newBuilder() {
+        return new Builder();
+    }
+
+    @Override
+    NodeInfo getRegisterNode() {
+        return NodeInfo.newBuilder().setAddress(this.address)
+                       .setVersion(this.version)
+                       .setAppName(this.appName).setInterval(this.period)
+                       .setId(this.id).putAllLabels(labels).build();
+    }
+
+    @Override
+    Consumer getRegisterConsumer() {
+        return registerConsumer;
+    }
+
+    public static final class Builder {
+
+        private int delay;
+        private String centerAddress;
+        private String id;
+        private RegisterType type;
+        private String address;
+        private Map labels;
+        private String version;
+        private String appName;
+        private int times;
+        private Consumer registerConsumer;
+
+        private Builder() {
+        }
+
+        public Builder setDelay(int val) {
+            delay = val;
+            return this;
+        }
+
+        public Builder setCenterAddress(String val) {
+            centerAddress = val;
+            return this;
+        }
+
+        public Builder setId(String val) {
+            id = val;
+            return this;
+        }
+
+        public Builder setType(RegisterType val) {
+            type = val;
+            return this;
+        }
+
+        public Builder setAddress(String val) {
+            address = val;
+            return this;
+        }
+
+        public Builder setLabels(Map val) {
+            labels = val;
+            return this;
+        }
+
+        public Builder setVersion(String val) {
+            version = val;
+            return this;
+        }
+
+        public Builder setAppName(String val) {
+            appName = val;
+            return this;
+        }
+
+        public Builder setTimes(int val) {
+            times = val;
+            return this;
+        }
+
+        public Builder setRegisterConsumer(Consumer registerConsumer) {
+            this.registerConsumer = registerConsumer;
+            return this;
+        }
+
+        public DiscoveryClientImpl build() {
+            return new DiscoveryClientImpl(this);
+        }
+    }
+}
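
DiscoveryClientImpl is assembled through its Builder; scheduleTask() then
registers this node with PD and keeps heart-beating once per configured delay
(in milliseconds). A sketch with illustrative values:

    import java.util.Map;

    DiscoveryClientImpl client = DiscoveryClientImpl.newBuilder()
            .setCenterAddress("127.0.0.1:8686")  // PD peers, comma-separated
            .setAddress("10.0.1.5:8080")         // this node's own address
            .setAppName("hugegraph")
            .setVersion("1.0.0")
            .setId("node-1")
            .setDelay(10000)                     // heartbeat period in ms
            .setLabels(Map.of("role", "worker"))
            .build();
    client.scheduleTask();  // start the periodic register heartbeat
    // ...
    client.close();
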
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java
new file mode 100644
index 000000000..7e0795b2e
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/KvClient.java
@@ -0,0 +1,343 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.io.Closeable;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.grpc.kv.K;
+import org.apache.hugegraph.pd.grpc.kv.KResponse;
+import org.apache.hugegraph.pd.grpc.kv.Kv;
+import org.apache.hugegraph.pd.grpc.kv.KvResponse;
+import org.apache.hugegraph.pd.grpc.kv.KvServiceGrpc;
+import org.apache.hugegraph.pd.grpc.kv.LockRequest;
+import org.apache.hugegraph.pd.grpc.kv.LockResponse;
+import org.apache.hugegraph.pd.grpc.kv.ScanPrefixResponse;
+import org.apache.hugegraph.pd.grpc.kv.TTLRequest;
+import org.apache.hugegraph.pd.grpc.kv.TTLResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchEvent;
+import org.apache.hugegraph.pd.grpc.kv.WatchKv;
+import org.apache.hugegraph.pd.grpc.kv.WatchRequest;
+import org.apache.hugegraph.pd.grpc.kv.WatchResponse;
+import org.apache.hugegraph.pd.grpc.kv.WatchType;
+
+import io.grpc.stub.AbstractBlockingStub;
+import io.grpc.stub.AbstractStub;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class KvClient<T extends WatchResponse> extends AbstractClient implements Closeable {
+
+    private final AtomicLong clientId = new AtomicLong(0);
+    private final Semaphore semaphore = new Semaphore(1);
+    private final ConcurrentHashMap<Long, StreamObserver> observers = new ConcurrentHashMap<>();
+
+    public KvClient(PDConfig pdConfig) {
+        super(pdConfig);
+    }
+
+    @Override
+    protected AbstractStub createStub() {
+        return KvServiceGrpc.newStub(channel);
+    }
+
+    @Override
+    protected AbstractBlockingStub createBlockingStub() {
+        return KvServiceGrpc.newBlockingStub(channel);
+    }
+
+    public KvResponse put(String key, String value) throws PDException {
+        Kv kv = Kv.newBuilder().setKey(key).setValue(value).build();
+        KvResponse response = blockingUnaryCall(KvServiceGrpc.getPutMethod(), kv);
+        handleErrors(response.getHeader());
+        return response;
+    }
+
+    public KResponse get(String key) throws PDException {
+        K k = K.newBuilder().setKey(key).build();
+        KResponse response = blockingUnaryCall(KvServiceGrpc.getGetMethod(), k);
+        handleErrors(response.getHeader());
+        return response;
+    }
+
+    public KvResponse delete(String key) throws PDException {
+        K k = K.newBuilder().setKey(key).build();
+        KvResponse response = blockingUnaryCall(KvServiceGrpc.getDeleteMethod(), k);
+        handleErrors(response.getHeader());
+        return response;
+    }
+
+    public KvResponse deletePrefix(String prefix) throws PDException {
+        K k = K.newBuilder().setKey(prefix).build();
+        KvResponse response = blockingUnaryCall(KvServiceGrpc.getDeletePrefixMethod(), k);
+        handleErrors(response.getHeader());
+        return response;
+    }
+
+    public ScanPrefixResponse scanPrefix(String prefix) throws PDException {
+        K k = K.newBuilder().setKey(prefix).build();
+        ScanPrefixResponse response = blockingUnaryCall(KvServiceGrpc.getScanPrefixMethod(), k);
+        handleErrors(response.getHeader());
+        return response;
+    }
+
+    public TTLResponse keepTTLAlive(String key) throws PDException {
+        TTLRequest request = TTLRequest.newBuilder().setKey(key).build();
+        TTLResponse response = blockingUnaryCall(KvServiceGrpc.getKeepTTLAliveMethod(), request);
+        handleErrors(response.getHeader());
+        return response;
+    }
+
+    public TTLResponse putTTL(String key, String value, long ttl) throws PDException {
+        TTLRequest request =
+                TTLRequest.newBuilder().setKey(key).setValue(value).setTtl(ttl).build();
+        TTLResponse response = blockingUnaryCall(KvServiceGrpc.getPutTTLMethod(), request);
+        handleErrors(response.getHeader());
+        return response;
+    }
+
+    private void onEvent(WatchResponse value, Consumer<T> consumer) {
+        log.info("receive message for {},event Count:{}", value, value.getEventsCount());
+        clientId.compareAndSet(0L, value.getClientId());
+        if (value.getEventsCount() != 0) {
+            consumer.accept((T) value);
+        }
+    }
+
+    private StreamObserver<WatchResponse> getObserver(String key, Consumer<T> consumer,
+                                                      BiConsumer<String, Consumer> listenWrapper,
+                                                      long client) {
+        StreamObserver<WatchResponse> observer;
+        if ((observer = observers.get(client)) == null) {
+            synchronized (this) {
+                if ((observer = observers.get(client)) == null) {
+                    observer = getObserver(key, consumer, listenWrapper);
+                    observers.put(client, observer);
+                }
+            }
+        }
+        return observer;
+    }
+
+    private StreamObserver<WatchResponse> getObserver(String key, Consumer<T> consumer,
+                                                      BiConsumer<String, Consumer> listenWrapper) {
+        return new StreamObserver<WatchResponse>() {
+            @Override
+            public void onNext(WatchResponse value) {
+                switch (value.getState()) {
+                    case Starting:
+                        boolean b = clientId.compareAndSet(0, value.getClientId());
+                        if (b) {
+                            observers.put(value.getClientId(), this);
+                            log.info("set watch client id to :{}", value.getClientId());
+                        }
+                        semaphore.release();
+                        break;
+                    case Started:
+                        onEvent(value, consumer);
+                        break;
+                    case Leader_Changed:
+                        listenWrapper.accept(key, consumer);
+                        break;
+                    case Alive:
+                        // only for check client is alive, do nothing
+                        break;
+                    default:
+                        break;
+                }
+            }
+
+            @Override
+            public void onError(Throwable t) {
+                listenWrapper.accept(key, consumer);
+            }
+
+            @Override
+            public void onCompleted() {
+
+            }
+        };
+    }
+
+    public void listen(String key, Consumer<T> consumer) throws PDException {
+        long value = clientId.get();
+        StreamObserver<WatchResponse> observer = getObserver(key, consumer, listenWrapper, value);
+        acquire();
+        WatchRequest k = WatchRequest.newBuilder().setClientId(value).setKey(key).build();
+        streamingCall(KvServiceGrpc.getWatchMethod(), k, observer, 1);
+    }
+
+    public void listenPrefix(String prefix, Consumer<T> consumer) throws PDException {
+        long value = clientId.get();
+        StreamObserver<WatchResponse> observer =
+                getObserver(prefix, consumer, prefixListenWrapper, value);
+        acquire();
+        WatchRequest k =
+                WatchRequest.newBuilder().setClientId(clientId.get()).setKey(prefix).build();
+        streamingCall(KvServiceGrpc.getWatchPrefixMethod(), k, observer, 1);
+    }
+
+    private void acquire() {
+        if (clientId.get() == 0L) {
+            try {
+                semaphore.acquire();
+                if (clientId.get() != 0L) {
+                    semaphore.release();
+                }
+            } catch (InterruptedException e) {
+                log.error("get semaphore with error:", e);
+            }
+        }
+    }
+
+    public List<String> getWatchList(T response) {
+        List<String> values = new LinkedList<>();
+        List<WatchEvent> eventsList = response.getEventsList();
+        for (WatchEvent event : eventsList) {
+            if (event.getType() != WatchType.Put) {
+                return null;
+            }
+            String value = event.getCurrent().getValue();
+            values.add(value);
+        }
+        return values;
+    }
+
+    public Map<String, String> getWatchMap(T response) {
+        Map<String, String> values = new HashMap<>();
+        List<WatchEvent> eventsList = response.getEventsList();
+        for (WatchEvent event : eventsList) {
+            if (event.getType() != WatchType.Put) {
+                return null;
+            }
+            WatchKv current = event.getCurrent();
+            String key = current.getKey();
+            String value = current.getValue();
+            values.put(key, value);
+        }
+        return values;
+    }
+
+    public LockResponse lock(String key, long ttl) throws PDException {
+        acquire();
+        LockResponse response;
+        try {
+            LockRequest k =
+                    LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).setTtl(ttl)
+                               .build();
+            response = blockingUnaryCall(KvServiceGrpc.getLockMethod(), k);
+            handleErrors(response.getHeader());
+            if (clientId.compareAndSet(0L, response.getClientId())) {
+                semaphore.release();
+            }
+        } catch (Exception e) {
+            if (clientId.get() == 0L) {
+                semaphore.release();
+            }
+            throw e;
+        }
+        return response;
+    }
+
+    public LockResponse lockWithoutReentrant(String key, long ttl) throws PDException {
+        acquire();
+        LockResponse response;
+        try {
+            LockRequest k =
+                    LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).setTtl(ttl)
+                               .build();
+            response = blockingUnaryCall(KvServiceGrpc.getLockWithoutReentrantMethod(), k);
+            handleErrors(response.getHeader());
+            if (clientId.compareAndSet(0L, response.getClientId())) {
+                semaphore.release();
+            }
+        } catch (Exception e) {
+            if (clientId.get() == 0L) {
+                semaphore.release();
+            }
+            throw e;
+        }
+        return response;
+    }
+
+    public LockResponse isLocked(String key) throws PDException {
+        LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).build();
+        LockResponse response = blockingUnaryCall(KvServiceGrpc.getIsLockedMethod(), k);
+        handleErrors(response.getHeader());
+        return response;
+    }
+
+    public LockResponse unlock(String key) throws PDException {
+        assert clientId.get() != 0;
+        LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).build();
+        LockResponse response = blockingUnaryCall(KvServiceGrpc.getUnlockMethod(), k);
+        handleErrors(response.getHeader());
+        clientId.compareAndSet(0L, response.getClientId());
+        assert clientId.get() == response.getClientId();
+        return response;
+    }
+
+    public LockResponse keepAlive(String key) throws PDException {
+        assert clientId.get() != 0;
+        LockRequest k = LockRequest.newBuilder().setKey(key).setClientId(clientId.get()).build();
+        LockResponse response = blockingUnaryCall(KvServiceGrpc.getKeepAliveMethod(), k);
+        handleErrors(response.getHeader());
+        clientId.compareAndSet(0L, response.getClientId());
+        assert clientId.get() == response.getClientId();
+        return response;
+    }
+
+    @Override
+    public void close() {
+        super.close();
+    }
+
+    BiConsumer<String, Consumer> listenWrapper = (key, consumer) -> {
+        try {
+            listen(key, consumer);
+        } catch (PDException e) {
+            try {
+                log.warn("start listen with warning:", e);
+                Thread.sleep(1000);
+            } catch (InterruptedException ex) {
+                Thread.currentThread().interrupt(); // restore the interrupt flag
+            }
+        }
+    };
+
+    BiConsumer<String, Consumer> prefixListenWrapper = (key, consumer) -> {
+        try {
+            listenPrefix(key, consumer);
+        } catch (PDException e) {
+            try {
+                log.warn("start listenPrefix with warning:", e);
+                Thread.sleep(1000);
+            } catch (InterruptedException ex) {
+                Thread.currentThread().interrupt(); // restore the interrupt flag
+            }
+        }
+    };
+}
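
KvClient wraps PD's built-in KV service: unary put/get/delete/TTL calls plus
watch streams that re-subscribe on errors and leader changes through the
listenWrapper callbacks above. A sketch of typical use, with exception
handling omitted; the peer address, keys and values are illustrative, and
PDConfig.of(...) is assumed to build a config from the peer list:

    import java.util.Map;
    import org.apache.hugegraph.pd.grpc.kv.WatchResponse;

    KvClient<WatchResponse> kv = new KvClient<>(PDConfig.of("127.0.0.1:8686"));
    kv.put("config/mode", "ha");               // plain put
    String value = kv.get("config/mode").getValue();
    kv.putTTL("lease/node-1", "alive", 5000);  // expires unless renewed
    kv.keepTTLAlive("lease/node-1");           // renew the TTL
    kv.listenPrefix("config/", resp -> {
        // called for each batch of watch events under the prefix;
        // getWatchMap() returns the changed key/values for Put events
        Map<String, String> changed = kv.getWatchMap(resp);
    });
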
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java
new file mode 100644
index 000000000..a96185e5a
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/LicenseClient.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.grpc.PDGrpc;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+
+import com.google.protobuf.ByteString;
+
+import io.grpc.stub.AbstractBlockingStub;
+import io.grpc.stub.AbstractStub;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class LicenseClient extends AbstractClient {
+
+    public LicenseClient(PDConfig config) {
+        super(config);
+    }
+
+    @Override
+    protected AbstractStub createStub() {
+        return PDGrpc.newStub(channel);
+    }
+
+    @Override
+    protected AbstractBlockingStub createBlockingStub() {
+        return PDGrpc.newBlockingStub(channel);
+    }
+
+    public Pdpb.PutLicenseResponse putLicense(byte[] content) {
+        Pdpb.PutLicenseRequest request = Pdpb.PutLicenseRequest.newBuilder()
+                                                               .setContent(
+                                                                       ByteString.copyFrom(content))
+                                                               .build();
+        try {
+            KVPair<Boolean, Pdpb.PutLicenseResponse> pair = concurrentBlockingUnaryCall(
+                    PDGrpc.getPutLicenseMethod(), request,
+                    (rs) -> rs.getHeader().getError().getType().equals(Pdpb.ErrorType.OK));
+            if (pair.getKey()) {
+                Pdpb.PutLicenseResponse.Builder builder = Pdpb.PutLicenseResponse.newBuilder();
+                builder.setHeader(okHeader);
+                return builder.build();
+            } else {
+                return pair.getValue();
+            }
+        } catch (Exception e) {
+            log.error("put license with error: ", e);
+            Pdpb.ResponseHeader rh =
+                    newErrorHeader(Pdpb.ErrorType.LICENSE_ERROR_VALUE, e.getMessage());
+            return Pdpb.PutLicenseResponse.newBuilder().setHeader(rh).build();
+        }
+    }
+}
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
new file mode 100644
index 000000000..6c3eae425
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDClient.java
@@ -0,0 +1,1347 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import static org.apache.hugegraph.pd.watch.NodeEvent.EventType.NODE_PD_LEADER_CHANGE;
+
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hugegraph.pd.common.KVPair;
+import org.apache.hugegraph.pd.common.PDException;
+import org.apache.hugegraph.pd.common.PartitionUtils;
+import org.apache.hugegraph.pd.grpc.MetaTask;
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.apache.hugegraph.pd.grpc.Metapb.ShardGroup;
+import org.apache.hugegraph.pd.grpc.PDGrpc;
+import org.apache.hugegraph.pd.grpc.Pdpb;
+import org.apache.hugegraph.pd.grpc.Pdpb.CachePartitionResponse;
+import org.apache.hugegraph.pd.grpc.Pdpb.CacheResponse;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetGraphRequest;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetPartitionByCodeRequest;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetPartitionRequest;
+import org.apache.hugegraph.pd.grpc.Pdpb.GetPartitionResponse;
+import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
+import org.apache.hugegraph.pd.watch.NodeEvent;
+import org.apache.hugegraph.pd.watch.PartitionEvent;
+
+import com.google.protobuf.ByteString;
+
+import io.grpc.ManagedChannel;
+import io.grpc.MethodDescriptor;
+import io.grpc.StatusRuntimeException;
+import io.grpc.stub.AbstractBlockingStub;
+import lombok.extern.slf4j.Slf4j;
+
+/**
+ * PD client implementation.
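+ * <p>
+ * A minimal usage sketch (the address and graph name below are illustrative):
+ * <pre>
+ * PDClient client = PDClient.create(PDConfig.of("127.0.0.1:8686").setEnableCache(true));
+ * Metapb.Graph graph = client.getGraph("hugegraph");
+ * </pre>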
+ */
+@Slf4j
+public class PDClient {
+
+    private final PDConfig config;
+    private final Pdpb.RequestHeader header;
+    private final ClientCache cache;
+    private final StubProxy stubProxy;
+    private final List<PDEventListener> eventListeners;
+    private PDWatch.Watcher partitionWatcher;
+    private PDWatch.Watcher storeWatcher;
+    private PDWatch.Watcher graphWatcher;
+    private PDWatch.Watcher shardGroupWatcher;
+    private PDWatch pdWatch;
+
+    private PDClient(PDConfig config) {
+        this.config = config;
+        this.header = Pdpb.RequestHeader.getDefaultInstance();
+        this.stubProxy = new StubProxy(config.getServerHost().split(","));
+        this.eventListeners = new CopyOnWriteArrayList<>();
+        this.cache = new ClientCache(this);
+    }
+
+    /**
+     * Create a PDClient instance and initialize the stub.
+     *
+     * @param config the PD client configuration
+     * @return a new PDClient instance
+     */
+    public static PDClient create(PDConfig config) {
+        return new PDClient(config);
+    }
+
+    private synchronized void newBlockingStub() throws PDException {
+        if (stubProxy.get() != null) {
+            return;
+        }
+
+        String host = newLeaderStub();
+        if (host.isEmpty()) {
+            throw new PDException(Pdpb.ErrorType.PD_UNREACHABLE_VALUE,
+                                  "PD unreachable, pd.peers=" + config.getServerHost());
+        }
+
+        log.info("PDClient enable cache, init PDWatch object");
+        connectPdWatch(host);
+    }
+
+    public void connectPdWatch(String leader) {
+
+        if (pdWatch != null && Objects.equals(pdWatch.getCurrentHost(), leader) &&
+            pdWatch.checkChannel()) {
+            return;
+        }
+
+        log.info("PDWatch client connect host:{}", leader);
+        pdWatch = new PDWatchImpl(leader);
+
+        partitionWatcher = pdWatch.watchPartition(new PDWatch.Listener<>() {
+            @Override
+            public void onNext(PartitionEvent response) {
+                // log.info("PDClient receive partition event {}-{} {}",
+                //        response.getGraph(), response.getPartitionId(), response.getChangeType());
+                invalidPartitionCache(response.getGraph(), response.getPartitionId());
+
+                if (response.getChangeType() == PartitionEvent.ChangeType.DEL) {
+                    cache.removeAll(response.getGraph());
+                }
+
+                eventListeners.forEach(listener -> {
+                    listener.onPartitionChanged(response);
+                });
+            }
+
+            @Override
+            public void onError(Throwable throwable) {
+                log.error("watchPartition exception {}", throwable.getMessage());
+                closeStub(false);
+            }
+        });
+
+        storeWatcher = pdWatch.watchNode(new PDWatch.Listener<>() {
+            @Override
+            public void onNext(NodeEvent response) {
+                log.info("PDClient receive store event {} {}",
+                         response.getEventType(), Long.toHexString(response.getNodeId()));
+
+                if (response.getEventType() == NODE_PD_LEADER_CHANGE) {
+                    // pd raft change
+                    var leaderIp = response.getGraph();
+                    log.info("watchNode: pd leader changed to {}, current watch:{}",
+                             leaderIp, pdWatch.getCurrentHost());
+                    closeStub(!Objects.equals(pdWatch.getCurrentHost(), leaderIp));
+                    connectPdWatch(leaderIp);
+                }
+
+                invalidStoreCache(response.getNodeId());
+                eventListeners.forEach(listener -> {
+                    listener.onStoreChanged(response);
+                });
+            }
+
+            @Override
+            public void onError(Throwable throwable) {
+                log.error("watchNode exception {}", throwable.getMessage());
+                closeStub(false);
+            }
+
+        });
+
+        graphWatcher = pdWatch.watchGraph(new PDWatch.Listener<>() {
+            @Override
+            public void onNext(WatchResponse response) {
+                eventListeners.forEach(listener -> {
+                    listener.onGraphChanged(response);
+                });
+            }
+
+            @Override
+            public void onError(Throwable throwable) {
+                log.warn("graphWatcher exception {}", throwable.getMessage());
+            }
+        });
+
+        shardGroupWatcher = pdWatch.watchShardGroup(new PDWatch.Listener<>() {
+            @Override
+            public void onNext(WatchResponse response) {
+                var shardResponse = response.getShardGroupResponse();
+                // log.info("PDClient receive shard group event: raft {}-{}", shardResponse
+                // .getShardGroupId(),
+                //        shardResponse.getType());
+                if (config.isEnableCache()) {
+                    switch (shardResponse.getType()) {
+                        case WATCH_CHANGE_TYPE_DEL:
+                            cache.deleteShardGroup(shardResponse.getShardGroupId());
+                            break;
+                        case WATCH_CHANGE_TYPE_ALTER:
+                            cache.updateShardGroup(
+                                    response.getShardGroupResponse().getShardGroup());
+                            break;
+                        default:
+                            break;
+                    }
+                }
+                eventListeners.forEach(listener -> listener.onShardGroupChanged(response));
+            }
+
+            @Override
+            public void onError(Throwable throwable) {
+                log.warn("shardGroupWatcher exception {}", throwable.getMessage());
+            }
+        });
+
+    }
+
+    private synchronized void closeStub(boolean closeWatcher) {
+        // TODO: the ManagedChannel is not closed properly
+        stubProxy.set(null);
+        cache.reset();
+
+        if (closeWatcher) {
+            if (partitionWatcher != null) {
+                partitionWatcher.close();
+                partitionWatcher = null;
+            }
+            if (storeWatcher != null) {
+                storeWatcher.close();
+                storeWatcher = null;
+            }
+            if (graphWatcher != null) {
+                graphWatcher.close();
+                graphWatcher = null;
+            }
+
+            if (shardGroupWatcher != null) {
+                shardGroupWatcher.close();
+                shardGroupWatcher = null;
+            }
+
+            pdWatch = null;
+        }
+    }
+
+    private PDGrpc.PDBlockingStub getStub() throws PDException {
+        if (stubProxy.get() == null) {
+            newBlockingStub();
+        }
+        return stubProxy.get().withDeadlineAfter(config.getGrpcTimeOut(), TimeUnit.MILLISECONDS);
+    }
+
+    private PDGrpc.PDBlockingStub newStub() throws PDException {
+        if (stubProxy.get() == null) {
+            newBlockingStub();
+        }
+        return PDGrpc.newBlockingStub(stubProxy.get().getChannel())
+                     .withDeadlineAfter(config.getGrpcTimeOut(),
+                                        TimeUnit.MILLISECONDS);
+    }
+
+    private String newLeaderStub() {
+        String leaderHost = "";
+        for (int i = 0; i < stubProxy.getHostCount(); i++) {
+            String host = stubProxy.nextHost();
+            ManagedChannel channel = Channels.getChannel(host);
+
+            PDGrpc.PDBlockingStub stub = PDGrpc.newBlockingStub(channel)
+                                               .withDeadlineAfter(config.getGrpcTimeOut(),
+                                                                  TimeUnit.MILLISECONDS);
+            try {
+                var leaderIp = getLeaderIp(stub);
+                if (!leaderIp.equalsIgnoreCase(host)) {
+                    leaderHost = leaderIp;
+                    stubProxy.set(PDGrpc.newBlockingStub(channel)
+                                        .withDeadlineAfter(config.getGrpcTimeOut(),
+                                                           TimeUnit.MILLISECONDS));
+                } else {
+                    stubProxy.set(stub);
+                    leaderHost = host;
+                }
+                stubProxy.setLeader(leaderIp);
+
+                log.info("PDClient connect to host = {} success", leaderHost);
+                break;
+            } catch (Exception e) {
+                log.error("PDClient connect to {} exception {}, {}", host, e.getMessage(),
+                          e.getCause() != null ? e.getCause().getMessage() : "");
+            }
+        }
+        return leaderHost;
+    }
+
+    public String getLeaderIp() {
+
+        return getLeaderIp(stubProxy.get());
+    }
+
+    private String getLeaderIp(PDGrpc.PDBlockingStub stub) {
+        if (stub == null) {
+            try {
+                getStub();
+                return stubProxy.getLeader();
+            } catch (PDException e) {
+                throw new RuntimeException(e);
+            }
+        }
+
+        Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder()
+                                                               .setHeader(header)
+                                                               .build();
+        Metapb.Member leader = stub.getMembers(request).getLeader();
+        return leader.getGrpcUrl();
+    }
+
+    /**
+     * Register a store and return the store ID; a new ID is assigned on first registration.
+     *
+     * @param store the store to register
+     * @return the store ID
+     */
+    public long registerStore(Metapb.Store store) throws PDException {
+        Pdpb.RegisterStoreRequest request = Pdpb.RegisterStoreRequest.newBuilder()
+                                                                     .setHeader(header)
+                                                                     .setStore(store).build();
+
+        Pdpb.RegisterStoreResponse response =
+                blockingUnaryCall(PDGrpc.getRegisterStoreMethod(), request);
+        handleResponseError(response.getHeader());
+        return response.getStoreId();
+    }
+
+    /**
+     * Return the Store identified by storeId.
+     *
+     * @param storeId the store ID
+     * @return the matching store
+     * @throws PDException
+     */
+    public Metapb.Store getStore(long storeId) throws PDException {
+        Metapb.Store store = cache.getStoreById(storeId);
+        if (store == null) {
+            Pdpb.GetStoreRequest request = Pdpb.GetStoreRequest.newBuilder()
+                                                               .setHeader(header)
+                                                               .setStoreId(storeId).build();
+            Pdpb.GetStoreResponse response = getStub().getStore(request);
+            handleResponseError(response.getHeader());
+            store = response.getStore();
+            if (config.isEnableCache()) {
+                cache.addStore(storeId, store);
+            }
+        }
+        return store;
+    }
+
+    /**
+     * Update store info, e.g. bringing a store online or offline.
+     *
+     * @param store the store to update
+     * @return the updated store
+     */
+    public Metapb.Store updateStore(Metapb.Store store) throws PDException {
+        Pdpb.SetStoreRequest request = Pdpb.SetStoreRequest.newBuilder()
+                                                           .setHeader(header)
+                                                           .setStore(store).build();
+
+        Pdpb.SetStoreResponse response = getStub().setStore(request);
+        handleResponseError(response.getHeader());
+        store = response.getStore();
+        if (config.isEnableCache()) {
+            cache.addStore(store.getId(), store);
+        }
+        return store;
+    }
+
+    /**
+     * Return the active stores of the graph.
+     *
+     * @param graphName the graph name
+     * @return the active stores
+     */
+    public List<Metapb.Store> getActiveStores(String graphName) throws PDException {
+        List<Metapb.Store> stores = new ArrayList<>();
+        KVPair<Metapb.Partition, Metapb.Shard> ptShard = this.getPartitionByCode(graphName, 0);
+        while (ptShard != null) {
+            stores.add(this.getStore(ptShard.getValue().getStoreId()));
+            if (ptShard.getKey().getEndKey() < PartitionUtils.MAX_VALUE) {
+                ptShard = this.getPartitionByCode(graphName, ptShard.getKey().getEndKey());
+            } else {
+                ptShard = null;
+            }
+        }
+        return stores;
+    }
+
+    public List<Metapb.Store> getActiveStores() throws PDException {
+        Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder()
+                                                                   .setHeader(header)
+                                                                   .setGraphName("")
+                                                                   .setExcludeOfflineStores(true)
+                                                                   .build();
+        Pdpb.GetAllStoresResponse response = getStub().getAllStores(request);
+        handleResponseError(response.getHeader());
+        return response.getStoresList();
+
+    }
+
+    /**
+     * Return all stores of the graph, including offline ones.
+     *
+     * @param graphName the graph name
+     * @return all stores
+     */
+    public List<Metapb.Store> getAllStores(String graphName) throws PDException {
+        Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder()
+                                                                   .setHeader(header)
+                                                                   .setGraphName(graphName)
+                                                                   .setExcludeOfflineStores(false)
+                                                                   .build();
+        Pdpb.GetAllStoresResponse response = getStub().getAllStores(request);
+        handleResponseError(response.getHeader());
+        return response.getStoresList();
+
+    }
+
+    /**
+     * Store heartbeat; called periodically to keep the store online.
+     *
+     * @param stats the store statistics
+     * @throws PDException
+     */
+    public Metapb.ClusterStats storeHeartbeat(Metapb.StoreStats stats) throws PDException {
+        Pdpb.StoreHeartbeatRequest request = Pdpb.StoreHeartbeatRequest.newBuilder()
+                                                                       .setHeader(header)
+                                                                       .setStats(stats).build();
+        Pdpb.StoreHeartbeatResponse response = getStub().storeHeartbeat(request);
+        handleResponseError(response.getHeader());
+        return response.getClusterStats();
+    }
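+
+    // Sketch: a store process typically calls this from a scheduled task, e.g.:
+    // Metapb.ClusterStats stats = pdClient.storeHeartbeat(storeStats); // storeStats is illustrative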
+
+    private KVPair<Metapb.Partition, Metapb.Shard> getKvPair(String graphName, byte[] key,
+                                                             KVPair<Metapb.Partition,
+                                                                     Metapb.Shard> partShard) throws
+                                                                                              PDException {
+        if (partShard == null) {
+            GetPartitionRequest request = GetPartitionRequest.newBuilder()
+                                                             .setHeader(header)
+                                                             .setGraphName(graphName)
+                                                             .setKey(ByteString.copyFrom(key))
+                                                             .build();
+            GetPartitionResponse response =
+                    blockingUnaryCall(PDGrpc.getGetPartitionMethod(), request);
+            handleResponseError(response.getHeader());
+            partShard = new KVPair<>(response.getPartition(), response.getLeader());
+            cache.update(graphName, partShard.getKey().getId(), partShard.getKey());
+        }
+        return partShard;
+    }
+
+    /**
+     * Query the partition that the key belongs to.
+     *
+     * @param graphName the graph name
+     * @param key       the key
+     * @return the partition and its leader shard
+     * @throws PDException
+     */
+    public KVPair<Metapb.Partition, Metapb.Shard> getPartition(String graphName, byte[] key) throws
+                                                                                             PDException {
+        // Check the cache first; on a miss, call PD
+        KVPair<Metapb.Partition, Metapb.Shard> partShard = cache.getPartitionByKey(graphName, key);
+        partShard = getKvPair(graphName, key, partShard);
+        return partShard;
+    }
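+
+    // Sketch: route a key to the store currently holding its partition leader
+    // (the graph name and key are illustrative):
+    // KVPair<Metapb.Partition, Metapb.Shard> ps = pdClient.getPartition("hugegraph", key);
+    // long storeId = ps.getValue().getStoreId();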
+
+    public KVPair<Metapb.Partition, Metapb.Shard> getPartition(String graphName, byte[] key,
+                                                               int code) throws
+                                                                         PDException {
+        KVPair<Metapb.Partition, Metapb.Shard> partShard =
+                cache.getPartitionByCode(graphName, code);
+        partShard = getKvPair(graphName, key, partShard);
+        return partShard;
+    }
+
+    /**
+     * Query the partition by hash code.
+     *
+     * @param graphName the graph name
+     * @param hashCode  the hash code
+     * @return the partition and its leader shard
+     * @throws PDException
+     */
+    public KVPair<Metapb.Partition, Metapb.Shard> getPartitionByCode(String graphName,
+                                                                     long hashCode)
+            throws PDException {
+        // Check the cache first; on a miss, call PD
+        KVPair<Metapb.Partition, Metapb.Shard> partShard =
+                cache.getPartitionByCode(graphName, hashCode);
+        if (partShard == null) {
+            GetPartitionByCodeRequest request = GetPartitionByCodeRequest.newBuilder()
+                                                                         .setHeader(header)
+                                                                         .setGraphName(graphName)
+                                                                         .setCode(hashCode).build();
+            GetPartitionResponse response =
+                    blockingUnaryCall(PDGrpc.getGetPartitionByCodeMethod(), request);
+            handleResponseError(response.getHeader());
+            partShard = new KVPair<>(response.getPartition(), response.getLeader());
+            cache.update(graphName, partShard.getKey().getId(), partShard.getKey());
+            cache.updateShardGroup(getShardGroup(partShard.getKey().getId()));
+        }
+
+        if (partShard.getValue() == null) {
+            ShardGroup shardGroup = getShardGroup(partShard.getKey().getId());
+            if (shardGroup != null) {
+                for (var shard : shardGroup.getShardsList()) {
+                    if (shard.getRole() == Metapb.ShardRole.Leader) {
+                        partShard.setValue(shard);
+                    }
+                }
+            } else {
+                log.error("getPartitionByCode: get shard group failed, {}",
+                          partShard.getKey().getId());
+            }
+        }
+        return partShard;
+    }
+
+    /**
+     * Get the hash code of the key.
+     */
+    public int keyToCode(String graphName, byte[] key) {
+        return PartitionUtils.calcHashcode(key);
+    }
+
+    /**
+     * Return partition info by partition id (RPC request).
+     *
+     * @param graphName the graph name
+     * @param partId    the partition id
+     * @return the partition and its leader shard
+     * @throws PDException
+     */
+    public KVPair<Metapb.Partition, Metapb.Shard> getPartitionById(String graphName,
+                                                                   int partId) throws PDException {
+        KVPair<Metapb.Partition, Metapb.Shard> partShard =
+                cache.getPartitionById(graphName, partId);
+        if (partShard == null) {
+            Pdpb.GetPartitionByIDRequest request = Pdpb.GetPartitionByIDRequest.newBuilder()
+                                                                               .setHeader(header)
+                                                                               .setGraphName(
+                                                                                       graphName)
+                                                                               .setPartitionId(
+                                                                                       partId)
+                                                                               .build();
+            GetPartitionResponse response =
+                    blockingUnaryCall(PDGrpc.getGetPartitionByIDMethod(), request);
+            handleResponseError(response.getHeader());
+            partShard = new KVPair<>(response.getPartition(), response.getLeader());
+            if (config.isEnableCache()) {
+                cache.update(graphName, partShard.getKey().getId(), partShard.getKey());
+                cache.updateShardGroup(getShardGroup(partShard.getKey().getId()));
+            }
+        }
+        if (partShard.getValue() == null) {
+            var shardGroup = getShardGroup(partShard.getKey().getId());
+            if (shardGroup != null) {
+                for (var shard : shardGroup.getShardsList()) {
+                    if (shard.getRole() == Metapb.ShardRole.Leader) {
+                        partShard.setValue(shard);
+                    }
+                }
+            } else {
+                log.error("getPartitionById: get shard group failed, {}",
+                          partShard.getKey().getId());
+            }
+        }
+        return partShard;
+    }
+
+    public ShardGroup getShardGroup(int partId) throws PDException {
+        ShardGroup group = cache.getShardGroup(partId);
+        if (group == null) {
+            Pdpb.GetShardGroupRequest request = Pdpb.GetShardGroupRequest.newBuilder()
+                                                                         .setHeader(header)
+                                                                         .setGroupId(partId)
+                                                                         .build();
+            Pdpb.GetShardGroupResponse response =
+                    blockingUnaryCall(PDGrpc.getGetShardGroupMethod(), request);
+            handleResponseError(response.getHeader());
+            group = response.getShardGroup();
+            if (config.isEnableCache()) {
+                cache.updateShardGroup(group);
+            }
+        }
+        return group;
+    }
+
+    public void updateShardGroup(ShardGroup shardGroup) throws PDException {
+        Pdpb.UpdateShardGroupRequest request = Pdpb.UpdateShardGroupRequest.newBuilder()
+                                                                           .setHeader(header)
+                                                                           .setShardGroup(
+                                                                                   shardGroup)
+                                                                           .build();
+        Pdpb.UpdateShardGroupResponse response =
+                blockingUnaryCall(PDGrpc.getUpdateShardGroupMethod(), request);
+        handleResponseError(response.getHeader());
+
+        if (config.isEnableCache()) {
+            cache.updateShardGroup(shardGroup);
+        }
+    }
+
+    /**
+     * Return all partitions spanned by the range [startKey, endKey].
+     *
+     * @param graphName the graph name
+     * @param startKey  the start key
+     * @param endKey    the end key
+     * @return the partitions covering the range
+     * @throws PDException
+     */
+    public List<KVPair<Metapb.Partition, Metapb.Shard>> scanPartitions(String graphName,
+                                                                       byte[] startKey,
+                                                                       byte[] endKey) throws
+                                                                                      PDException {
+        List<KVPair<Metapb.Partition, Metapb.Shard>> partitions = new ArrayList<>();
+        KVPair<Metapb.Partition, Metapb.Shard> startPartShard = getPartition(graphName, startKey);
+        KVPair<Metapb.Partition, Metapb.Shard> endPartShard = getPartition(graphName, endKey);
+        if (startPartShard == null || endPartShard == null) {
+            return null;
+        }
+
+        partitions.add(startPartShard);
+        while (startPartShard.getKey().getEndKey() < endPartShard.getKey().getEndKey()
+               && startPartShard.getKey().getEndKey() <
+                  PartitionUtils.MAX_VALUE /* exclude the last partition */) {
+            startPartShard = getPartitionByCode(graphName, startPartShard.getKey().getEndKey());
+            partitions.add(startPartShard);
+        }
+        return partitions;
+    }
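+
+    // Sketch: enumerate every partition covering a key range (keys are illustrative):
+    // List<KVPair<Metapb.Partition, Metapb.Shard>> parts =
+    //         pdClient.scanPartitions("hugegraph", startKey, endKey);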
+
+    /**
+     * Query the partitions located on the given store.
+     *
+     * @return the partitions on the store
+     * @throws PDException
+     */
+    public List<Metapb.Partition> getPartitionsByStore(long storeId) throws PDException {
+
+        Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder()
+                                                           .setStoreId(storeId)
+                                                           .build();
+        Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder()
+                                                                         .setQuery(query).build();
+        Pdpb.QueryPartitionsResponse response =
+                blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request);
+
+        handleResponseError(response.getHeader());
+        return response.getPartitionsList();
+    }
+
+    /**
+     * Find the given partitionId on the given store.
+     *
+     * @return the matching partitions
+     * @throws PDException
+     */
+    public List<Metapb.Partition> queryPartitions(long storeId, int partitionId) throws
+                                                                                 PDException {
+
+        Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder()
+                                                           .setStoreId(storeId)
+                                                           .setPartitionId(partitionId)
+                                                           .build();
+        Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder()
+                                                                         .setQuery(query).build();
+        Pdpb.QueryPartitionsResponse response =
+                blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request);
+
+        handleResponseError(response.getHeader());
+        return response.getPartitionsList();
+    }
+
+    public List<Metapb.Partition> getPartitions(long storeId, String graphName) throws PDException {
+
+        Metapb.PartitionQuery query = Metapb.PartitionQuery.newBuilder()
+                                                           .setStoreId(storeId)
+                                                           .setGraphName(graphName).build();
+        Pdpb.QueryPartitionsRequest request = Pdpb.QueryPartitionsRequest.newBuilder()
+                                                                         .setQuery(query).build();
+        Pdpb.QueryPartitionsResponse response =
+                blockingUnaryCall(PDGrpc.getQueryPartitionsMethod(), request);
+
+        handleResponseError(response.getHeader());
+        return response.getPartitionsList();
+
+    }
+
+    public Metapb.Graph setGraph(Metapb.Graph graph) throws PDException {
+        Pdpb.SetGraphRequest request = Pdpb.SetGraphRequest.newBuilder()
+                                                           .setGraph(graph)
+                                                           .build();
+        Pdpb.SetGraphResponse response =
+                blockingUnaryCall(PDGrpc.getSetGraphMethod(), request);
+
+        handleResponseError(response.getHeader());
+        return response.getGraph();
+    }
+
+    public Metapb.Graph getGraph(String graphName) throws PDException {
+        GetGraphRequest request = GetGraphRequest.newBuilder()
+                                                 .setGraphName(graphName)
+                                                 .build();
+        Pdpb.GetGraphResponse response =
+                blockingUnaryCall(PDGrpc.getGetGraphMethod(), request);
+
+        handleResponseError(response.getHeader());
+        return response.getGraph();
+    }
+
+    public Metapb.Graph getGraphWithOutException(String graphName) throws PDException {
+        GetGraphRequest request = GetGraphRequest.newBuilder()
+                                                 .setGraphName(graphName)
+                                                 .build();
+        Pdpb.GetGraphResponse response = blockingUnaryCall(PDGrpc.getGetGraphMethod(), request);
+        return response.getGraph();
+    }
+
+    public Metapb.Graph delGraph(String graphName) throws PDException {
+        Pdpb.DelGraphRequest request = Pdpb.DelGraphRequest.newBuilder()
+                                                           .setGraphName(graphName)
+                                                           .build();
+        Pdpb.DelGraphResponse response =
+                blockingUnaryCall(PDGrpc.getDelGraphMethod(), request);
+
+        handleResponseError(response.getHeader());
+        return response.getGraph();
+    }
+
+    public List<Metapb.Partition> updatePartition(List<Metapb.Partition> partitions) throws
+                                                                                     PDException {
+
+        Pdpb.UpdatePartitionRequest request = Pdpb.UpdatePartitionRequest.newBuilder()
+                                                                         .addAllPartition(
+                                                                                 partitions)
+                                                                         .build();
+        Pdpb.UpdatePartitionResponse response =
+                blockingUnaryCall(PDGrpc.getUpdatePartitionMethod(), request);
+        handleResponseError(response.getHeader());
+        invalidPartitionCache();
+
+        return response.getPartitionList();
+    }
+
+    public Metapb.Partition delPartition(String graphName, int partitionId) throws PDException {
+
+        Pdpb.DelPartitionRequest request = Pdpb.DelPartitionRequest.newBuilder()
+                                                                   .setGraphName(graphName)
+                                                                   .setPartitionId(partitionId)
+                                                                   .build();
+        Pdpb.DelPartitionResponse response =
+                blockingUnaryCall(PDGrpc.getDelPartitionMethod(), request);
+
+        handleResponseError(response.getHeader());
+        invalidPartitionCache(graphName, partitionId);
+        return response.getPartition();
+    }
+
+    /**
+     * Invalidate the cached entry of the given partition.
+     */
+    public void invalidPartitionCache(String graphName, int partitionId) {
+        // Only remove when the entry is actually cached
+        if (null != cache.getPartitionById(graphName, partitionId)) {
+            cache.removePartition(graphName, partitionId);
+        }
+    }
+
+    /**
+     * Invalidate all cached partitions.
+     */
+    public void invalidPartitionCache() {
+        cache.removePartitions();
+    }
+
+    /**
+     * Invalidate the cached entry of the given store.
+     */
+    public void invalidStoreCache(long storeId) {
+        cache.removeStore(storeId);
+    }
+
+    /**
+     * Called by the HugeGraph server to refresh the cache when a partition leader changes.
+     */
+    public void updatePartitionLeader(String graphName, int partId, long leaderStoreId) {
+        KVPair<Metapb.Partition, Metapb.Shard> partShard = null;
+        try {
+            partShard = this.getPartitionById(graphName, partId);
+
+            if (partShard != null && partShard.getValue().getStoreId() != leaderStoreId) {
+                var shardGroup = this.getShardGroup(partId);
+                Metapb.Shard shard = null;
+                List<Metapb.Shard> shards = new ArrayList<>();
+
+                for (Metapb.Shard s : shardGroup.getShardsList()) {
+                    if (s.getStoreId() == leaderStoreId) {
+                        shard = s;
+                        shards.add(Metapb.Shard.newBuilder(s)
+                                               .setStoreId(s.getStoreId())
+                                               .setRole(Metapb.ShardRole.Leader).build());
+                    } else {
+                        shards.add(Metapb.Shard.newBuilder(s)
+                                               .setStoreId(s.getStoreId())
+                                               .setRole(Metapb.ShardRole.Follower).build());
+                    }
+                }
+
+                if (config.isEnableCache()) {
+                    if (shard == null) {
+                        // Leader not found among the partition's shards: the partition has been migrated
+                        cache.removePartition(graphName, partId);
+                    }
+                }
+            }
+        } catch (PDException e) {
+            log.error("getPartitionException: {}", e.getMessage());
+        }
+    }
+
+    /**
+     * Called by hugegraph-store to update the cache.
+     *
+     * @param partition the partition to cache
+     * @param leader    the leader shard of the partition
+     */
+    public void updatePartitionCache(Metapb.Partition partition, Metapb.Shard leader) {
+        if (config.isEnableCache()) {
+            cache.update(partition.getGraphName(), partition.getId(), partition);
+            cache.updateLeader(partition.getId(), leader);
+        }
+    }
+
+    public Pdpb.GetIdResponse getIdByKey(String key, int delta) throws PDException {
+        Pdpb.GetIdRequest request = Pdpb.GetIdRequest.newBuilder()
+                                                     .setHeader(header)
+                                                     .setKey(key)
+                                                     .setDelta(delta)
+                                                     .build();
+        Pdpb.GetIdResponse response = blockingUnaryCall(PDGrpc.getGetIdMethod(), request);
+        handleResponseError(response.getHeader());
+        return response;
+    }
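+
+    // Sketch (assuming delta reserves an id range under the given counter key; key name is illustrative):
+    // Pdpb.GetIdResponse resp = pdClient.getIdByKey("vertex-id", 1000);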
+
+    public Pdpb.ResetIdResponse resetIdByKey(String key) throws PDException {
+        Pdpb.ResetIdRequest request = Pdpb.ResetIdRequest.newBuilder()
+                                                         .setHeader(header)
+                                                         .setKey(key)
+                                                         .build();
+        Pdpb.ResetIdResponse response = blockingUnaryCall(PDGrpc.getResetIdMethod(), request);
+        handleResponseError(response.getHeader());
+        return response;
+    }
+
+    public Metapb.Member getLeader() throws PDException {
+        Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder()
+                                                               .setHeader(header)
+                                                               .build();
+        Pdpb.GetMembersResponse response = blockingUnaryCall(PDGrpc.getGetMembersMethod(), request);
+        handleResponseError(response.getHeader());
+        return response.getLeader();
+    }
+
+    public Pdpb.GetMembersResponse getMembers() throws PDException {
+        Pdpb.GetMembersRequest request = Pdpb.GetMembersRequest.newBuilder()
+                                                               .setHeader(header)
+                                                               .build();
+        Pdpb.GetMembersResponse response = blockingUnaryCall(PDGrpc.getGetMembersMethod(), request);
+        handleResponseError(response.getHeader());
+        return response;
+    }
+
+    public Metapb.ClusterStats getClusterStats() throws PDException {
+        Pdpb.GetClusterStatsRequest request = Pdpb.GetClusterStatsRequest.newBuilder()
+                                                                         .setHeader(header)
+                                                                         .build();
+        Pdpb.GetClusterStatsResponse response =
+                blockingUnaryCall(PDGrpc.getGetClusterStatsMethod(), request);
+        handleResponseError(response.getHeader());
+        return response.getCluster();
+    }
+
+    private <ReqT, RespT, StubT extends AbstractBlockingStub<StubT>> RespT
+    blockingUnaryCall(MethodDescriptor<ReqT, RespT> method, ReqT req) throws PDException {
+        return blockingUnaryCall(method, req, 1);
+    }
+
+    private <ReqT, RespT, StubT extends AbstractBlockingStub<StubT>> RespT
+    blockingUnaryCall(MethodDescriptor<ReqT, RespT> method, ReqT req, int retry) throws
+                                                                                 PDException {
+        io.grpc.stub.AbstractBlockingStub<StubT> stub = (AbstractBlockingStub<StubT>) getStub();
+        try {
+            RespT resp = io.grpc.stub.ClientCalls.blockingUnaryCall(stub.getChannel(), method,
+                                                                    stub.getCallOptions(), req);
+            return resp;
+        } catch (Exception e) {
+            log.error(method.getFullMethodName() + " exception, {}", e.getMessage());
+            if (e instanceof StatusRuntimeException) {
+                StatusRuntimeException se = (StatusRuntimeException) e;
+                //se.getStatus() == Status.UNAVAILABLE &&
+                if (retry < stubProxy.getHostCount()) {
+                    // Connection failed: close the current stub and reconnect to another host
+                    closeStub(true);
+                    return blockingUnaryCall(method, req, ++retry);
+                }
+            }
+        }
+        return null;
+    }
+
+    private void handleResponseError(Pdpb.ResponseHeader header) throws
+                                                                 PDException {
+        var errorType = header.getError().getType();
+        if (header.hasError() && errorType != Pdpb.ErrorType.OK) {
+
+            throw new PDException(header.getError().getTypeValue(),
+                                  String.format(
+                                          "PD request error, error code = %d, msg = %s",
+                                          header.getError().getTypeValue(),
+                                          header.getError().getMessage()));
+        }
+    }
+
+    public void addEventListener(PDEventListener listener) {
+        eventListeners.add(listener);
+    }
+
+    public PDWatch getWatchClient() {
+        return new PDWatchImpl(stubProxy.getHost());
+    }
+
+    /**
+     * Return store status info.
+     */
+    public List<Metapb.Store> getStoreStatus(boolean offlineExcluded) throws PDException {
+        Pdpb.GetAllStoresRequest request = Pdpb.GetAllStoresRequest.newBuilder()
+                                                                   .setHeader(header)
+                                                                   .setExcludeOfflineStores(
+                                                                           offlineExcluded)
+                                                                   .build();
+        Pdpb.GetAllStoresResponse response = getStub().getStoreStatus(request);
+        handleResponseError(response.getHeader());
+        return response.getStoresList();
+    }
+
+    public void setGraphSpace(String graphSpaceName, long storageLimit) throws PDException {
+        Metapb.GraphSpace graphSpace = Metapb.GraphSpace.newBuilder().setName(graphSpaceName)
+                                                        .setStorageLimit(storageLimit)
+                                                        .setTimestamp(System.currentTimeMillis())
+                                                        .build();
+        Pdpb.SetGraphSpaceRequest request = Pdpb.SetGraphSpaceRequest.newBuilder()
+                                                                     .setHeader(header)
+                                                                     .setGraphSpace(graphSpace)
+                                                                     .build();
+        Pdpb.SetGraphSpaceResponse response = getStub().setGraphSpace(request);
+        handleResponseError(response.getHeader());
+    }
+
+    public List<Metapb.GraphSpace> getGraphSpace(String graphSpaceName) throws
+                                                                        PDException {
+        Pdpb.GetGraphSpaceRequest.Builder builder = Pdpb.GetGraphSpaceRequest.newBuilder();
+        Pdpb.GetGraphSpaceRequest request;
+        builder.setHeader(header);
+        if (graphSpaceName != null && graphSpaceName.length() > 0) {
+            builder.setGraphSpaceName(graphSpaceName);
+        }
+        request = builder.build();
+        Pdpb.GetGraphSpaceResponse response = getStub().getGraphSpace(request);
+        List<Metapb.GraphSpace> graphSpaceList = response.getGraphSpaceList();
+        handleResponseError(response.getHeader());
+        return graphSpaceList;
+    }
+
+    public void setPDConfig(int partitionCount, String peerList, int shardCount,
+                            long version) throws PDException {
+        Metapb.PDConfig pdConfig = Metapb.PDConfig.newBuilder().setPartitionCount(partitionCount)
+                                                  .setPeersList(peerList).setShardCount(shardCount)
+                                                  .setVersion(version)
+                                                  .setTimestamp(System.currentTimeMillis())
+                                                  .build();
+        Pdpb.SetPDConfigRequest request = Pdpb.SetPDConfigRequest.newBuilder()
+                                                                 .setHeader(header)
+                                                                 .setPdConfig(pdConfig)
+                                                                 .build();
+        Pdpb.SetPDConfigResponse response = getStub().setPDConfig(request);
+        handleResponseError(response.getHeader());
+    }
+
+    public Metapb.PDConfig getPDConfig() throws PDException {
+        Pdpb.GetPDConfigRequest request = Pdpb.GetPDConfigRequest.newBuilder()
+                                                                 .setHeader(header)
+                                                                 .build();
+        Pdpb.GetPDConfigResponse response = getStub().getPDConfig(request);
+        handleResponseError(response.getHeader());
+        return response.getPdConfig();
+    }
+
+    public void setPDConfig(Metapb.PDConfig pdConfig) throws PDException {
+        Pdpb.SetPDConfigRequest request = Pdpb.SetPDConfigRequest.newBuilder()
+                                                                 .setHeader(header)
+                                                                 .setPdConfig(pdConfig)
+                                                                 .build();
+        Pdpb.SetPDConfigResponse response = getStub().setPDConfig(request);
+        handleResponseError(response.getHeader());
+    }
+
+    public Metapb.PDConfig getPDConfig(long version) throws PDException {
+        Pdpb.GetPDConfigRequest request = Pdpb.GetPDConfigRequest.newBuilder().setHeader(
+                header).setVersion(version).build();
+        Pdpb.GetPDConfigResponse response = getStub().getPDConfig(request);
+        handleResponseError(response.getHeader());
+        return response.getPdConfig();
+    }
+
+    public void changePeerList(String peerList) throws PDException {
+        Pdpb.ChangePeerListRequest request = Pdpb.ChangePeerListRequest.newBuilder()
+                                                                       .setPeerList(peerList)
+                                                                       .setHeader(header).build();
+        Pdpb.getChangePeerListResponse response =
+                blockingUnaryCall(PDGrpc.getChangePeerListMethod(), request);
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Working mode:
+     * Auto: split automatically until the partition count on each store reaches the maximum.
+     *
+     * @throws PDException
+     */
+    public void splitData() throws PDException {
+        Pdpb.SplitDataRequest request = Pdpb.SplitDataRequest.newBuilder()
+                                                             .setHeader(header)
+                                                             .setMode(Pdpb.OperationMode.Auto)
+                                                             .build();
+        Pdpb.SplitDataResponse response = getStub().splitData(request);
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Working mode:
+     * Auto: split automatically until the partition count on each store reaches the maximum.
+     * Expert: expert mode, splitParams must be specified.
+     *
+     * @param mode   the operation mode
+     * @param params the split parameters (required in Expert mode)
+     * @throws PDException
+     */
+    public void splitData(Pdpb.OperationMode mode, List<Pdpb.SplitDataParam> params) throws
+                                                                                     PDException {
+        Pdpb.SplitDataRequest request = Pdpb.SplitDataRequest.newBuilder()
+                                                             .setHeader(header)
+                                                             .setMode(mode)
+                                                             .addAllParam(params).build();
+        Pdpb.SplitDataResponse response = getStub().splitData(request);
+        handleResponseError(response.getHeader());
+    }
+
+    public void splitGraphData(String graphName, int toCount) throws PDException {
+        Pdpb.SplitGraphDataRequest request = Pdpb.SplitGraphDataRequest.newBuilder()
+                                                                       .setHeader(header)
+                                                                       .setGraphName(graphName)
+                                                                       .setToCount(toCount)
+                                                                       .build();
+        Pdpb.SplitDataResponse response = getStub().splitGraphData(request);
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Move partitions automatically until each store holds the same number of partitions.
+     *
+     * @throws PDException
+     */
+    public void balancePartition() throws PDException {
+        Pdpb.MovePartitionRequest request = Pdpb.MovePartitionRequest.newBuilder()
+                                                                     .setHeader(header)
+                                                                     .setMode(
+                                                                             Pdpb.OperationMode.Auto)
+                                                                     .build();
+        Pdpb.MovePartitionResponse response = getStub().movePartition(request);
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Working mode:
+     * Auto: move automatically until each store holds the same number of partitions.
+     * Expert: expert mode, transferParams must be specified.
+     *
+     * @param mode   the operation mode
+     * @param params the move parameters (required in Expert mode)
+     * @throws PDException
+     */
+    public void movePartition(Pdpb.OperationMode mode, List<Pdpb.MovePartitionParam> params) throws
+                                                                                             PDException {
+        Pdpb.MovePartitionRequest request = Pdpb.MovePartitionRequest.newBuilder()
+                                                                     .setHeader(header)
+                                                                     .setMode(mode)
+                                                                     .addAllParam(params).build();
+        Pdpb.MovePartitionResponse response = getStub().movePartition(request);
+        handleResponseError(response.getHeader());
+    }
+
+    public void reportTask(MetaTask.Task task) throws PDException {
+        Pdpb.ReportTaskRequest request = Pdpb.ReportTaskRequest.newBuilder()
+                                                               .setHeader(header)
+                                                               .setTask(task).build();
+        Pdpb.ReportTaskResponse response = blockingUnaryCall(PDGrpc.getReportTaskMethod(), request);
+        handleResponseError(response.getHeader());
+    }
+
+    public Metapb.PartitionStats getPartitionsStats(String graph, int partId) throws PDException {
+        Pdpb.GetPartitionStatsRequest request = Pdpb.GetPartitionStatsRequest.newBuilder()
+                                                                             .setHeader(header)
+                                                                             .setGraphName(graph)
+                                                                             .setPartitionId(partId)
+                                                                             .build();
+        Pdpb.GetPartitionStatsResponse response = getStub().getPartitionStats(request);
+        handleResponseError(response.getHeader());
+        return response.getPartitionStats();
+    }
+
+    /**
+     * Balance the number of leaders across stores.
+     */
+    public void balanceLeaders() throws PDException {
+        Pdpb.BalanceLeadersRequest request = Pdpb.BalanceLeadersRequest.newBuilder()
+                                                                       .setHeader(header)
+                                                                       .build();
+        Pdpb.BalanceLeadersResponse response = getStub().balanceLeaders(request);
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Delete a store from PD.
+     */
+    public Metapb.Store delStore(long storeId) throws PDException {
+        Pdpb.DetStoreRequest request = Pdpb.DetStoreRequest.newBuilder()
+                                                           .setHeader(header)
+                                                           .setStoreId(storeId)
+                                                           .build();
+        Pdpb.DetStoreResponse response = getStub().delStore(request);
+        handleResponseError(response.getHeader());
+        return response.getStore();
+    }
+
+    /**
+     * Compact the whole RocksDB.
+     *
+     * @throws PDException
+     */
+    public void dbCompaction() throws PDException {
+        Pdpb.DbCompactionRequest request = Pdpb.DbCompactionRequest
+                .newBuilder()
+                .setHeader(header)
+                .build();
+        Pdpb.DbCompactionResponse response = getStub().dbCompaction(request);
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Compact the given RocksDB table.
+     *
+     * @param tableName the table to compact
+     * @throws PDException
+     */
+    public void dbCompaction(String tableName) throws PDException {
+        Pdpb.DbCompactionRequest request = Pdpb.DbCompactionRequest
+                .newBuilder()
+                .setHeader(header)
+                .setTableName(tableName)
+                .build();
+        Pdpb.DbCompactionResponse response = getStub().dbCompaction(request);
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Combine partitions, shrinking the cluster to toCount partitions.
+     *
+     * @param toCount the target number of partitions
+     * @throws PDException
+     */
+    public void combineCluster(int toCount) throws PDException {
+        Pdpb.CombineClusterRequest request = Pdpb.CombineClusterRequest
+                .newBuilder()
+                .setHeader(header)
+                .setToCount(toCount)
+                .build();
+        Pdpb.CombineClusterResponse response = getStub().combineCluster(request);
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Shrink a single graph to toCount partitions.
+     *
+     * @param graphName the graph name
+     * @param toCount   the target partition count
+     * @throws PDException
+     */
+    public void combineGraph(String graphName, int toCount) throws PDException {
+        Pdpb.CombineGraphRequest request = Pdpb.CombineGraphRequest
+                .newBuilder()
+                .setHeader(header)
+                .setGraphName(graphName)
+                .setToCount(toCount)
+                .build();
+        Pdpb.CombineGraphResponse response = getStub().combineGraph(request);
+        handleResponseError(response.getHeader());
+    }
+
+    public void deleteShardGroup(int groupId) throws PDException {
+        Pdpb.DeleteShardGroupRequest request = Pdpb.DeleteShardGroupRequest
+                .newBuilder()
+                .setHeader(header)
+                .setGroupId(groupId)
+                .build();
+        Pdpb.DeleteShardGroupResponse response =
+                blockingUnaryCall(PDGrpc.getDeleteShardGroupMethod(), request);
+
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Rebuild the shard list of a store.
+     *
+     * @param groupId the shard group id
+     * @param shards  the shard list; the group is deleted when the list is empty
+     */
+    public void updateShardGroupOp(int groupId, List<Metapb.Shard> shards) throws PDException {
+        Pdpb.ChangeShardRequest request = Pdpb.ChangeShardRequest.newBuilder()
+                                                                 .setHeader(header)
+                                                                 .setGroupId(groupId)
+                                                                 .addAllShards(shards)
+                                                                 .build();
+        Pdpb.ChangeShardResponse response = getStub().updateShardGroupOp(request);
+        handleResponseError(response.getHeader());
+    }
+
+    /**
+     * Invoke the fireChangeShard command.
+     *
+     * @param groupId the shard group id
+     * @param shards  the shard list
+     */
+    public void changeShard(int groupId, List<Metapb.Shard> shards) throws PDException {
+        Pdpb.ChangeShardRequest request = Pdpb.ChangeShardRequest.newBuilder()
+                                                                 .setHeader(header)
+                                                                 .setGroupId(groupId)
+                                                                 .addAllShards(shards)
+                                                                 .build();
+        Pdpb.ChangeShardResponse response = getStub().changeShard(request);
+        handleResponseError(response.getHeader());
+    }
+
+    public ClientCache getCache() {
+        return cache;
+    }
+
+    public CacheResponse getClientCache() throws PDException {
+        GetGraphRequest request = GetGraphRequest.newBuilder().setHeader(header).build();
+        CacheResponse cache = getStub().getCache(request);
+        handleResponseError(cache.getHeader());
+        return cache;
+    }
+
+    public CachePartitionResponse getPartitionCache(String graph) throws PDException {
+        GetGraphRequest request =
+                GetGraphRequest.newBuilder().setHeader(header).setGraphName(graph).build();
+        CachePartitionResponse ps = getStub().getPartitions(request);
+        handleResponseError(ps.getHeader());
+        return ps;
+    }
+
+    public void updatePdRaft(String raftConfig) throws PDException {
+        Pdpb.UpdatePdRaftRequest request = Pdpb.UpdatePdRaftRequest.newBuilder()
+                                                                   .setHeader(header)
+                                                                   .setConfig(raftConfig)
+                                                                   .build();
+        Pdpb.UpdatePdRaftResponse response = getStub().updatePdRaft(request);
+        handleResponseError(response.getHeader());
+    }
+
+    public interface PDEventListener {
+
+        void onStoreChanged(NodeEvent event);
+
+        void onPartitionChanged(PartitionEvent event);
+
+        void onGraphChanged(WatchResponse event);
+
+        default void onShardGroupChanged(WatchResponse event) {
+        }
+
+    }
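+
+    // Sketch: register a listener to react to PD events (the method bodies are illustrative):
+    // pdClient.addEventListener(new PDEventListener() {
+    //     @Override
+    //     public void onStoreChanged(NodeEvent event) { /* refresh store routing */ }
+    //     @Override
+    //     public void onPartitionChanged(PartitionEvent event) { /* drop stale partition state */ }
+    //     @Override
+    //     public void onGraphChanged(WatchResponse event) { /* reload graph meta */ }
+    // });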
+
+    static class StubProxy {
+
+        private final LinkedList<String> hostList = new LinkedList<>();
+        private volatile PDGrpc.PDBlockingStub stub;
+        private String leader;
+
+        public StubProxy(String[] hosts) {
+            for (String host : hosts) {
+                if (!host.isEmpty()) {
+                    hostList.offer(host);
+                }
+            }
+        }
+
+        public String nextHost() {
+            String host = hostList.poll();
+            hostList.offer(host);   // move it to the tail (round-robin)
+            return host;
+        }
+
+        public void set(PDGrpc.PDBlockingStub stub) {
+            this.stub = stub;
+        }
+
+        public PDGrpc.PDBlockingStub get() {
+            return this.stub;
+        }
+
+        public String getHost() {
+            return hostList.peek();
+        }
+
+        public int getHostCount() {
+            return hostList.size();
+        }
+
+        public String getLeader() {
+            return leader;
+        }
+
+        public void setLeader(String leader) {
+            this.leader = leader;
+        }
+    }
+}
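
A minimal sketch of implementing the PDEventListener interface defined above (the print statements are placeholders; onShardGroupChanged has a default no-op body, so only the first three callbacks must be implemented):

    PDClient.PDEventListener listener = new PDClient.PDEventListener() {
        @Override
        public void onStoreChanged(NodeEvent event) {
            System.out.println("store changed: " + event);
        }

        @Override
        public void onPartitionChanged(PartitionEvent event) {
            System.out.println("partition changed: " + event);
        }

        @Override
        public void onGraphChanged(WatchResponse event) {
            System.out.println("graph changed");
        }
    };

How the listener gets registered with a PDClient instance is not part of this hunk.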
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java
new file mode 100644
index 000000000..a1c72a2bc
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDConfig.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+public final class PDConfig {
+
+    // TODO: support multiple servers
+    private String serverHost = "localhost:9000";
+    private long grpcTimeOut = 60000;   // gRPC call timeout in milliseconds (60 seconds)
+
+    // whether to receive asynchronous notifications from PD
+    private boolean enablePDNotify = false;
+
+    private boolean enableCache = false;
+
+    private PDConfig() {
+    }
+
+    public static PDConfig of() {
+        return new PDConfig();
+    }
+
+    public static PDConfig of(String serverHost) {
+        PDConfig config = new PDConfig();
+        config.serverHost = serverHost;
+        return config;
+    }
+
+    public static PDConfig of(String serverHost, long timeOut) {
+        PDConfig config = new PDConfig();
+        config.serverHost = serverHost;
+        config.grpcTimeOut = timeOut;
+        return config;
+    }
+
+    public String getServerHost() {
+        return serverHost;
+    }
+
+    public long getGrpcTimeOut() {
+        return grpcTimeOut;
+    }
+
+    @Deprecated
+    public PDConfig setEnablePDNotify(boolean enablePDNotify) {
+        this.enablePDNotify = enablePDNotify;
+
+        // TODO: temporary code, remove after hugegraph is updated
+        this.enableCache = enablePDNotify;
+        return this;
+    }
+
+    public boolean isEnableCache() {
+        return enableCache;
+    }
+
+    public PDConfig setEnableCache(boolean enableCache) {
+        this.enableCache = enableCache;
+        return this;
+    }
+
+    @Override
+    public String toString() {
+        return "PDConfig{" +
+               "serverHost='" + serverHost + '\'' +
+               '}';
+    }
+}
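
A typical construction path, using only the factory methods shown above (the address is illustrative):

    PDConfig config = PDConfig.of("127.0.0.1:8686", 60000)
                              .setEnableCache(true);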
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java
new file mode 100644
index 000000000..485417b91
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulse.java
@@ -0,0 +1,154 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.io.Closeable;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
+import org.apache.hugegraph.pd.pulse.PulseServerNotice;
+
+/**
+ * Bidirectional communication interface of pd-client and pd-server
+ */
+public interface PDPulse {
+
+    /*** inner static methods ***/
+    static <T> Listener<T> listener(Consumer<T> onNext) {
+        return listener(onNext, t -> {
+        }, () -> {
+        });
+    }
+
+    static <T> Listener<T> listener(Consumer<T> onNext, Consumer<Throwable> onError) {
+        return listener(onNext, onError, () -> {
+        });
+    }
+
+    static <T> Listener<T> listener(Consumer<T> onNext, Runnable onCompleted) {
+        return listener(onNext, t -> {
+        }, onCompleted);
+    }
+
+    static <T> Listener<T> listener(Consumer<T> onNext, Consumer<Throwable> onError,
+                                    Runnable onCompleted) {
+        return new Listener<>() {
+            @Override
+            public void onNext(T response) {
+                onNext.accept(response);
+            }
+
+            @Override
+            public void onNotice(PulseServerNotice<T> notice) {
+
+            }
+
+            @Override
+            public void onError(Throwable throwable) {
+                onError.accept(throwable);
+            }
+
+            @Override
+            public void onCompleted() {
+                onCompleted.run();
+            }
+        };
+    }
+
+    /**
+     * @param listener the listener that receives pulse responses from pd-server
+     * @return a notifier used to push partition heartbeats to pd-server
+     */
+    Notifier<PartitionHeartbeatRequest.Builder> connectPartition(Listener<PulseResponse> listener);
+
+    /**
+     * Switch to a new host. Checks the channel/host first; if the old connection
+     * needs to be closed, the notifier's close method is invoked.
+     *
+     * @param host     new host
+     * @param notifier the notifier bound to the old connection
+     * @return true if a new stub was created, otherwise false
+     */
+    boolean resetStub(String host, Notifier notifier);
+
+    /**
+     * Listener of pulse events.
+     */
+    interface Listener<T> {
+
+        /**
+         * Invoked on new events.
+         *
+         * @param response the response.
+         */
+        @Deprecated
+        default void onNext(T response) {
+        }
+
+        /**
+         * Invoked on new events.
+         *
+         * @param notice a wrapper of response
+         */
+        default void onNotice(PulseServerNotice<T> notice) {
+            notice.ack();
+        }
+
+        /**
+         * Invoked on errors.
+         *
+         * @param throwable the error.
+         */
+        void onError(Throwable throwable);
+
+        /**
+         * Invoked on completion.
+         */
+        void onCompleted();
+
+    }
+
+    /**
+     * Interface of a notifier that can send notices to pd-server.
+     *
+     * @param <T> the type of the notice payload
+     */
+    interface Notifier<T> extends Closeable {
+
+        /**
+         * closes this watcher and all its resources.
+         */
+        @Override
+        void close();
+
+        /**
+         * Send a notice to pd-server.
+         *
+         * @param t the notice payload
+         */
+        void notifyServer(T t);
+
+        /**
+         * Send an error report to pd-server.
+         *
+         * @param error the error message
+         */
+        void crash(String error);
+
+    }
+}
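
A small usage sketch of the static listener(...) helpers (the handlers are placeholders):

    PDPulse.Listener<PulseResponse> listener = PDPulse.listener(
            response -> System.out.println("pulse: " + response),
            error -> error.printStackTrace(),
            () -> System.out.println("pulse stream completed"));

Note that the anonymous listeners built by these helpers override onNotice() with an empty body, so the interface's default auto-ack does not run; a caller that relies on ack semantics should implement Listener directly.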
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java
new file mode 100644
index 000000000..0afc10c83
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDPulseImpl.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.util.Objects;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+
+import org.apache.hugegraph.pd.grpc.pulse.HgPdPulseGrpc;
+import org.apache.hugegraph.pd.grpc.pulse.PartitionHeartbeatRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseAckRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseCreateRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseNoticeRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseRequest;
+import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
+import org.apache.hugegraph.pd.grpc.pulse.PulseType;
+import org.apache.hugegraph.pd.pulse.PartitionNotice;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+import io.grpc.ManagedChannel;
+import io.grpc.stub.StreamObserver;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public final class PDPulseImpl implements PDPulse {
+
+    private static final ConcurrentHashMap<String, ManagedChannel> chs = new ConcurrentHashMap<>();
+    private final ExecutorService threadPool;
+    private HgPdPulseGrpc.HgPdPulseStub stub;
+    private String pdServerAddress;
+
+    // TODO: support several servers.
+    public PDPulseImpl(String pdServerAddress) {
+        this.pdServerAddress = pdServerAddress;
+        this.stub = HgPdPulseGrpc.newStub(Channels.getChannel(pdServerAddress));
+        var namedThreadFactory =
+                new ThreadFactoryBuilder().setNameFormat("ack-notice-pool-%d").build();
+        threadPool = Executors.newSingleThreadExecutor(namedThreadFactory);
+    }
+
+    private String getCurrentHost() {
+        return this.pdServerAddress;
+    }
+
+    private boolean checkChannel() {
+        return stub != null && !((ManagedChannel) stub.getChannel()).isShutdown();
+    }
+
+    /* TODO: handle this override problem */
+    @Override
+    public Notifier<PartitionHeartbeatRequest.Builder> connectPartition(Listener<PulseResponse>
+                                                                                listener) {
+        return new PartitionHeartbeat(listener);
+    }
+
+    @Override
+    public boolean resetStub(String host, Notifier notifier) {
+        log.info("reset stub: current, {}, new: {}, channel state:{}", getCurrentHost(), host,
+                 checkChannel());
+        if (Objects.equals(host, getCurrentHost()) && checkChannel()) {
+            return false;
+        }
+
+        if (notifier != null) {
+            notifier.close();
+        }
+
+        this.stub = HgPdPulseGrpc.newStub(Channels.getChannel(host));
+        log.info("pd pulse connect to {}", host);
+        this.pdServerAddress = host;
+        return true;
+    }
+
+    /*** PartitionHeartbeat implementation ***/
+    private class PartitionHeartbeat extends
+                                     AbstractConnector<PartitionHeartbeatRequest.Builder,
+                                             PulseResponse> {
+
+        private long observerId = -1;
+
+        PartitionHeartbeat(Listener<PulseResponse> listener) {
+            super(listener, PulseType.PULSE_TYPE_PARTITION_HEARTBEAT);
+        }
+
+        private void setObserverId(long observerId) {
+            if (this.observerId == -1) {
+                this.observerId = observerId;
+            }
+        }
+
+        @Override
+        public void notifyServer(PartitionHeartbeatRequest.Builder requestBuilder) {
+            this.reqStream.onNext(PulseRequest.newBuilder()
+                                              .setNoticeRequest(
+                                                      PulseNoticeRequest.newBuilder()
+                                                                        .setPartitionHeartbeatRequest(
+                                                                                requestBuilder.build()
+                                                                        ).build()
+                                              ).build()
+            );
+        }
+
+        @Override
+        public void onNext(PulseResponse pulseResponse) {
+            this.setObserverId(pulseResponse.getObserverId());
+            long noticeId = pulseResponse.getNoticeId();
+            this.listener.onNext(pulseResponse);
+            this.listener.onNotice(new PartitionNotice(noticeId,
+                                                       e -> super.ackNotice(e, observerId),
+                                                       pulseResponse));
+        }
+
+    }
+
+    private abstract class AbstractConnector<N, L> implements Notifier<N>,
+                                                              StreamObserver<PulseResponse> {
+
+        Listener<L> listener;
+        StreamObserver<PulseRequest> reqStream;
+        PulseType pulseType;
+        PulseRequest.Builder reqBuilder = PulseRequest.newBuilder();
+        PulseAckRequest.Builder ackBuilder = PulseAckRequest.newBuilder();
+
+        private AbstractConnector(Listener<L> listener, PulseType pulseType) {
+            this.listener = listener;
+            this.pulseType = pulseType;
+            this.init();
+        }
+
+        void init() {
+            PulseCreateRequest.Builder builder = PulseCreateRequest.newBuilder()
+                                                                   .setPulseType(this.pulseType);
+
+            this.reqStream = PDPulseImpl.this.stub.pulse(this);
+            this.reqStream.onNext(reqBuilder.clear().setCreateRequest(builder).build());
+        }
+
+        /*** notifier ***/
+        @Override
+        public void close() {
+            this.reqStream.onCompleted();
+        }
+
+        @Override
+        public abstract void notifyServer(N t);
+
+        @Override
+        public void crash(String error) {
+            this.reqStream.onError(new Throwable(error));
+        }
+
+        /*** listener  ***/
+        @Override
+        public abstract void onNext(PulseResponse pulseResponse);
+
+        @Override
+        public void onError(Throwable throwable) {
+            this.listener.onError(throwable);
+        }
+
+        @Override
+        public void onCompleted() {
+            this.listener.onCompleted();
+        }
+
+        protected void ackNotice(long noticeId, long observerId) {
+            threadPool.execute(() -> {
+                // log.info("send ack: {}, ts: {}", noticeId, System.currentTimeMillis());
+                this.reqStream.onNext(reqBuilder.clear()
+                                                .setAckRequest(
+                                                        this.ackBuilder.clear()
+                                                                       .setNoticeId(noticeId)
+                                                                       .setObserverId(observerId)
+                                                                       .build()
+                                                ).build()
+                );
+            });
+        }
+    }
+}
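
A usage sketch, assuming a PD server is reachable at the given (illustrative) address:

    PDPulse pulse = new PDPulseImpl("127.0.0.1:8686");
    PDPulse.Notifier<PartitionHeartbeatRequest.Builder> notifier =
            pulse.connectPartition(PDPulse.listener(
                    response -> System.out.println("heartbeat response: " + response)));
    // push heartbeats via notifier.notifyServer(builder), then release the stream
    notifier.close();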
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java
new file mode 100644
index 000000000..c6c46d03d
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatch.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.io.Closeable;
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
+import org.apache.hugegraph.pd.watch.NodeEvent;
+import org.apache.hugegraph.pd.watch.PartitionEvent;
+
+public interface PDWatch {
+
+    /**
+     * Watch the events of all store-nodes registered in the remote PD-Server.
+     *
+     * @param listener the listener that receives node events
+     * @return a watcher handle
+     */
+    //PDWatcher watchNode(Listener<NodeEvent> listener);
+
+    /*** inner static methods ***/
+    static <T> Listener<T> listener(Consumer<T> onNext) {
+        return listener(onNext, t -> {
+        }, () -> {
+        });
+    }
+
+    static <T> Listener<T> listener(Consumer<T> onNext, Consumer<Throwable> onError) {
+        return listener(onNext, onError, () -> {
+        });
+    }
+
+    static <T> Listener<T> listener(Consumer<T> onNext, Runnable onCompleted) {
+        return listener(onNext, t -> {
+        }, onCompleted);
+    }
+
+    static <T> Listener<T> listener(Consumer<T> onNext, Consumer<Throwable> onError,
+                                    Runnable onCompleted) {
+        return new Listener<T>() {
+            @Override
+            public void onNext(T response) {
+                onNext.accept(response);
+            }
+
+            @Override
+            public void onError(Throwable throwable) {
+                onError.accept(throwable);
+            }
+
+            @Override
+            public void onCompleted() {
+                onCompleted.run();
+            }
+        };
+    }
+
+    /**
+     * Watch the events of the store-nodes assigned to a specified graph.
+     *
+     * @param graph    the graph name to watch
+     * @param listener the listener that receives node events
+     * @return a watcher handle
+     */
+    //PDWatcher watchNode(String graph, Listener<NodeEvent> listener);
+
+    String getCurrentHost();
+
+    boolean checkChannel();
+
+    /**
+     * @param listener the listener that receives partition events
+     * @return a watcher that can be closed to stop watching
+     */
+    Watcher watchPartition(Listener<PartitionEvent> listener);
+
+    Watcher watchNode(Listener<NodeEvent> listener);
+
+    Watcher watchGraph(Listener<WatchResponse> listener);
+
+    Watcher watchShardGroup(Listener<WatchResponse> listener);
+
+    /**
+     * Listener of watch events.
+     */
+    interface Listener<T> {
+
+        /**
+         * Invoked on new events.
+         *
+         * @param response the response.
+         */
+        void onNext(T response);
+
+        /**
+         * Invoked on errors.
+         *
+         * @param throwable the error.
+         */
+        void onError(Throwable throwable);
+
+        /**
+         * Invoked on completion.
+         */
+        default void onCompleted() {
+        }
+
+    }
+
+    interface Watcher extends Closeable {
+
+        /**
+         * closes this watcher and all its resources.
+         */
+        @Override
+        void close();
+
+        /**
+         * Requests the latest revision processed and propagates it to listeners
+         */
+        // TODO: what's it for?
+        //void requestProgress();
+    }
+}
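
Given some PDWatch instance pdWatch (how one is obtained is outside this interface), a partition watch could look like this:

    PDWatch.Listener<PartitionEvent> listener = PDWatch.listener(
            event -> System.out.println("partition event: " + event));
    PDWatch.Watcher watcher = pdWatch.watchPartition(listener);
    // ...
    watcher.close();  // Watcher extends Closeable; close() releases the stream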
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java
new file mode 100644
index 000000000..9b136bb26
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/client/PDWatchImpl.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.client;
+
+import java.util.function.Supplier;
+
+import org.apache.hugegraph.pd.grpc.watch.HgPdWatchGrpc;
+import org.apache.hugegraph.pd.grpc.watch.WatchCreateRequest;
+import org.apache.hugegraph.pd.grpc.watch.WatchNodeResponse;
+import org.apache.hugegraph.pd.grpc.watch.WatchPartitionResponse;
+import org.apache.hugegraph.pd.grpc.watch.WatchRequest;
+import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
+import org.apache.hugegraph.pd.grpc.watch.WatchType;
+import org.apache.hugegraph.pd.watch.NodeEvent;
+import org.apache.hugegraph.pd.watch.PartitionEvent;
+
+import io.grpc.ManagedChannel;
+import io.grpc.stub.StreamObserver;
+
+final class PDWatchImpl implements PDWatch {
+
+    private final HgPdWatchGrpc.HgPdWatchStub stub;
+
+    private final String pdServerAddress;
+
+    // TODO: support several servers.
+    PDWatchImpl(String pdServerAddress) {
+        this.pdServerAddress = pdServerAddress;
+        this.stub = HgPdWatchGrpc.newStub(Channels.getChannel(pdServerAddress));
+    }
+
+    @Override
+    public String getCurrentHost() {
+        return this.pdServerAddress;
+    }
+
+    @Override
+    public boolean checkChannel() {
+        return stub != null && !((ManagedChannel) stub.getChannel()).isShutdown();
+    }
+
+    /**
+     * Get the partition-change watcher.
+     *
+     * @param listener the listener that receives partition events
+     * @return a watcher that can be closed to stop watching
+     */
+    @Override
+    public Watcher watchPartition(Listener<PartitionEvent> listener) {
+        return new PartitionWatcher(listener);
+    }
+
+    /**
+     * Get the store-node-change watcher.
+     *
+     * @param listener the listener that receives node events
+     * @return a watcher that can be closed to stop watching
+     */
+    @Override
+    public Watcher watchNode(Listener<NodeEvent> listener) {
+        return new NodeWatcher(listener);
+    }
+
+    @Override
+    public Watcher watchGraph(Listener<WatchResponse> listener) {
+        return new GraphWatcher(listener);
+    }
+
+    @Override
+    public Watcher watchShardGroup(Listener<WatchResponse> listener) {
+        return new ShardGroupWatcher(listener);
+    }
+
+    private class GraphWatcher extends AbstractWatcher<WatchResponse> {
+
+        private GraphWatcher(Listener listener) {
+            super(listener,
+                  () -> WatchCreateRequest
+                          .newBuilder()
+                          .setWatchType(WatchType.WATCH_TYPE_GRAPH_CHANGE)
+                          .build()
+            );
+        }
+
+        @Override
+        public void onNext(WatchResponse watchResponse) {
+            this.listener.onNext(watchResponse);
+        }
+    }
+
+    private class ShardGroupWatcher extends AbstractWatcher<WatchResponse> {
+
+        private ShardGroupWatcher(Listener listener) {
+            super(listener,
+                  () -> WatchCreateRequest
+                          .newBuilder()
+                          .setWatchType(WatchType.WATCH_TYPE_SHARD_GROUP_CHANGE)
+                          .build()
+            );
+        }
+
+        @Override
+        public void onNext(WatchResponse watchResponse) {
+            this.listener.onNext(watchResponse);
+        }
+    }
+
+    private class PartitionWatcher extends AbstractWatcher<PartitionEvent> {
+
+        private PartitionWatcher(Listener listener) {
+            super(listener,
+                  () -> WatchCreateRequest
+                          .newBuilder()
+                          .setWatchType(WatchType.WATCH_TYPE_PARTITION_CHANGE)
+                          .build()
+            );
+        }
+
+        @Override
+        public void onNext(WatchResponse watchResponse) {
+            WatchPartitionResponse res = watchResponse.getPartitionResponse();
+            PartitionEvent event = new PartitionEvent(res.getGraph(), res.getPartitionId(),
+                                                      PartitionEvent.ChangeType.grpcTypeOf(
+                                                              res.getChangeType()));
+            this.listener.onNext(event);
+        }
+    }
+
+    private class NodeWatcher extends AbstractWatcher<NodeEvent> {
+
+        private NodeWatcher(Listener listener) {
+            super(listener,
+                  () -> WatchCreateRequest
+                          .newBuilder()
+                          .setWatchType(WatchType.WATCH_TYPE_STORE_NODE_CHANGE)
+                          .build()
+            );
+        }
+
+        @Override
+        public void onNext(WatchResponse watchResponse) {
+            WatchNodeResponse res = watchResponse.getNodeResponse();
+            NodeEvent event = new NodeEvent(res.getGraph(), res.getNodeId(),
+                                            NodeEvent.EventType.grpcTypeOf(res.getNodeEventType()));
+            this.listener.onNext(event);
+        }
+    }
+
+    private abstract class AbstractWatcher<T> implements Watcher, StreamObserver<WatchResponse> {
+
+        Listener<T> listener;
+        StreamObserver<WatchRequest> reqStream;
+        Supplier<WatchCreateRequest> requestSupplier;
+
+        private AbstractWatcher(Listener<T> listener,
+                                Supplier<WatchCreateRequest> requestSupplier) {
+            this.listener = listener;
+            this.requestSupplier = requestSupplier;
+            this.init();
+        }
+
+        void init() {
+            this.reqStream = PDWatchImpl.this.stub.watch(this);
+            this.reqStream.onNext(WatchRequest.newBuilder().setCreateRequest(
+                    this.requestSupplier.get()
+            ).build());
+        }
+
+        @Override
+        public void close() {
+            this.reqStream.onCompleted();
+        }
+
+        @Override
+        public abstract void onNext(WatchResponse watchResponse);
+
+        @Override
+        public void onError(Throwable throwable) {
+            this.listener.onError(throwable);
+        }
+
+        @Override
+        public void onCompleted() {
+            this.listener.onCompleted();
+        }
+    }
+
+}
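
PDWatchImpl is package-private, so it can only be constructed from within org.apache.hugegraph.pd.client; a sketch under that assumption (the address is illustrative):

    PDWatch watch = new PDWatchImpl("127.0.0.1:8686");
    PDWatch.Watcher watcher = watch.watchNode(PDWatch.listener(
            event -> System.out.println("node event: " + event)));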
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java
new file mode 100644
index 000000000..80aa8951b
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PartitionNotice.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.pulse;
+
+import java.util.function.Consumer;
+
+import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
+
+public class PartitionNotice implements PulseServerNotice<PulseResponse> {
+
+    private final long noticeId;
+    private final Consumer<Long> ackConsumer;
+    private final PulseResponse content;
+
+    public PartitionNotice(long noticeId, Consumer<Long> ackConsumer, PulseResponse content) {
+        this.noticeId = noticeId;
+        this.ackConsumer = ackConsumer;
+        this.content = content;
+    }
+
+    @Override
+    public void ack() {
+        this.ackConsumer.accept(this.noticeId);
+    }
+
+    @Override
+    public long getNoticeId() {
+        return this.noticeId;
+    }
+
+    @Override
+    public PulseResponse getContent() {
+        return this.content;
+    }
+}
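
The ack consumer is what ties a notice back to its stream; a self-contained sketch with a placeholder consumer (real code sends a PulseAckRequest, as in PDPulseImpl.ackNotice):

    PulseResponse content = PulseResponse.newBuilder().build();  // placeholder content
    PartitionNotice notice = new PartitionNotice(
            42L,                                    // illustrative noticeId
            id -> System.out.println("ack " + id),  // ack consumer
            content);
    notice.ack();  // invokes the consumer with noticeId 42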
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java
new file mode 100644
index 000000000..9a30e2679
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/pulse/PulseServerNotice.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.pulse;
+
+public interface PulseServerNotice<T> {
+
+    /**
+     * @throws RuntimeException if sending the ack-message to pd-server fails
+     */
+    void ack();
+
+    long getNoticeId();
+
+    /**
+     * Return the response object of the gRPC stream.
+     *
+     * @return the response content
+     */
+    T getContent();
+
+}
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java
new file mode 100644
index 000000000..bb68383b8
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/NodeEvent.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.watch;
+
+import java.util.Objects;
+
+import org.apache.hugegraph.pd.grpc.watch.NodeEventType;
+
+public class NodeEvent {
+
+    private final String graph;
+    private final long nodeId;
+    private final EventType eventType;
+
+    public NodeEvent(String graph, long nodeId, EventType eventType) {
+        this.graph = graph;
+        this.nodeId = nodeId;
+        this.eventType = eventType;
+    }
+
+    public String getGraph() {
+        return graph;
+    }
+
+    public long getNodeId() {
+        return nodeId;
+    }
+
+    public EventType getEventType() {
+        return eventType;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        NodeEvent nodeEvent = (NodeEvent) o;
+        return nodeId == nodeEvent.nodeId && Objects.equals(graph,
+                                                            nodeEvent.graph) &&
+               eventType == nodeEvent.eventType;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(graph, nodeId, eventType);
+    }
+
+    @Override
+    public String toString() {
+        return "NodeEvent{" +
+               "graph='" + graph + '\'' +
+               ", nodeId=" + nodeId +
+               ", eventType=" + eventType +
+               '}';
+    }
+
+    public enum EventType {
+        UNKNOWN,
+        NODE_ONLINE,
+        NODE_OFFLINE,
+        NODE_RAFT_CHANGE,
+        NODE_PD_LEADER_CHANGE;
+
+        public static EventType grpcTypeOf(NodeEventType grpcType) {
+            switch (grpcType) {
+                case NODE_EVENT_TYPE_NODE_ONLINE:
+                    return NODE_ONLINE;
+                case NODE_EVENT_TYPE_NODE_OFFLINE:
+                    return NODE_OFFLINE;
+                case NODE_EVENT_TYPE_NODE_RAFT_CHANGE:
+                    return NODE_RAFT_CHANGE;
+                case NODE_EVENT_TYPE_PD_LEADER_CHANGE:
+                    return NODE_PD_LEADER_CHANGE;
+                default:
+                    return UNKNOWN;
+            }
+
+        }
+
+    }
+}
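
The grpcTypeOf(...) mapping above converts the generated gRPC enum into this client-side enum, falling back to UNKNOWN; for example:

    NodeEvent.EventType type =
            NodeEvent.EventType.grpcTypeOf(NodeEventType.NODE_EVENT_TYPE_NODE_ONLINE);
    // type == NodeEvent.EventType.NODE_ONLINE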
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java
new file mode 100644
index 000000000..d663f34a3
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PDWatcher.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.watch;
+
+public class PDWatcher {
+
+}
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java
new file mode 100644
index 000000000..e5be1b348
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/PartitionEvent.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.watch;
+
+import java.util.Objects;
+
+import org.apache.hugegraph.pd.grpc.watch.WatchChangeType;
+
+public class PartitionEvent {
+
+    private final String graph;
+    private final int partitionId;
+    private final ChangeType changeType;
+
+    public PartitionEvent(String graph, int partitionId, ChangeType changeType) {
+        this.graph = graph;
+        this.partitionId = partitionId;
+        this.changeType = changeType;
+    }
+
+    public String getGraph() {
+        return this.graph;
+    }
+
+    public int getPartitionId() {
+        return this.partitionId;
+    }
+
+    public ChangeType getChangeType() {
+        return this.changeType;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        PartitionEvent that = (PartitionEvent) o;
+        return partitionId == that.partitionId && Objects.equals(graph, that.graph) &&
+               changeType == that.changeType;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(graph, partitionId, changeType);
+    }
+
+    @Override
+    public String toString() {
+        return "PartitionEvent{" +
+               "graph='" + graph + '\'' +
+               ", partitionId=" + partitionId +
+               ", changeType=" + changeType +
+               '}';
+    }
+
+    public enum ChangeType {
+        UNKNOWN,
+        ADD,
+        ALTER,
+        DEL;
+
+        public static ChangeType grpcTypeOf(WatchChangeType grpcType) {
+            switch (grpcType) {
+                case WATCH_CHANGE_TYPE_ADD:
+                    return ADD;
+                case WATCH_CHANGE_TYPE_ALTER:
+                    return ALTER;
+                case WATCH_CHANGE_TYPE_DEL:
+                    return DEL;
+                default:
+                    return UNKNOWN;
+            }
+        }
+    }
+}
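
The same mapping pattern applies here; an illustrative construction (graph name and partition id are placeholders):

    PartitionEvent event = new PartitionEvent("graph1", 1,
            PartitionEvent.ChangeType.grpcTypeOf(WatchChangeType.WATCH_CHANGE_TYPE_ADD));
    // event.getChangeType() == PartitionEvent.ChangeType.ADD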
diff --git a/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java
new file mode 100644
index 000000000..e53770193
--- /dev/null
+++ b/hugegraph-pd/hg-pd-client/src/main/java/org/apache/hugegraph/pd/watch/WatchType.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.watch;
+
+enum WatchType {
+
+    PARTITION_CHANGE(10);
+
+    private final int value;
+
+    WatchType(int value) {
+        this.value = value;
+    }
+
+}


(incubator-hugegraph) 04/05: feat(pd): integrate `pd-test` submodule

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph.git

commit bd1d9db77b5d263f4dc0f8cc38198f5f26b65cb8
Author: VGalaxies <vg...@apache.org>
AuthorDate: Thu Apr 4 00:00:29 2024 +0800

    feat(pd): integrate `pd-test` submodule
    
    prepare tests for `pd-common`
---
 hugegraph-pd/hg-pd-test/pom.xml                    | 259 ++++++++++++++
 .../apache/hugegraph/pd/common/BaseCommonTest.java |  34 ++
 .../hugegraph/pd/common/CommonSuiteTest.java       |  36 ++
 .../apache/hugegraph/pd/common/HgAssertTest.java   | 132 +++++++
 .../org/apache/hugegraph/pd/common/KVPairTest.java |  72 ++++
 .../hugegraph/pd/common/PartitionCacheTest.java    | 388 +++++++++++++++++++++
 .../hugegraph/pd/common/PartitionUtilsTest.java    |  54 +++
 .../hg-pd-test/src/main/resources/log4j2.xml       | 139 ++++++++
 8 files changed, 1114 insertions(+)

diff --git a/hugegraph-pd/hg-pd-test/pom.xml b/hugegraph-pd/hg-pd-test/pom.xml
new file mode 100644
index 000000000..31c0fd889
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/pom.xml
@@ -0,0 +1,259 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xmlns="http://maven.apache.org/POM/4.0.0"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <parent>
+        <groupId>org.apache.hugegraph</groupId>
+        <artifactId>hugegraph-pd</artifactId>
+        <version>${revision}</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+    <modelVersion>4.0.0</modelVersion>
+
+    <artifactId>hg-pd-test</artifactId>
+
+    <properties>
+        <skip.dump>true</skip.dump>
+        <!--maven.test.skip>true</maven.test.skip-->
+        <powermock.version>2.0.0-RC.3</powermock.version>
+    </properties>
+
+    <profiles>
+        <profile>
+            <id>jacoco</id>
+            <activation>
+                <activeByDefault>false</activeByDefault>
+            </activation>
+            <build>
+                <plugins>
+                    <plugin>
+                        <groupId>org.jacoco</groupId>
+                        <artifactId>jacoco-maven-plugin</artifactId>
+                        <version>0.8.4</version>
+                        <configuration>
+                            <excludes>
+                                <exclude>**/grpc/**.*</exclude>
+                                <exclude>**/config/**.*</exclude>
+                            </excludes>
+                        </configuration>
+                        <executions>
+                            <execution>
+                                <goals>
+                                    <goal>prepare-agent</goal>
+                                </goals>
+                            </execution>
+                        </executions>
+                    </plugin>
+                </plugins>
+            </build>
+        </profile>
+    </profiles>
+
+    <dependencies>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.13.2</version>
+        </dependency>
+        <dependency>
+            <groupId>org.projectlombok</groupId>
+            <artifactId>lombok</artifactId>
+            <version>1.18.24</version>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-context-support</artifactId>
+            <version>5.3.20</version>
+        </dependency>
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-test</artifactId>
+            <version>5.3.20</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-slf4j-impl</artifactId>
+            <version>${log4j2.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.hugegraph</groupId>
+            <artifactId>hg-pd-common</artifactId>
+            <version>${revision}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.google.code.gson</groupId>
+            <artifactId>gson</artifactId>
+            <version>2.8.9</version>
+        </dependency>
+        <dependency>
+            <groupId>commons-io</groupId>
+            <artifactId>commons-io</artifactId>
+            <version>2.7</version>
+        </dependency>
+        <dependency>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-databind</artifactId>
+            <version>2.13.0</version>
+        </dependency>
+        <dependency>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-core</artifactId>
+            <version>2.13.0</version>
+        </dependency>
+        <dependency>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-annotations</artifactId>
+            <version>2.13.0</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.springframework.boot</groupId>
+            <artifactId>spring-boot-starter-test</artifactId>
+            <version>2.5.14</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.springframework.boot</groupId>
+                    <artifactId>spring-boot-starter-logging</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+
+        <dependency>
+            <groupId>org.powermock</groupId>
+            <artifactId>powermock-classloading-xstream</artifactId>
+            <version>${powermock.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.powermock</groupId>
+            <artifactId>powermock-module-junit4-rule</artifactId>
+            <version>${powermock.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.powermock</groupId>
+            <artifactId>powermock-api-support</artifactId>
+            <version>${powermock.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.powermock</groupId>
+            <artifactId>powermock-module-junit4</artifactId>
+            <version>${powermock.version}</version>
+            <scope>compile</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.powermock</groupId>
+            <artifactId>powermock-api-mockito2</artifactId>
+            <version>${powermock.version}</version>
+            <scope>compile</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-lang3</artifactId>
+            <version>3.14.0</version>
+            <scope>compile</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-surefire-plugin</artifactId>
+                <version>2.20</version>
+                <executions>
+                    <execution>
+                        <id>pd-common-test</id>
+                        <configuration>
+                            <testSourceDirectory>${basedir}/src/main/java/
+                            </testSourceDirectory>
+                            <testClassesDirectory>${basedir}/target/classes/
+                            </testClassesDirectory>
+                            <includes>
+                                <include>**/CommonSuiteTest.java</include>
+                            </includes>
+                        </configuration>
+                    </execution>
+                    <execution>
+                        <id>pd-client-test</id>
+                        <configuration>
+                            <testSourceDirectory>${basedir}/src/main/java/
+                            </testSourceDirectory>
+                            <testClassesDirectory>${basedir}/target/classes/
+                            </testClassesDirectory>
+                            <includes>
+                                <include>**/PDClientSuiteTest.java</include>
+                            </includes>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.jacoco</groupId>
+                <artifactId>jacoco-maven-plugin</artifactId>
+                <version>0.8.4</version>
+                <executions>
+                    <execution>
+                        <id>pre-test</id>
+                        <goals>
+                            <goal>prepare-agent</goal>
+                        </goals>
+                    </execution>
+                    <execution>
+                        <id>post-test</id>
+                        <phase>test</phase>
+                        <goals>
+                            <goal>report-aggregate</goal>
+                        </goals>
+                        <configuration>
+                            <outputDirectory>${basedir}/target/site/jacoco</outputDirectory>
+                        </configuration>
+                    </execution>
+                </executions>
+                <configuration>
+                    <excludes>
+                        <exclude>org/apache/hugegraph/pd/rest/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/service/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/model/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/watch/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/pulse/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/license/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/notice/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/util/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/metrics/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/util/grpc/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/boot/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/grpc/**/*.class</exclude>
+                        <exclude>org/apache/hugegraph/pd/raft/*.class</exclude>
+                        <exclude>**/RaftKVStore.class</exclude>
+                    </excludes>
+                </configuration>
+            </plugin>
+        </plugins>
+        <resources>
+            <resource>
+                <directory>src/main/resources/</directory>
+                <filtering>true</filtering>
+            </resource>
+        </resources>
+    </build>
+</project>
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java
new file mode 100644
index 000000000..fb4478e3d
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/BaseCommonTest.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+import org.junit.After;
+import org.junit.BeforeClass;
+
+public class BaseCommonTest {
+
+    @BeforeClass
+    public static void init() {
+
+    }
+
+    @After
+    public void teardown() {
+        // pass
+    }
+}
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java
new file mode 100644
index 000000000..02a5dfca6
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/CommonSuiteTest.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+
+import lombok.extern.slf4j.Slf4j;
+
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+        PartitionUtilsTest.class,
+        PartitionCacheTest.class,
+        HgAssertTest.class,
+        KVPairTest.class,
+})
+
+@Slf4j
+public class CommonSuiteTest {
+
+}
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java
new file mode 100644
index 000000000..3e61dd0a9
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/HgAssertTest.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+
+import org.junit.Test;
+
+public class HgAssertTest {
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testIsTrue() {
+        HgAssert.isTrue(false, "");
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testIsTrue2() {
+        HgAssert.isTrue(true, null);
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testIsFalse() {
+        HgAssert.isFalse(true, "");
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testIsFalse2() {
+        HgAssert.isFalse(false, null);
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void isArgumentValid() {
+        HgAssert.isArgumentValid(new byte[0], "");
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void isArgumentValidStr() {
+        HgAssert.isArgumentValid("", "");
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testIsArgumentNotNull() {
+        HgAssert.isArgumentNotNull(null, "");
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testIstValid() {
+        HgAssert.istValid(new byte[0], "");
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testIstValidStr() {
+        HgAssert.isValid("", "");
+    }
+
+    @Test(expected = IllegalArgumentException.class)
+    public void testIsNotNull() {
+        HgAssert.isNotNull(null, "");
+    }
+
+    @Test
+    public void testIsInvalid() {
+        assertFalse(HgAssert.isInvalid("abc", "test"));
+        assertTrue(HgAssert.isInvalid("", null));
+    }
+
+    @Test
+    public void testIsInvalidByte() {
+        assertTrue(HgAssert.isInvalid(new byte[0]));
+        assertFalse(HgAssert.isInvalid(new byte[1]));
+    }
+
+    @Test
+    public void testIsInvalidMap() {
+        assertTrue(HgAssert.isInvalid(new HashMap<Integer, Integer>()));
+        assertFalse(HgAssert.isInvalid(new HashMap<Integer, Integer>() {{
+            put(1, 1);
+        }}));
+    }
+
+    @Test
+    public void testIsInvalidCollection() {
+        assertTrue(HgAssert.isInvalid(new ArrayList<Integer>()));
+        assertFalse(HgAssert.isInvalid(new ArrayList<Integer>() {{
+            add(1);
+        }}));
+    }
+
+    @Test
+    public void testIsContains() {
+        assertTrue(HgAssert.isContains(new Object[]{Integer.valueOf(1), Long.valueOf(2)},
+                                       Long.valueOf(2)));
+        assertFalse(HgAssert.isContains(new Object[]{Integer.valueOf(1), Long.valueOf(2)},
+                                        Long.valueOf(3)));
+    }
+
+    @Test
+    public void testIsContainsT() {
+        assertTrue(HgAssert.isContains(new ArrayList<>() {{
+            add(1);
+        }}, 1));
+        assertFalse(HgAssert.isContains(new ArrayList<>() {{
+            add(1);
+        }}, 2));
+    }
+
+    @Test
+    public void testIsNull() {
+        assertTrue(HgAssert.isNull(null));
+        assertFalse(HgAssert.isNull("abc", "cdf"));
+    }
+
+}
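
In short, the contract these tests pin down: the isTrue/isFalse/is*Valid/isNotNull
family throws IllegalArgumentException on violation (or on a null message), while the
isInvalid/isNull/isContains helpers just return booleans. A minimal illustrative
sketch (the condition and message are arbitrary):

    // throws IllegalArgumentException if the condition fails or the message is null
    HgAssert.isTrue(partCount > 0, "partCount must be positive");
    // boolean-returning helpers, nothing thrown
    boolean empty = HgAssert.isInvalid("");     // true
    boolean absent = HgAssert.isNull("abc");    // false
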
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java
new file mode 100644
index 000000000..9fb676d39
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/KVPairTest.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class KVPairTest {
+
+    KVPair<String, Integer> pair;
+
+    @Before
+    public void init() {
+        this.pair = new KVPair<>("key", 1);
+    }
+
+    @Test
+    public void testGetKey() {
+        assertEquals(this.pair.getKey(), "key");
+    }
+
+    @Test
+    public void testSetKey() {
+        this.pair.setKey("key2");
+        assertEquals(this.pair.getKey(), "key2");
+    }
+
+    @Test
+    public void testGetValue() {
+        assertEquals(1, this.pair.getValue());
+    }
+
+    @Test
+    public void testSetValue() {
+        this.pair.setValue(2);
+        assertEquals(2, this.pair.getValue());
+    }
+
+    @Test
+    public void testToString() {
+        // smoke check only: the exact string format is left unspecified
+        Assert.assertNotNull(this.pair.toString());
+    }
+
+    @Test
+    public void testHashCode() {
+        // equal pairs must agree on hashCode, cf. testEquals below
+        assertEquals(new KVPair<>("key", 1).hashCode(), this.pair.hashCode());
+    }
+
+    @Test
+    public void testEquals() {
+        var pair2 = new KVPair<>("key", 1);
+        Assert.assertEquals(pair2, this.pair);
+    }
+}
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java
new file mode 100644
index 000000000..21e757ffa
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionCacheTest.java
@@ -0,0 +1,388 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import java.io.UnsupportedEncodingException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hugegraph.pd.grpc.Metapb;
+import org.junit.Before;
+import org.junit.Test;
+
+public class PartitionCacheTest {
+
+    private PartitionCache cache;
+
+    private static Metapb.Partition createPartition(int pid, String graphName, long start,
+                                                    long end) {
+        return Metapb.Partition.newBuilder()
+                               .setId(pid)
+                               .setGraphName(graphName)
+                               .setStartKey(start)
+                               .setEndKey(end)
+                               .setState(Metapb.PartitionState.PState_Normal)
+                               .setVersion(1)
+                               .build();
+    }
+
+    private static Metapb.ShardGroup creteShardGroup(int pid) {
+        return Metapb.ShardGroup.newBuilder()
+                                .addShards(
+                                        Metapb.Shard.newBuilder().setStoreId(0)
+                                                    .setRole(Metapb.ShardRole.Leader).build()
+                                )
+                                .setId(pid)
+                                .setVersion(0)
+                                .setConfVer(0)
+                                .setState(Metapb.PartitionState.PState_Normal)
+                                .build();
+    }
+
+    private static Metapb.Shard createShard() {
+        return Metapb.Shard.newBuilder()
+                           .setStoreId(0)
+                           .setRole(Metapb.ShardRole.Leader)
+                           .build();
+    }
+
+    private static Metapb.Store createStore(long storeId) {
+        return Metapb.Store.newBuilder()
+                           .setId(storeId)
+                           .setAddress("127.0.0.1")
+                           .setCores(4)
+                           .setVersion("1")
+                           .setDataPath("/tmp/junit")
+                           .setDataVersion(1)
+                           .setLastHeartbeat(System.currentTimeMillis())
+                           .setStartTimestamp(System.currentTimeMillis())
+                           .setState(Metapb.StoreState.Up)
+                           .setDeployPath("/tmp/junit")
+                           .build();
+    }
+
+    private static Metapb.Graph createGraph(String graphName, int partitionCount) {
+        return Metapb.Graph.newBuilder()
+                           .setGraphName(graphName)
+                           .setPartitionCount(partitionCount)
+                           .setState(Metapb.PartitionState.PState_Normal)
+                           .build();
+    }
+
+    private static Metapb.ShardGroup createShardGroup() {
+        List<Metapb.Shard> shards = new ArrayList<>();
+        for (int i = 0; i < 3; i++) {
+            shards.add(Metapb.Shard.newBuilder()
+                                   .setStoreId(i)
+                                   .setRole(i == 0 ? Metapb.ShardRole.Leader :
+                                            Metapb.ShardRole.Follower)
+                                   .build()
+            );
+        }
+
+        return Metapb.ShardGroup.newBuilder()
+                                .setId(1)
+                                .setVersion(1)
+                                .setConfVer(1)
+                                .setState(Metapb.PartitionState.PState_Normal)
+                                .addAllShards(shards)
+                                .build();
+    }
+
+    @Before
+    public void setup() {
+        this.cache = new PartitionCache();
+    }
+
+    @Test
+    public void testGetPartitionById() {
+        var partition = createPartition(0, "graph0", 0, 65535);
+        this.cache.updateShardGroup(creteShardGroup(0));
+        this.cache.updatePartition(partition);
+        var ret = this.cache.getPartitionById("graph0", 0);
+        assertNotNull(ret);
+        assertEquals(ret.getKey(), partition);
+    }
+
+    @Test
+    public void testGetPartitionByKey() throws UnsupportedEncodingException {
+        var partition = createPartition(0, "graph0", 0, 65535);
+        this.cache.updateShardGroup(creteShardGroup(0));
+        this.cache.updatePartition(partition);
+        var ret = this.cache.getPartitionByKey("graph0", "0".getBytes(StandardCharsets.UTF_8));
+        assertNotNull(ret);
+        assertEquals(ret.getKey(), partition);
+    }
+
+    @Test
+    public void getPartitionByCode() {
+        var partition = createPartition(0, "graph0", 0, 1024);
+        this.cache.updateShardGroup(creteShardGroup(0));
+        this.cache.updatePartition(partition);
+        var ret = this.cache.getPartitionByCode("graph0", 10);
+        assertNotNull(ret);
+        assertEquals(ret.getKey(), partition);
+        assertNull(this.cache.getPartitionByCode("graph0", 2000));
+    }
+
+    @Test
+    public void testGetPartitions() {
+        var partition1 = createPartition(0, "graph0", 0, 1024);
+        this.cache.updateShardGroup(creteShardGroup(0));
+        this.cache.updatePartition(partition1);
+        assertEquals(this.cache.getPartitions("graph0").size(), 1);
+        var partition2 = createPartition(1, "graph0", 1024, 2048);
+        this.cache.updateShardGroup(creteShardGroup(1));
+        this.cache.updatePartition(partition2);
+        assertEquals(this.cache.getPartitions("graph0").size(), 2);
+        System.out.println(this.cache.debugCacheByGraphName("graph0"));
+    }
+
+    @Test
+    public void testAddPartition() {
+        var partition = createPartition(0, "graph0", 0, 65535);
+        this.cache.addPartition("graph0", 0, partition);
+        var ret = this.cache.getPartitionById("graph0", 0);
+        assertNotNull(ret);
+        assertEquals(ret.getKey(), partition);
+        assertNotNull(this.cache.getPartitionByCode("graph0", 2000));
+        System.out.println(this.cache.debugCacheByGraphName("graph0"));
+        var partition2 = createPartition(0, "graph0", 0, 1024);
+        this.cache.addPartition("graph0", 0, partition2);
+        ret = this.cache.getPartitionById("graph0", 0);
+        assertNotNull(ret);
+        assertEquals(ret.getKey(), partition2);
+        assertNull(this.cache.getPartitionByCode("graph0", 2000));
+        System.out.println(this.cache.debugCacheByGraphName("graph0"));
+    }
+
+    @Test
+    public void testUpdatePartition() {
+        var partition = createPartition(0, "graph0", 0, 65535);
+        this.cache.updateShardGroup(creteShardGroup(0));
+        this.cache.addPartition("graph0", 0, partition);
+        var partition2 = createPartition(0, "graph0", 0, 1024);
+        this.cache.updatePartition("graph0", 0, partition2);
+        var ret = this.cache.getPartitionById("graph0", 0);
+        assertNotNull(ret);
+        assertEquals(ret.getKey(), partition2);
+        assertNull(this.cache.getPartitionByCode("graph0", 2000));
+    }
+
+    @Test
+    public void testUpdatePartition2() {
+        var partition = createPartition(0, "graph0", 0, 1024);
+        this.cache.updateShardGroup(creteShardGroup(0));
+        assertTrue(this.cache.updatePartition(partition));
+        assertFalse(this.cache.updatePartition(partition));
+        var ret = this.cache.getPartitionById("graph0", 0);
+        assertNotNull(ret);
+        assertEquals(ret.getKey(), partition);
+        assertNull(this.cache.getPartitionByCode("graph0", 2000));
+    }
+
+    @Test
+    public void testRemovePartition() {
+        var partition = createPartition(0, "graph0", 0, 1024);
+        this.cache.updateShardGroup(creteShardGroup(0));
+        this.cache.updatePartition(partition);
+        assertNotNull(this.cache.getPartitionById("graph0", 0));
+        this.cache.removePartition("graph0", 0);
+        assertNull(this.cache.getPartitionById("graph0", 0));
+        System.out.println(this.cache.debugCacheByGraphName("graph0"));
+    }
+
+    @Test
+    public void testRange() {
+        var partition1 = createPartition(1, "graph0", 0, 3);
+        var partition2 = createPartition(2, "graph0", 3, 6);
+        this.cache.updatePartition(partition1);
+        this.cache.updatePartition(partition2);
+
+        var partition3 = createPartition(3, "graph0", 1, 2);
+        var partition4 = createPartition(4, "graph0", 2, 3);
+        this.cache.updatePartition(partition3);
+        this.cache.updatePartition(partition4);
+        System.out.println(this.cache.debugCacheByGraphName("graph0"));
+
+        var partition6 = createPartition(1, "graph0", 0, 1);
+        this.cache.updatePartition(partition6);
+        System.out.println(this.cache.debugCacheByGraphName("graph0"));
+
+        var partition5 = createPartition(1, "graph0", 0, 3);
+        this.cache.updatePartition(partition5);
+        System.out.println(this.cache.debugCacheByGraphName("graph0"));
+    }
+
+    @Test
+    public void testRange2() {
+        var partition1 = createPartition(1, "graph0", 0, 3);
+        var partition2 = createPartition(2, "graph0", 3, 6);
+        this.cache.updatePartition(partition1);
+        this.cache.updatePartition(partition2);
+        System.out.println(this.cache.debugCacheByGraphName("graph0"));
+
+        // a gap is left in the middle of the key range
+        var partition3 = createPartition(1, "graph0", 2, 3);
+        this.cache.updatePartition(partition3);
+        System.out.println(this.cache.debugCacheByGraphName("graph0"));
+
+        var partition5 = createPartition(1, "graph0", 0, 3);
+        this.cache.updatePartition(partition5);
+        System.out.println(this.cache.debugCacheByGraphName("graph0"));
+    }
+
+    @Test
+    public void testRemovePartitions() {
+        var partition1 = createPartition(0, "graph0", 0, 1024);
+        var partition2 = createPartition(1, "graph0", 1024, 2048);
+        this.cache.updateShardGroup(creteShardGroup(0));
+        this.cache.updatePartition(partition1);
+        this.cache.updateShardGroup(creteShardGroup(1));
+        this.cache.updatePartition(partition2);
+        assertEquals(this.cache.getPartitions("graph0").size(), 2);
+        this.cache.removePartitions();
+        assertEquals(this.cache.getPartitions("graph0").size(), 0);
+    }
+
+    @Test
+    public void testRemoveAll() {
+        var partition1 = createPartition(0, "graph0", 0, 1024);
+        var partition2 = createPartition(1, "graph0", 1024, 2048);
+        var partition3 = createPartition(0, "graph1", 0, 2048);
+        this.cache.updateShardGroup(creteShardGroup(0));
+        this.cache.updateShardGroup(creteShardGroup(1));
+        this.cache.updatePartition(partition1);
+        this.cache.updatePartition(partition2);
+        this.cache.updatePartition(partition3);
+
+        assertEquals(this.cache.getPartitions("graph0").size(), 2);
+        assertEquals(this.cache.getPartitions("graph1").size(), 1);
+        this.cache.removeAll("graph0");
+        assertEquals(this.cache.getPartitions("graph0").size(), 0);
+        assertEquals(this.cache.getPartitions("graph1").size(), 1);
+    }
+
+    @Test
+    public void testUpdateShardGroup() {
+        var shardGroup = createShardGroup();
+        this.cache.updateShardGroup(shardGroup);
+        assertNotNull(this.cache.getShardGroup(shardGroup.getId()));
+    }
+
+    @Test
+    public void testGetShardGroup() {
+        var shardGroup = createShardGroup();
+        this.cache.updateShardGroup(shardGroup);
+        assertEquals(this.cache.getShardGroup(shardGroup.getId()), shardGroup);
+    }
+
+    @Test
+    public void testAddStore() {
+        var store = createStore(1);
+        this.cache.addStore(1L, store);
+        assertEquals(this.cache.getStoreById(1L), store);
+    }
+
+    @Test
+    public void testGetStoreById() {
+        var store = createStore(1);
+        this.cache.addStore(1L, store);
+        assertEquals(this.cache.getStoreById(1L), store);
+    }
+
+    @Test
+    public void testRemoveStore() {
+        var store = createStore(1);
+        this.cache.addStore(1L, store);
+        assertEquals(this.cache.getStoreById(1L), store);
+
+        this.cache.removeStore(1L);
+        assertNull(this.cache.getStoreById(1L));
+    }
+
+    @Test
+    public void testHasGraph() {
+        var partition = createPartition(0, "graph0", 0, 65535);
+        this.cache.updateShardGroup(creteShardGroup(0));
+        this.cache.updatePartition(partition);
+        assertTrue(this.cache.hasGraph("graph0"));
+        assertFalse(this.cache.hasGraph("graph1"));
+    }
+
+    @Test
+    public void testUpdateGraph() {
+        var graph = createGraph("graph0", 10);
+        this.cache.updateGraph(graph);
+        assertEquals(this.cache.getGraph("graph0"), graph);
+        graph = createGraph("graph0", 12);
+        this.cache.updateGraph(graph);
+        assertEquals(this.cache.getGraph("graph0"), graph);
+    }
+
+    @Test
+    public void testGetGraph() {
+        var graph = createGraph("graph0", 12);
+        this.cache.updateGraph(graph);
+        assertEquals(this.cache.getGraph("graph0"), graph);
+    }
+
+    @Test
+    public void testGetGraphs() {
+        var graph1 = createGraph("graph0", 12);
+        var graph2 = createGraph("graph1", 12);
+        var graph3 = createGraph("graph2", 12);
+        this.cache.updateGraph(graph1);
+        this.cache.updateGraph(graph2);
+        this.cache.updateGraph(graph3);
+        assertEquals(this.cache.getGraphs().size(), 3);
+    }
+
+    @Test
+    public void testReset() {
+        var graph1 = createGraph("graph0", 12);
+        var graph2 = createGraph("graph1", 12);
+        var graph3 = createGraph("graph2", 12);
+        this.cache.updateGraph(graph1);
+        this.cache.updateGraph(graph2);
+        this.cache.updateGraph(graph3);
+        assertEquals(this.cache.getGraphs().size(), 3);
+        this.cache.reset();
+        assertEquals(this.cache.getGraphs().size(), 0);
+    }
+
+    @Test
+    public void testUpdateShardGroupLeader() {
+        var shardGroup = createShardGroup();
+        this.cache.updateShardGroup(shardGroup);
+
+        var leader =
+                Metapb.Shard.newBuilder().setStoreId(2).setRole(Metapb.ShardRole.Leader).build();
+        this.cache.updateShardGroupLeader(shardGroup.getId(), leader);
+
+        assertEquals(this.cache.getLeaderShard(shardGroup.getId()), leader);
+    }
+
+}
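
Taken together, the tests above imply the typical call sequence for PartitionCache;
a condensed sketch (graph name and key range are illustrative, and the tests register
the shard group before its partitions):

    PartitionCache cache = new PartitionCache();
    cache.updateShardGroup(Metapb.ShardGroup.newBuilder()
            .setId(0)
            .addShards(Metapb.Shard.newBuilder()
                    .setStoreId(0)
                    .setRole(Metapb.ShardRole.Leader))
            .build());
    cache.updatePartition(Metapb.Partition.newBuilder()
            .setId(0)
            .setGraphName("graph0")
            .setStartKey(0)
            .setEndKey(65535)
            .build());
    var byId = cache.getPartitionById("graph0", 0);      // KVPair with the partition as key
    var byCode = cache.getPartitionByCode("graph0", 10); // code 10 falls in [0, 65535)
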
diff --git a/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java
new file mode 100644
index 000000000..e0742a483
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/src/main/java/org/apache/hugegraph/pd/common/PartitionUtilsTest.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hugegraph.pd.common;
+
+import java.nio.charset.StandardCharsets;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class PartitionUtilsTest extends BaseCommonTest {
+
+    @Test
+    public void testCalcHashcode() {
+        byte[] key = new byte[5];
+        long code = PartitionUtils.calcHashcode(key);
+        Assert.assertEquals(code, 31912L);
+    }
+
+    // Disabled: a manual check of the hash distribution (prints counts, no assertions)
+    // @Test
+    public void testHashCode() {
+        int partCount = 10;
+        int partSize = PartitionUtils.MAX_VALUE / partCount + 1;
+        int[] counter = new int[partCount];
+        for (int i = 0; i < 10000; i++) {
+            String s = String.format("BATCH-GET-UNIT-%02d", i);
+            int c = PartitionUtils.calcHashcode(s.getBytes(StandardCharsets.UTF_8));
+
+            counter[c / partSize]++;
+
+        }
+
+        for (int i = 0; i < counter.length; i++) {
+            System.out.println(i + " " + counter[i]);
+        }
+    }
+}
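
The disabled testHashCode above also documents how a key's hash code maps to a
partition index; extracted as a standalone sketch (partCount is illustrative):

    byte[] key = "some-row-key".getBytes(StandardCharsets.UTF_8);
    int code = PartitionUtils.calcHashcode(key);             // in [0, PartitionUtils.MAX_VALUE]
    int partCount = 10;
    int partSize = PartitionUtils.MAX_VALUE / partCount + 1;
    int partitionIndex = code / partSize;                    // in [0, partCount)
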
diff --git a/hugegraph-pd/hg-pd-test/src/main/resources/log4j2.xml b/hugegraph-pd/hg-pd-test/src/main/resources/log4j2.xml
new file mode 100644
index 000000000..e462bf16e
--- /dev/null
+++ b/hugegraph-pd/hg-pd-test/src/main/resources/log4j2.xml
@@ -0,0 +1,139 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<!-- Config will be autoloaded every 60s -->
+<configuration status="error" monitorInterval="60">
+    <properties>
+        <property name="LOG_PATH">logs</property>
+        <property name="FILE_NAME">hg-pd-test</property>
+    </properties>
+
+    <appenders>
+        <Console name="console" target="SYSTEM_OUT">
+            <ThresholdFilter level="DEBUG" onMatch="ACCEPT" onMismatch="DENY" />
+            <PatternLayout pattern="%-d{yyyy-MM-dd HH:mm:ss} [%t] [%p] %c{1.} - %m%n" />
+        </Console>
+
+        <!-- Normal server log config -->
+        <RollingRandomAccessFile name="file" fileName="${LOG_PATH}/${FILE_NAME}.log"
+                                 filePattern="${LOG_PATH}/$${date:yyyy-MM}/${FILE_NAME}-%d{yyyy-MM-dd}-%i.log"
+                                 bufferedIO="true" bufferSize="524288" immediateFlush="false">
+            <ThresholdFilter level="TRACE" onMatch="ACCEPT" onMismatch="DENY" />
+            <PatternLayout pattern="%-d{yyyy-MM-dd HH:mm:ss} [%t] [%p] %c{1.} - %m%n" />
+            <!--JsonLayout compact="true" eventEol="true" complete="true" locationInfo="true">
+                <KeyValuePair key="timestamp" value="$${date:yyyy-MM-dd HH:mm:ss.SSS}"/>
+            </JsonLayout-->
+            <!-- Trigger rollover after exceeding 1 day or 128MB -->
+            <Policies>
+                <SizeBasedTriggeringPolicy size="128MB" />
+                <TimeBasedTriggeringPolicy interval="1" modulate="true" />
+            </Policies>
+            <!-- Keep up to 16 files per day & auto delete after exceeding 2GB or 100 files -->
+            <DefaultRolloverStrategy max="16">
+                <Delete basePath="${LOG_PATH}" maxDepth="2">
+                    <IfFileName glob="*/*.log" />
+                    <!-- Limit log amount & size -->
+                    <IfAny>
+                        <IfAccumulatedFileSize exceeds="2GB" />
+                        <IfAccumulatedFileCount exceeds="100" />
+                    </IfAny>
+                </Delete>
+            </DefaultRolloverStrategy>
+        </RollingRandomAccessFile>
+
+        <!-- jraft server log config -->
+        <RollingRandomAccessFile name="raft_file" fileName="${LOG_PATH}/${FILE_NAME}_raft.log"
+                                 filePattern="${LOG_PATH}/$${date:yyyy-MM}/${FILE_NAME}-%d{yyyy-MM-dd}-%i.log"
+                                 bufferedIO="true" bufferSize="524288" immediateFlush="false">
+            <ThresholdFilter level="TRACE" onMatch="ACCEPT" onMismatch="DENY" />
+            <PatternLayout pattern="%-d{yyyy-MM-dd HH:mm:ss} [%t] [%p] %c{1.} - %m%n" />
+            <!--JsonLayout compact="true" eventEol="true" complete="true" locationInfo="true">
+                <KeyValuePair key="timestamp" value="$${date:yyyy-MM-dd HH:mm:ss.SSS}"/>
+            </JsonLayout-->
+            <!-- Trigger rollover after exceeding 1 day or 128MB -->
+            <Policies>
+                <SizeBasedTriggeringPolicy size="128MB" />
+                <TimeBasedTriggeringPolicy interval="1" modulate="true" />
+            </Policies>
+            <!-- Keep up to 16 files per day & auto delete after exceeding 2GB or 100 files -->
+            <DefaultRolloverStrategy max="16">
+                <Delete basePath="${LOG_PATH}" maxDepth="2">
+                    <IfFileName glob="*/*.log" />
+                    <!-- Limit log amount & size -->
+                    <IfAny>
+                        <IfAccumulatedFileSize exceeds="2GB" />
+                        <IfAccumulatedFileCount exceeds="100" />
+                    </IfAny>
+                </Delete>
+            </DefaultRolloverStrategy>
+        </RollingRandomAccessFile>
+
+        <!-- Separate & compress audit log, buffer size is 512KB -->
+        <RollingRandomAccessFile name="audit" fileName="${LOG_PATH}/audit-${FILE_NAME}.log"
+                                 filePattern="${LOG_PATH}/$${date:yyyy-MM}/audit-${FILE_NAME}-%d{yyyy-MM-dd-HH}-%i.gz"
+                                 bufferedIO="true" bufferSize="524288" immediateFlush="false">
+            <ThresholdFilter level="TRACE" onMatch="ACCEPT" onMismatch="DENY" />
+            <!-- Use simple format for audit log to speed up -->
+            <!-- PatternLayout pattern="%-d{yyyy-MM-dd HH:mm:ss} - %m%n"/ -->
+            <JsonLayout compact="true" eventEol="true" locationInfo="true">
+                <KeyValuePair key="timestamp" value="$${date:yyyy-MM-dd HH:mm:ss.SSS}" />
+            </JsonLayout>
+            <!-- Trigger rollover after exceeding 1 hour or 512MB -->
+            <Policies>
+                <SizeBasedTriggeringPolicy size="512MB" />
+                <TimeBasedTriggeringPolicy interval="1" modulate="true" />
+            </Policies>
+            <!-- Keep up to 16 files per hour & auto delete [after 60 days] or [over 5GB or 500 files] -->
+            <DefaultRolloverStrategy max="16">
+                <Delete basePath="${LOG_PATH}" maxDepth="2">
+                    <IfFileName glob="*/*.gz" />
+                    <IfLastModified age="60d" />
+                    <IfAny>
+                        <IfAccumulatedFileSize exceeds="5GB" />
+                        <IfAccumulatedFileCount exceeds="500" />
+                    </IfAny>
+                </Delete>
+            </DefaultRolloverStrategy>
+        </RollingRandomAccessFile>
+    </appenders>
+
+    <loggers>
+        <root level="INFO">
+            <appender-ref ref="file" />
+            <appender-ref ref="console" />
+        </root>
+        <logger name="com.alipay.sofa" level="INFO" additivity="false">
+            <appender-ref ref="raft_file" />
+            <appender-ref ref="console" />
+        </logger>
+        <logger name="io.netty" level="INFO" additivity="false">
+            <appender-ref ref="file" />
+            <appender-ref ref="console" />
+        </logger>
+        <logger name="org.apache.commons" level="INFO" additivity="false">
+            <appender-ref ref="file" />
+            <appender-ref ref="console" />
+        </logger>
+        <!-- Use mixed async way to output logs -->
+        <AsyncLogger name="org.apache.hugegraph" level="INFO" additivity="false">
+            <appender-ref ref="file" />
+            <appender-ref ref="console" />
+        </AsyncLogger>
+    </loggers>
+</configuration>


(incubator-hugegraph) 01/05: refact: prepare for integrating pd modules

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph.git

commit 37e4405c04346683146be8874e2d2dfc4f6fc355
Author: VGalaxies <vg...@apache.org>
AuthorDate: Wed Apr 3 23:54:28 2024 +0800

    refact: prepare for integrating pd modules
    
    1. prepare pom and CI for pd-client, pd-common, pd-grpc and pd-test
    2. drop support for java8
---
 .github/workflows/check-dependencies.yml |   3 +-
 .github/workflows/ci.yml                 |   2 +-
 .github/workflows/pd-store.yml           |  51 +++++++++
 hugegraph-pd/.gitignore                  |   2 +
 hugegraph-pd/README.md                   |   9 +-
 hugegraph-pd/pom.xml                     | 184 +++++++++++++++++++++++++++++++
 pom.xml                                  |  23 +++-
 7 files changed, 265 insertions(+), 9 deletions(-)

diff --git a/.github/workflows/check-dependencies.yml b/.github/workflows/check-dependencies.yml
index 5350d53fe..8d5ec9f85 100644
--- a/.github/workflows/check-dependencies.yml
+++ b/.github/workflows/check-dependencies.yml
@@ -49,7 +49,8 @@ jobs:
         uses: actions/dependency-review-action@v3
         # Refer: https://github.com/actions/dependency-review-action
         with:
-          fail-on-severity: low
+          # TODO: reset critical to low before releasing
+          fail-on-severity: critical
           # Action will fail if dependencies don't match the list
           #allow-licenses: Apache-2.0, MIT
           #deny-licenses: GPL-3.0, AGPL-1.0, AGPL-3.0, LGPL-2.0, CC-BY-3.0
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 59cdad7b8..b96383f7c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -28,7 +28,7 @@ jobs:
       fail-fast: false
       matrix:
         BACKEND: [ memory, rocksdb, hbase, cassandra, mysql, postgresql, scylladb ]
-        JAVA_VERSION: [ '8', '11' ]
+        JAVA_VERSION: [ '11' ]
 
     steps:
       - name: Checkout
diff --git a/.github/workflows/pd-store.yml b/.github/workflows/pd-store.yml
new file mode 100644
index 000000000..65fb3ccc9
--- /dev/null
+++ b/.github/workflows/pd-store.yml
@@ -0,0 +1,51 @@
+name: "pd-store"
+
+on:
+  push:
+    branches:
+      - master
+      - 'release-*'
+      - 'test-*'
+  pull_request:
+
+jobs:
+  pd:
+    runs-on: ubuntu-latest
+    env:
+      USE_STAGE: 'true' # Whether to include the stage repository.
+      TRAVIS_DIR: hugegraph-server/hugegraph-dist/src/assembly/travis
+      REPORT_DIR: target/site/jacoco
+
+    steps:
+      - name: Install JDK 11
+        uses: actions/setup-java@v3
+        with:
+          java-version: '11'
+          distribution: 'zulu'
+
+      - name: Cache Maven packages
+        uses: actions/cache@v3
+        with:
+          path: ~/.m2
+          key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
+          restore-keys: ${{ runner.os }}-m2
+
+      - name: Checkout
+        uses: actions/checkout@v3
+        with:
+          fetch-depth: 2
+
+      - name: use staged maven repo settings
+        if: ${{ env.USE_STAGE == 'true' }}
+        run: |
+          cp $HOME/.m2/settings.xml /tmp/settings.xml
+          mv -vf .github/configs/settings.xml $HOME/.m2/settings.xml
+
+      - name: Run common test
+        run: |
+          mvn test -pl hugegraph-pd/hg-pd-test -am -P pd-common-test
+
+      - name: Upload coverage to Codecov
+        uses: codecov/codecov-action@v3.0.0
+        with:
+          file: ${{ env.REPORT_DIR }}/*.xml
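
(The same check should be reproducible locally under JDK 11 with the command above,
i.e. `mvn test -pl hugegraph-pd/hg-pd-test -am -P pd-common-test`; note that the
pd-common-test profile is also marked activeByDefault in hugegraph-pd/pom.xml further
below, so a plain `mvn test` in that module should pick it up as well.)
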
diff --git a/hugegraph-pd/.gitignore b/hugegraph-pd/.gitignore
new file mode 100644
index 000000000..c2bd7537e
--- /dev/null
+++ b/hugegraph-pd/.gitignore
@@ -0,0 +1,2 @@
+# Exclude the generated PB files
+hg-pd-grpc/src/main/java/
diff --git a/hugegraph-pd/README.md b/hugegraph-pd/README.md
index 49548c216..1aea07d7d 100644
--- a/hugegraph-pd/README.md
+++ b/hugegraph-pd/README.md
@@ -1,5 +1,8 @@
-# HugeGraph PD
+> Note: Starting from revision 1.5.0, the code of HugeGraph-PD is being migrated to this location (WIP).
 
-HugeGraph PD is a meta server responsible for service discovery, partition information storage, and node scheduling.
+# HugeGraph PD (BETA)
 
-> Note: Currently, the contents of this folder are empty. Starting from revision 1.5.0, the code of HugeGraph PD will be adapted to this location (WIP).
+HugeGraph PD is a meta server responsible for service discovery, partition information storage, and
+node scheduling.
+
+> BTW, if you encounter any problems when using HugeGraph PD, please feel free to contact us for help.
diff --git a/hugegraph-pd/pom.xml b/hugegraph-pd/pom.xml
new file mode 100644
index 000000000..6253cfd44
--- /dev/null
+++ b/hugegraph-pd/pom.xml
@@ -0,0 +1,184 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xmlns="http://maven.apache.org/POM/4.0.0"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+    <artifactId>hugegraph-pd</artifactId>
+    <version>${revision}</version>
+    <packaging>pom</packaging>
+
+    <parent>
+        <groupId>org.apache.hugegraph</groupId>
+        <artifactId>hugegraph</artifactId>
+        <version>${revision}</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+
+    <modules>
+        <module>hg-pd-grpc</module>
+        <module>hg-pd-common</module>
+        <module>hg-pd-client</module>
+        <module>hg-pd-test</module>
+        <!-- TODO: uncomment later -->
+        <!-- <module>hg-pd-core</module> -->
+        <!-- <module>hg-pd-service</module> -->
+        <!-- <module>hg-pd-dist</module> -->
+        <!-- <module>hg-pd-clitools</module> -->
+    </modules>
+
+    <properties>
+        <maven.compiler.source>11</maven.compiler.source>
+        <maven.compiler.target>11</maven.compiler.target>
+        <log4j2.version>2.17.0</log4j2.version>
+    </properties>
+
+    <dependencyManagement>
+        <dependencies>
+            <dependency>
+                <groupId>org.apache.logging.log4j</groupId>
+                <artifactId>log4j-slf4j-impl</artifactId>
+                <version>2.17.0</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.hugegraph</groupId>
+                <artifactId>hg-pd-grpc</artifactId>
+                <version>${revision}</version>
+            </dependency>
+            <dependency>
+                <groupId>org.apache.hugegraph</groupId>
+                <artifactId>hg-pd-common</artifactId>
+                <version>${revision}</version>
+            </dependency>
+        </dependencies>
+    </dependencyManagement>
+
+    <dependencies>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.13.2</version>
+            <scope>test</scope>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.jacoco</groupId>
+                <artifactId>jacoco-maven-plugin</artifactId>
+                <version>0.8.4</version>
+                <configuration>
+                    <excludes>
+                        <exclude>**/grpc/**.*</exclude>
+                        <exclude>**/config/**.*</exclude>
+                    </excludes>
+                </configuration>
+                <executions>
+                    <execution>
+                        <goals>
+                            <goal>prepare-agent</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.codehaus.mojo</groupId>
+                <artifactId>flatten-maven-plugin</artifactId>
+                <version>1.2.7</version>
+                <configuration>
+                    <updatePomFile>true</updatePomFile>
+                    <flattenMode>resolveCiFriendliesOnly</flattenMode>
+                </configuration>
+                <executions>
+                    <execution>
+                        <id>flatten</id>
+                        <phase>process-resources</phase>
+                        <goals>
+                            <goal>flatten</goal>
+                        </goals>
+                    </execution>
+                    <execution>
+                        <id>flatten.clean</id>
+                        <phase>clean</phase>
+                        <goals>
+                            <goal>clean</goal>
+                        </goals>
+                    </execution>
+                    <!-- auto delete .flattened-pom.xml after "install" step -->
+                    <execution>
+                        <id>remove-flattened-pom</id>
+                        <phase>install</phase>
+                        <goals>
+                            <goal>clean</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-clean-plugin</artifactId>
+                <configuration>
+                    <filesets>
+                        <fileset>
+                            <directory>${project.basedir}/</directory>
+                            <includes>
+                                <include>*.tar</include>
+                                <include>*.tar.gz</include>
+                                <include>.flattened-pom.xml</include>
+                                <!-- WARN: deleting the dist dir may affect local dev? -->
+                                <include>dist/**</include>
+                            </includes>
+                            <followSymlinks>false</followSymlinks>
+                        </fileset>
+                        <!-- <fileset><directory>${final.name}</directory></fileset> -->
+                    </filesets>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+
+    <profiles>
+        <!-- hugegraph pd test profiles -->
+        <profile>
+            <id>pd-common-test</id>
+            <activation>
+                <activeByDefault>true</activeByDefault>
+            </activation>
+            <build>
+                <plugins>
+                    <plugin>
+                        <groupId>org.apache.maven.plugins</groupId>
+                        <artifactId>maven-surefire-plugin</artifactId>
+                        <version>2.20</version>
+                        <executions>
+                            <execution>
+                                <id>pd-common-test</id>
+                                <goals>
+                                    <goal>test</goal>
+                                </goals>
+                                <phase>test</phase>
+                            </execution>
+                        </executions>
+                    </plugin>
+                </plugins>
+            </build>
+        </profile>
+    </profiles>
+</project>
diff --git a/pom.xml b/pom.xml
index ed2b53af3..ff448ffc2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -91,8 +91,8 @@
 
     <modules>
         <module>hugegraph-server</module>
-        <!-- TODO: uncomment when merge into pd and store -->
-        <!-- <module>hugegraph-pd</module> -->
+        <module>hugegraph-pd</module>
+        <!-- TODO: uncomment when merge into store -->
         <!-- <module>hugegraph-store</module> -->
     </modules>
 
@@ -177,6 +177,8 @@
                             <exclude>**/hbase-*/**</exclude>
                             <exclude>**/apache-cassandra-*/**</exclude>
                             <exclude>**/pid</exclude>
+                            <!-- sources generated by gRPC -->
+                            <exclude>**/src/main/java/org/apache/hugegraph/pd/grpc/**</exclude>
                         </excludes>
                         <consoleOutput>true</consoleOutput>
                     </configuration>
@@ -197,7 +199,7 @@
                                     <!-- TODO: uncomment for checking dependency conflicts -->
                                     <!-- <DependencyConvergence/> -->
                                     <requireJavaVersion>
-                                        <version>[1.8,12)</version>
+                                        <version>[11,)</version>
                                     </requireJavaVersion>
                                     <requireMavenVersion>
                                         <version>[3.5.0,)</version>
@@ -287,7 +289,7 @@
             </build>
         </profile>
 
-        <!-- use mvn -P stage to enable the remote apache-stage repo -->
+        <!-- Use mvn -P stage to enable the remote apache-stage repo -->
         <profile>
             <id>stage</id>
             <repositories>
@@ -297,5 +299,18 @@
                 </repository>
             </repositories>
         </profile>
+        <!-- Enable it by default on ARM Macs to handle the compilation problems :) -->
+        <profile>
+            <id>arm-mac</id>
+            <activation>
+                <os>
+                    <family>mac</family>
+                    <arch>aarch64</arch>
+                </os>
+            </activation>
+            <properties>
+                <os.detected.classifier>osx-x86_64</os.detected.classifier>
+            </properties>
+        </profile>
     </profiles>
 </project>


(incubator-hugegraph) 02/05: feat(pd): integrate `pd-grpc` submodule

Posted by ji...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

jin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-hugegraph.git

commit a560a6efeef0182b132164d2cea970cd8c9c6f66
Author: VGalaxies <vg...@apache.org>
AuthorDate: Wed Apr 3 23:57:50 2024 +0800

    feat(pd): integrate `pd-grpc` submodule
---
 hugegraph-pd/hg-pd-grpc/pom.xml                    | 138 +++++
 .../hg-pd-grpc/src/main/proto/discovery.proto      |  71 +++
 hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto    | 143 +++++
 .../hg-pd-grpc/src/main/proto/metaTask.proto       |  64 +++
 .../hg-pd-grpc/src/main/proto/metapb.proto         | 394 +++++++++++++
 .../hg-pd-grpc/src/main/proto/pd_common.proto      |  53 ++
 .../hg-pd-grpc/src/main/proto/pd_pulse.proto       | 172 ++++++
 .../hg-pd-grpc/src/main/proto/pd_watch.proto       | 103 ++++
 hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto  | 607 +++++++++++++++++++++
 9 files changed, 1745 insertions(+)

diff --git a/hugegraph-pd/hg-pd-grpc/pom.xml b/hugegraph-pd/hg-pd-grpc/pom.xml
new file mode 100644
index 000000000..cef49e957
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/pom.xml
@@ -0,0 +1,138 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+
+<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xmlns="http://maven.apache.org/POM/4.0.0"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <parent>
+        <groupId>org.apache.hugegraph</groupId>
+        <artifactId>hugegraph-pd</artifactId>
+        <version>${revision}</version>
+        <relativePath>../pom.xml</relativePath>
+    </parent>
+    <artifactId>hg-pd-grpc</artifactId>
+
+    <properties>
+        <os.plugin.version>1.6.0</os.plugin.version>
+        <grpc.version>1.39.0</grpc.version>
+        <protoc.version>3.17.2</protoc.version>
+        <protobuf.plugin.version>0.6.1</protobuf.plugin.version>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-netty-shaded</artifactId>
+            <version>${grpc.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-protobuf</artifactId>
+            <version>${grpc.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>io.grpc</groupId>
+            <artifactId>grpc-stub</artifactId>
+            <version>${grpc.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>javax.annotation</groupId>
+            <artifactId>javax.annotation-api</artifactId>
+            <version>1.3.2</version>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <sourceDirectory>${basedir}/src/main/java</sourceDirectory>
+        <resources>
+            <resource>
+                <directory>src/main/resources</directory>
+            </resource>
+            <resource>
+                <directory>src/main/proto</directory>
+            </resource>
+        </resources>
+        <extensions>
+            <extension>
+                <groupId>kr.motd.maven</groupId>
+                <artifactId>os-maven-plugin</artifactId>
+                <version>${os.plugin.version}</version>
+            </extension>
+        </extensions>
+        <plugins>
+            <plugin>
+                <groupId>org.xolstice.maven.plugins</groupId>
+                <artifactId>protobuf-maven-plugin</artifactId>
+                <version>${protobuf.plugin.version}</version>
+                <extensions>true</extensions>
+                <configuration>
+                    <protocArtifact>
+                        com.google.protobuf:protoc:${protoc.version}:exe:${os.detected.classifier}
+                    </protocArtifact>
+                    <pluginId>grpc-java</pluginId>
+                    <pluginArtifact>
+                        io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}
+                    </pluginArtifact>
+                    <!-- default value -->
+                    <protoSourceRoot>${project.basedir}/src/main/proto</protoSourceRoot>
+                    <!-- default value -->
+                    <!--<outputDirectory>${project.build.directory}/generated-sources/protobuf/java</outputDirectory>-->
+                    <outputDirectory>${project.basedir}/src/main/java</outputDirectory>
+                    <!-- Whether to clear outputDirectory before generating the java files; defaults to true. Even when set to false, files with the same name are overwritten -->
+                    <clearOutputDirectory>false</clearOutputDirectory>
+                    <!-- See https://www.xolstice.org/protobuf-maven-plugin/compile-mojo.html for more configuration options -->
+                </configuration>
+                <executions>
+                    <execution>
+                        <!-- These goals run during "mvn compile" (bound to the generate-sources phase) -->
+                        <phase>generate-sources</phase>
+                        <goals>
+                            <!-- generate the protobuf message (outer) classes -->
+                            <goal>compile</goal>
+                            <!-- generate the gRPC service classes -->
+                            <goal>compile-custom</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <artifactId>maven-clean-plugin</artifactId>
+                <configuration>
+                    <filesets>
+                        <fileset>
+                            <directory>src/main/java</directory>
+                        </fileset>
+                    </filesets>
+                </configuration>
+                <executions>
+                    <execution>
+                        <!-- remove all java files before compile -->
+                        <phase>initialize</phase>
+                        <goals>
+                            <goal>clean</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+</project>
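
Note the codegen flow this pom sets up: protoc writes the generated sources straight
into src/main/java (which is why the new hugegraph-pd/.gitignore above excludes
hg-pd-grpc/src/main/java/), and maven-clean-plugin deletes that directory again at the
initialize phase, so the stubs are regenerated on every build; running
`mvn compile -pl hugegraph-pd/hg-pd-grpc` should be enough to (re)create them.
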
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/discovery.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/discovery.proto
new file mode 100644
index 000000000..b434ab0e8
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/discovery.proto
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+package discovery;
+import "pdpb.proto";
+
+option java_package = "org.apache.hugegraph.pd.grpc.discovery";
+option java_multiple_files = true;
+
+
+service DiscoveryService {
+  rpc register(NodeInfo) returns (RegisterInfo);
+  rpc getNodes(Query) returns (NodeInfos);
+  //  rpc getNodesByLabel(Conditions) returns (NodeInfos);
+}
+
+/* requests */
+message NodeInfo {
+  string id = 1;
+  string appName = 2;
+  string version = 3;
+  string address = 4;
+  int64 interval = 5;
+  map<string, string> labels = 6;
+}
+message Query {
+  string appName = 1;
+  string version = 2;
+  map<string, string> labels = 3;
+}
+message LeaseInfo {
+  int64 registrationTs = 1;
+  int64 lastHeartbeatTs = 2;
+  int64 serverUpTs = 3;
+}
+message RegisterInfo {
+  NodeInfo nodeInfo = 1;
+  LeaseInfo leaseInfo = 2 ;
+  RegisterType type = 3 ;
+  pdpb.ResponseHeader header = 4;
+}
+enum RegisterType {
+  Register = 0;
+  Heartbeat = 1;
+  Dislodge = 2;
+}
+//message Condition{
+//  string label = 1;
+//}
+//message Conditions{
+//  string label = 1;
+//  string value = 2;
+//}
+message NodeInfos{
+  repeated NodeInfo info = 1;
+}
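
A rough sketch (not part of this commit) of calling this service with the grpc-java
stubs generated from the file above; the PD endpoint, app name, and interval are
illustrative values, not ones defined by this change:

    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;

    import org.apache.hugegraph.pd.grpc.discovery.DiscoveryServiceGrpc;
    import org.apache.hugegraph.pd.grpc.discovery.NodeInfo;
    import org.apache.hugegraph.pd.grpc.discovery.NodeInfos;
    import org.apache.hugegraph.pd.grpc.discovery.Query;
    import org.apache.hugegraph.pd.grpc.discovery.RegisterInfo;

    public class DiscoveryDemo {
        public static void main(String[] args) {
            ManagedChannel channel = ManagedChannelBuilder
                    .forTarget("127.0.0.1:8686")    // hypothetical PD endpoint
                    .usePlaintext()
                    .build();
            var stub = DiscoveryServiceGrpc.newBlockingStub(channel);
            // register once, then re-send periodically as a heartbeat (see RegisterType)
            RegisterInfo reg = stub.register(NodeInfo.newBuilder()
                    .setAppName("hugegraph")
                    .setVersion("1.0")
                    .setAddress("127.0.0.1:8080")
                    .setInterval(10000)
                    .build());
            // list the registered nodes of the same app
            NodeInfos nodes = stub.getNodes(Query.newBuilder()
                    .setAppName("hugegraph")
                    .build());
            channel.shutdown();
        }
    }
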
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto
new file mode 100644
index 000000000..22007cda3
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/kv.proto
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+package kv;
+import "pdpb.proto";
+import "metapb.proto";
+
+option java_package = "org.apache.hugegraph.pd.grpc.kv";
+option java_multiple_files = true;
+
+
+service KvService {
+  rpc put(Kv) returns (KvResponse);
+  rpc get(K) returns (KResponse);
+  rpc delete(K) returns (KvResponse);
+  rpc deletePrefix(K) returns (KvResponse);
+  rpc scanPrefix(K) returns (ScanPrefixResponse);
+  rpc watch(WatchRequest) returns (stream WatchResponse);
+  rpc watchPrefix(WatchRequest) returns (stream WatchResponse);
+  rpc lock(LockRequest) returns (LockResponse);
+  rpc lockWithoutReentrant(LockRequest) returns (LockResponse);
+  rpc unlock(LockRequest) returns (LockResponse);
+  rpc keepAlive(LockRequest) returns (LockResponse);
+  rpc isLocked(LockRequest) returns (LockResponse);
+  rpc putTTL(TTLRequest) returns (TTLResponse);
+  rpc keepTTLAlive(TTLRequest) returns (TTLResponse);
+}
+
+/* requests */
+message Kv {
+  pdpb.RequestHeader header = 1;
+  string key = 2;
+  string value = 3;
+}
+message KvResponse {
+  pdpb.ResponseHeader header = 1;
+}
+
+message K{
+  pdpb.RequestHeader header = 1;
+  string key = 2;
+}
+
+message KResponse{
+  pdpb.ResponseHeader header = 1;
+  string value = 2;
+}
+
+message ScanPrefixResponse {
+  pdpb.ResponseHeader header = 1;
+  map<string, string> kvs = 2;
+}
+
+message LockRequest{
+  pdpb.RequestHeader header = 1;
+  string key = 2;
+  int64 ttl = 3;
+  int64 clientId = 4;
+}
+message LockResponse{
+  pdpb.ResponseHeader header = 1;
+  string key = 2;
+  int64 ttl = 3;
+  int64 clientId = 4;
+  bool succeed = 5;
+}
+
+message LockAliveResponse{
+  pdpb.ResponseHeader header = 1;
+  int64 clientId = 2;
+}
+
+
+message WatchKv {
+  string key = 1;
+  string value = 2;
+}
+
+enum WatchType {
+  Put = 0;
+  Delete = 1;
+  Unrecognized = 2;
+}
+
+message WatchEvent {
+  WatchKv current = 1;
+  WatchKv prev = 2;
+  WatchType type = 3;
+}
+
+message WatchResponse {
+  pdpb.ResponseHeader header = 1;
+  repeated WatchEvent events = 2;
+  int64 clientId = 3;
+  WatchState state = 4;
+}
+
+enum WatchState {
+  Starting = 0;
+  Started = 1;
+  Leader_Changed = 2;
+  Alive = 3;
+}
+
+message WatchRequest {
+  pdpb.RequestHeader header = 1;
+  WatchState state = 2;
+  string key = 3;
+  int64 clientId = 4;
+}
+
+message V{
+  string value = 1;
+  int64  ttl = 2;
+  int64 st = 3;
+}
+
+message TTLRequest{
+  pdpb.RequestHeader header = 1;
+  string key = 2;
+  string value = 3;
+  int64 ttl = 4;
+}
+
+message TTLResponse{
+  pdpb.ResponseHeader header = 1;
+  bool succeed = 2;
+}
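
Besides plain put/get, KvService doubles as a coarse-grained distributed lock: lock,
keepAlive, unlock and isLocked all reuse LockRequest, keyed by (key, clientId), and a lock
expires unless keepAlive is called within the TTL. A minimal sketch of that lifecycle,
assuming the generated Java stubs; the key, TTL, and clientId are illustrative and the
request header is omitted for brevity:

    import org.apache.hugegraph.pd.grpc.kv.KvServiceGrpc;
    import org.apache.hugegraph.pd.grpc.kv.LockRequest;
    import org.apache.hugegraph.pd.grpc.kv.LockResponse;

    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;

    public class KvLockSketch {

        public static void main(String[] args) {
            ManagedChannel channel = ManagedChannelBuilder.forTarget("127.0.0.1:8686")
                                                          .usePlaintext().build();
            KvServiceGrpc.KvServiceBlockingStub kv = KvServiceGrpc.newBlockingStub(channel);

            LockRequest request = LockRequest.newBuilder()
                                             .setKey("task/split-partition-12") // illustrative key
                                             .setTtl(10_000)                    // lease TTL (ms assumed)
                                             .setClientId(42L)                  // caller-chosen identity
                                             .build();

            LockResponse locked = kv.lock(request);
            if (locked.getSucceed()) {
                try {
                    // ... critical section; call keepAlive(request) before the TTL runs out
                    kv.keepAlive(request);
                } finally {
                    kv.unlock(request);
                }
            }
            channel.shutdown();
        }
    }
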
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/metaTask.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/metaTask.proto
new file mode 100644
index 000000000..c4bb8bde1
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/metaTask.proto
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+package metaTask;
+import "metapb.proto";
+import "pd_pulse.proto";
+option java_package = "org.apache.hugegraph.pd.grpc";
+
+enum TaskType {
+  Unknown = 0;
+  Split_Partition = 1;
+  Change_Shard = 2;
+  Move_Partition = 3;
+  Clean_Partition = 4;
+  Change_KeyRange = 5;
+}
+
+// A single task record
+message Task {
+  uint64 id = 1;
+  TaskType type = 2;
+  TaskState state = 3;
+  int64 start_timestamp = 4;
+  metapb.Partition partition = 5;
+  string message = 6;
+  // Task state reported by each shard
+  repeated ShardTaskState shardState = 7;
+  ChangeShard changeShard = 9;
+  SplitPartition splitPartition = 10;
+  MovePartition movePartition = 11;
+  CleanPartition cleanPartition = 12;
+  PartitionKeyRange partitionKeyRange = 13;
+}
+
+enum TaskState{
+  Task_Unknown = 0;
+  Task_Ready = 1;   // task is ready
+  Task_Doing = 2;   // in progress
+  Task_Done = 3;    // finished
+  Task_Exit = 4;    // exited
+  Task_Stop = 10;
+  Task_Success = 11;
+  Task_Failure = 12;
+}
+
+message ShardTaskState{
+  uint64 store_id = 1;
+  TaskState state = 2;
+}
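
Task is the unit PD uses to track long-running partition operations: TaskType says what is
being done, TaskState tracks the overall lifecycle (Task_Ready -> Task_Doing ->
Task_Done/Task_Exit), and shardState records per-shard progress, so a task can be
considered finished only once every shard has reported. A sketch of that aggregation rule,
assuming the generated Java classes land in the outer class MetaTask (the default outer
class name for this file, since java_multiple_files is not set):

    import org.apache.hugegraph.pd.grpc.MetaTask; // assumed outer class for metaTask.proto

    public class TaskProgressSketch {

        // A task succeeds only when every shard reports success; one failed shard
        // fails the whole task (assumed aggregation policy, for illustration only).
        static MetaTask.TaskState aggregate(MetaTask.Task task) {
            boolean allSuccess = true;
            for (MetaTask.ShardTaskState shard : task.getShardStateList()) {
                if (shard.getState() == MetaTask.TaskState.Task_Failure) {
                    return MetaTask.TaskState.Task_Failure;
                }
                allSuccess &= shard.getState() == MetaTask.TaskState.Task_Success;
            }
            return allSuccess ? MetaTask.TaskState.Task_Success
                              : MetaTask.TaskState.Task_Doing;
        }
    }
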
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/metapb.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/metapb.proto
new file mode 100644
index 000000000..a8a695be0
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/metapb.proto
@@ -0,0 +1,394 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+package metapb;
+option java_package = "org.apache.hugegraph.pd.grpc";
+import "google/protobuf/any.proto";
+
+enum ClusterState{
+  // The cluster is healthy
+  Cluster_OK = 0;
+  // Cluster warning: some nodes are faulty, reads/writes are unaffected in the short term
+  Cluster_Warn = 2;
+  // Cluster offline: readable but not writable
+  Cluster_Offline = 10;
+  // Cluster fault: neither readable nor writable, faulty nodes must be repaired as soon as possible
+  Cluster_Fault = 11;
+  Cluster_Not_Ready = -1;
+}
+// Cluster status
+message ClusterStats{
+  ClusterState state = 1;
+  string message = 2;
+  uint64 timestamp = 16;
+}
+
+enum StoreState {
+  Unknown = 0;
+  // Not activated
+  Pending = 4;
+  // Online
+  Up = 1;
+  // Offline
+  Offline = 2;
+  // Going offline
+  Exiting = 5;
+  // Decommissioned
+  Tombstone = 3;
+}
+
+// Store label for Storage grouping.
+message StoreLabel {
+  string key = 1;
+  string value = 2;
+}
+
+message Store {
+  uint64 id = 1;
+  // Address to handle client requests
+  string address = 2;
+  string raft_address = 3;
+  repeated StoreLabel labels = 4;
+  // Store software version
+  string version = 5;
+  StoreState state = 6;
+  // The start timestamp of the current store
+  int64 start_timestamp = 7;
+  string deploy_path = 8;
+  // The last heartbeat timestamp of the store.
+  int64 last_heartbeat = 9;
+  StoreStats stats = 10;
+  // Data format version
+  int32 data_version = 11;
+  int32 cores = 12;
+  string data_path = 13;
+}
+
+enum ShardRole {
+  None = 0;
+  Leader = 1;
+  Follower = 2;
+  // Learner/None -> Learner
+  Learner = 3;
+}
+
+message Shard {
+  uint64 store_id = 2;
+  ShardRole role = 3;
+}
+
+message ShardGroup{
+  uint32 id = 1;
+  uint64 version = 2;
+  uint64 conf_ver = 3;
+  repeated Shard shards = 6;
+  PartitionState state = 10;
+  string message = 11;
+}
+
+message Graph {
+  string graph_name = 2;
+  // Number of partitions; 0 means invalid, must not exceed the total number of raft groups
+  int32 partition_count = 3;
+  // Current working state
+  PartitionState state = 10;
+  string message = 11;
+  GraphState graph_state = 12;
+}
+// Partition working state
+enum PartitionState{
+  PState_None = 0;
+  // Normal
+  PState_Normal = 1;
+  // Partition warning: some nodes are faulty, reads/writes are unaffected in the short term
+  PState_Warn = 2;
+  // Partition offline: readable but not writable
+  PState_Offline = 10;
+  // Partition fault: neither readable nor writable, faulty nodes must be repaired as soon as possible
+  PState_Fault = 11;
+}
+
+message PartitionV36 {
+  uint32 id = 1;
+  string graph_name = 3;
+  // Partition key range [start_key, end_key).
+  uint64 start_key = 4;
+  uint64 end_key = 5;
+  repeated Shard shards = 6;
+  // Leader term, incremented on each leader switch
+  uint64 version = 7;
+  // Shards version, incremented on each change
+  uint64 conf_ver = 8;
+  // Current working state
+  PartitionState state = 10;
+  string message = 11;
+}
+
+message Partition {
+  uint32 id = 1;
+  string graph_name = 3;
+  // Partition key range [start_key, end_key).
+  uint64 start_key = 4;
+  uint64 end_key = 5;
+  // The Partition object no longer stores the shard list (query the corresponding shard group instead); version and conf version no longer carry real meaning
+  // repeated Shard shards = 6;
+  // Incremented each time the key range changes
+  uint64 version = 7;
+  // Shards version, incremented on each change
+  // uint64 conf_ver = 8;
+  // Current working state
+  PartitionState state = 10;
+  string message = 11;
+}
+
+message PartitionShard {
+  metapb.Partition partition = 1;
+  metapb.Shard leader = 2;
+  // Offline shards
+  repeated metapb.Shard offline_shards = 3;
+}
+// Records the storage location of a partition
+message PartitionStore {
+  uint32 partition_id = 1;
+  string graph_name = 3;
+  // Storage location
+  string store_location = 4;
+}
+
+message PartitionRaft {
+  uint32 partition_id = 1;
+  string graph_name = 3;
+  // Raft location
+  string raft_location = 4;
+}
+
+message ShardStats{
+  uint64 store_id = 2;
+  ShardRole role = 3;
+  ShardState state = 4;
+  // Snapshot installation progress
+  uint32 progress = 5;
+}
+message PartitionStats{
+  uint32 id = 1;
+  // Term of the raft group.
+  uint64 leader_term = 2;
+  repeated string graph_name = 3;
+  metapb.Shard leader = 4;
+  // Offline shards
+  repeated metapb.Shard shard = 5;
+  repeated metapb.Shard learner = 6;
+  uint64 conf_ver = 7;
+  // Partition state
+  PartitionState state = 8;
+  repeated ShardStats shardStats = 9;
+  // Approximate partition size
+  uint64 approximate_size = 10;
+  // Approximate number of keys in the partition
+  uint64 approximate_keys = 13;
+  // heartbeat timestamp
+  int64 timestamp = 16;
+}
+
+message GraphStats{
+  // Graph name
+  string graph_name = 1;
+  // Approximate partition size
+  uint64 approximate_size = 2;
+  // Approximate number of keys in the partition
+  uint64 approximate_keys = 3;
+  //  // committed index
+  //  uint64 committed_index = 4;
+  uint32 partition_id = 5;
+  ShardRole role = 6;
+  // Current working state
+  PartitionState work_state = 8;
+}
+
+message RaftStats {
+  // partition id
+  uint32 partition_id = 1;
+  // committed index
+  uint64 committed_index = 2;
+}
+
+message TimeInterval {
+  // The unix timestamp in seconds of the start of this period.
+  uint64 start_timestamp = 1;
+  // The unix timestamp in seconds of the end of this period.
+  uint64 end_timestamp = 2;
+}
+
+message RecordPair {
+  string key = 1;
+  uint64 value = 2;
+}
+
+
+message QueryStats {
+  uint64 GC = 1;
+  uint64 Get = 2;
+  uint64 Scan = 3;
+  uint64 Coprocessor = 4;
+  uint64 Delete = 5;
+  uint64 DeleteRange = 6;
+  uint64 Put = 7;
+}
+
+enum ShardState{
+  SState_None = 0;
+  // Normal
+  SState_Normal = 1;
+  // Installing snapshot
+  SState_Snapshot = 2;
+  // Offline
+  SState_Offline = 10;
+}
+
+
+message StoreStats {
+  uint64 store_id = 1;
+  // Capacity for the store.
+  uint64 capacity = 2;
+  // Available size for the store.
+  uint64 available = 3;
+  // Total partition count in this store.
+  uint32 partition_count = 4;
+  // Current sending snapshot count.
+  uint32 sending_snap_count = 5;
+  // Current receiving snapshot count.
+  uint32 receiving_snap_count = 6;
+  // When the store is started (unix timestamp in seconds).
+  uint32 start_time = 7;
+  // How many partition is applying snapshot.
+  uint32 applying_snap_count = 8;
+  // If the store is busy
+  bool is_busy = 9;
+  // Actually used space by db
+  uint64 used_size = 10;
+  // Bytes written for the store during this period.
+  uint64 bytes_written = 11;
+  // Keys written for the store during this period.
+  uint64 keys_written = 12;
+  // Bytes read for the store during this period.
+  uint64 bytes_read = 13;
+  // Keys read for the store during this period.
+  uint64 keys_read = 14;
+  // Actually reported time interval
+  TimeInterval interval = 15;
+  // Threads' CPU usages in the store
+  repeated RecordPair cpu_usages = 16;
+  // Threads' read disk I/O rates in the store
+  repeated RecordPair read_io_rates = 17;
+  // Threads' write disk I/O rates in the store
+  repeated RecordPair write_io_rates = 18;
+  // Operations' latencies in the store
+  repeated RecordPair op_latencies = 19;
+  // Store query stats
+  QueryStats query_stats = 21;
+  // graph stats
+  repeated GraphStats graph_stats = 22;
+  // raft stats
+  repeated RaftStats raft_stats = 23;
+  int32 cores = 24;
+  // system metrics
+  repeated RecordPair system_metrics = 25;
+}
+
+// Partition query conditions
+message PartitionQuery{
+  optional uint64 store_id = 1;      // 0 means the query does not filter by store_id
+  optional string graph_name = 2;
+  optional uint32 partition_id = 4;
+}
+
+// PD node info
+message Member {
+  uint64 cluster_id = 1;
+  string raft_url = 3;
+  string grpc_url = 4;
+  string rest_url = 5;
+  string data_path = 6;
+  StoreState state = 7;
+  ShardRole role = 8;
+  string replicator_state = 9;
+}
+
+// Graph space configuration
+message GraphSpace{
+  string name = 1;
+  // Maximum storage usage
+  uint64 storage_limit = 2;
+  // Used space
+  uint64 used_size = 3;
+  // Last modified time
+  uint64 timestamp = 10;
+}
+
+// PD configuration
+message PDConfig{
+  uint64 version = 1;
+  // Number of partitions; initially computed dynamically from the store count, updated after splits
+  int32 partition_count = 2;
+  // Number of replicas per partition
+  int32 shard_count = 3;
+  // PD cluster peer list
+  string peers_list = 4;
+  // Minimum number of stores in the cluster
+  int32 min_store_count = 6;
+  // Maximum number of shards per store
+  int32 max_Shards_Per_Store = 7;
+  // Last modified time
+  uint64 timestamp = 10;
+}
+
+
+
+// Message persistence
+message QueueItem{
+  string item_id = 1;
+  string item_class = 2;
+  bytes item_content = 3;
+  int64 timestamp = 10;
+}
+
+message LogRecord{
+  string action = 1;
+  int64 timestamp = 2;
+  map<string, string> labels = 3;
+  google.protobuf.Any object = 4;
+  string message = 5;
+}
+
+message GraphState{
+  GraphMode mode = 1;
+  GraphModeReason reason = 2;
+}
+
+enum GraphMode{
+  ReadWrite = 0;
+  ReadOnly = 1;
+  WriteOnly = 2;
+}
+
+enum GraphModeReason{
+  Empty = 0; // empty
+  Initiative = 1; // state set proactively
+  Quota = 2; // quota limit reached
+
+}
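
metapb carries the core placement model: each graph's key space is hashed into codes, and
a Partition owns the half-open range [start_key, end_key). Routing a key therefore reduces
to hashing it and finding the partition whose range contains the code. A minimal sketch of
that range check, assuming the generated classes land in the outer class Metapb (the
default for this file) and leaving the hash function abstract, since it is not defined here:

    import java.util.List;

    import org.apache.hugegraph.pd.grpc.Metapb; // assumed outer class for metapb.proto

    public class PartitionRoutingSketch {

        // Returns the partition owning the hashed key code, or null if none.
        // Ranges are half-open and uint64: start_key <= code < end_key.
        static Metapb.Partition locate(List<Metapb.Partition> partitions, long code) {
            for (Metapb.Partition p : partitions) {
                if (Long.compareUnsigned(p.getStartKey(), code) <= 0 &&
                    Long.compareUnsigned(code, p.getEndKey()) < 0) {
                    return p;
                }
            }
            return null;
        }
    }
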
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_common.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_common.proto
new file mode 100644
index 000000000..c9eec8149
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_common.proto
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+
+option java_multiple_files = true;
+option java_package = "org.apache.hugegraph.pd.grpc.common";
+option java_outer_classname = "HgPdCommonProto";
+
+message RequestHeader {
+  // Cluster ID.
+  uint64 cluster_id = 1;
+  // Sender ID.
+  uint64 sender_id = 2;
+}
+
+message ResponseHeader {
+  // cluster_id is the ID of the cluster which sent the response.
+  uint64 cluster_id = 1;
+  Error error = 2;
+}
+
+enum ErrorType {
+  OK = 0;
+  UNKNOWN = 1;
+  STORE_NON_EXIST = 101;
+  STORE_TOMBSTONE = 103;
+  ALREADY_BOOTSTRAPPED = 4;
+  INCOMPATIBLE_VERSION = 5;
+  PARTITION_NOT_FOUND = 6;
+
+  ETCD_READ_ERROR = 1000;
+  ETCD_WRITE_ERROR = 1001;
+}
+
+message Error {
+  ErrorType type = 1;
+  string message = 2;
+}
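
Every response carries a ResponseHeader whose Error defaults to ErrorType.OK (enum value
0), so a caller can funnel all responses through one helper that turns any non-OK header
into an exception. A minimal sketch, assuming the generated classes; RuntimeException
stands in for whatever exception type the caller actually uses:

    import org.apache.hugegraph.pd.grpc.common.Error;
    import org.apache.hugegraph.pd.grpc.common.ErrorType;
    import org.apache.hugegraph.pd.grpc.common.ResponseHeader;

    public class HeaderCheckSketch {

        // Throws if the response header reports anything other than OK.
        static void checkHeader(ResponseHeader header) {
            Error error = header.getError();
            if (error.getType() != ErrorType.OK) {
                throw new RuntimeException(
                        "PD error " + error.getType() + ": " + error.getMessage());
            }
        }
    }
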
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_pulse.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_pulse.proto
new file mode 100644
index 000000000..fb8940df6
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_pulse.proto
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+
+import "metapb.proto";
+import "pd_common.proto";
+
+option java_multiple_files = true;
+option java_package = "org.apache.hugegraph.pd.grpc.pulse";
+option java_outer_classname = "HgPdPulseProto";
+
+service HgPdPulse {
+  rpc Pulse(stream PulseRequest) returns (stream PulseResponse);
+}
+
+/* requests */
+message PulseRequest {
+  PulseCreateRequest create_request = 1;
+  PulseCancelRequest cancel_request = 2;
+  PulseNoticeRequest notice_request = 3;
+  PulseAckRequest ack_request = 4;
+}
+
+message PulseCreateRequest {
+  PulseType  pulse_type = 1;
+}
+
+message PulseCancelRequest {
+  int64 observer_id = 1;
+}
+
+message PulseNoticeRequest {
+  int64 observer_id = 1;
+  oneof request_union {
+    PartitionHeartbeatRequest partition_heartbeat_request = 10;
+  }
+}
+
+message PulseAckRequest {
+  int64 observer_id = 1;
+  int64 notice_id = 2;
+}
+
+// Partition heartbeat: sent by the leader when events such as peer addition/removal or leader change occur.
+// PD in turn delivers shard additions/removals for the partition to the leader via the Response
+message PartitionHeartbeatRequest {
+  RequestHeader header = 1;
+  // Leader Peer sending the heartbeat
+  metapb.PartitionStats states = 4;
+}
+
+/* responses */
+message PulseResponse {
+  PulseType pulse_type = 1;
+  int64 observer_id = 2;
+  int32 status = 3;   // 0 = ok, 1 = fail
+  int64 notice_id = 4;
+  oneof response_union {
+    PartitionHeartbeatResponse partition_heartbeat_response = 10;
+    PdInstructionResponse instruction_response = 11;
+  }
+}
+
+message PartitionHeartbeatResponse {
+  ResponseHeader header = 1;
+  uint64 id = 3;
+  metapb.Partition partition = 2;
+  ChangeShard change_shard = 4;
+
+  TransferLeader transfer_leader = 5;
+  // Split into multiple partitions; the first one is the original partition, those from the second onward are new
+  SplitPartition split_partition = 6;
+  // Table targeted by rocksdb compaction; null means all tables
+  DbCompaction db_compaction = 7;
+  // Migrate the partition's data to the target
+  MovePartition move_partition = 8;
+  // Clean up the graph's data within the partition
+  CleanPartition clean_partition = 9;
+  // Partition key range change
+  PartitionKeyRange key_range = 10;
+}
+
+/* Data model */
+message ChangeShard {
+  repeated metapb.Shard shard = 1;
+  ConfChangeType change_type = 2;
+}
+
+message TransferLeader {
+  metapb.Shard shard = 1;
+}
+
+message SplitPartition {
+  repeated metapb.Partition new_partition = 1;
+}
+
+message DbCompaction {
+  string table_name = 3;
+}
+
+message MovePartition{
+  // The target partition's key range, i.e. the new range after migration
+  metapb.Partition target_partition = 1;
+  // All data between the partition's key start and key end
+  // will be migrated to the target partition
+  uint64 key_start = 2;
+  uint64 key_end = 3;
+}
+
+message CleanPartition {
+  uint64 key_start = 1;
+  uint64 key_end = 2;
+  CleanType clean_type = 3;
+  bool delete_partition = 4; // whether to delete the partition
+}
+
+message PartitionKeyRange{
+  uint32 partition_id = 1;
+  uint64 key_start = 2;
+  uint64 key_end = 3;
+}
+
+message PdInstructionResponse {
+  PdInstructionType instruction_type = 1;
+  string leader_ip = 2;
+}
+
+/* enums */
+enum PulseType {
+  PULSE_TYPE_UNKNOWN = 0;
+  PULSE_TYPE_PARTITION_HEARTBEAT = 1;
+  PULSE_TYPE_PD_INSTRUCTION = 2;
+}
+
+enum PulseChangeType {
+  PULSE_CHANGE_TYPE_UNKNOWN = 0;
+  PULSE_CHANGE_TYPE_ADD = 1;
+  PULSE_CHANGE_TYPE_ALTER = 2;
+  PULSE_CHANGE_TYPE_DEL = 3;
+}
+
+enum ConfChangeType {
+  CONF_CHANGE_TYPE_UNKNOWN = 0;
+  CONF_CHANGE_TYPE_ADD_NODE = 1;
+  CONF_CHANGE_TYPE_REMOVE_NODE = 2;
+  CONF_CHANGE_TYPE_ADD_LEARNER_NODE = 3;
+  CONF_CHANGE_TYPE_ADJUST = 4;    // Adjust shards; the leader adds/removes them dynamically according to the new configuration.
+}
+
+enum CleanType {
+  CLEAN_TYPE_KEEP_RANGE = 0; // keep only this range
+  CLEAN_TYPE_EXCLUDE_RANGE = 1; // delete this range
+}
+
+enum PdInstructionType {
+  CHANGE_TO_FOLLOWER = 0;
+}
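
Pulse is a single bidirectional stream: the client opens it with a PulseCreateRequest
selecting a PulseType, pushes notices (e.g. PartitionHeartbeatRequest) upward, and acks
each PulseResponse by its notice_id so PD can retire delivered instructions. A minimal
sketch of the client side of that handshake, assuming the generated async stub; the
endpoint is a placeholder and the AtomicReference simply breaks the chicken-and-egg between
the two stream observers:

    import java.util.concurrent.atomic.AtomicReference;

    import org.apache.hugegraph.pd.grpc.pulse.HgPdPulseGrpc;
    import org.apache.hugegraph.pd.grpc.pulse.PulseAckRequest;
    import org.apache.hugegraph.pd.grpc.pulse.PulseCreateRequest;
    import org.apache.hugegraph.pd.grpc.pulse.PulseRequest;
    import org.apache.hugegraph.pd.grpc.pulse.PulseResponse;
    import org.apache.hugegraph.pd.grpc.pulse.PulseType;

    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;
    import io.grpc.stub.StreamObserver;

    public class PulseClientSketch {

        public static void main(String[] args) {
            ManagedChannel channel = ManagedChannelBuilder.forTarget("127.0.0.1:8686")
                                                          .usePlaintext().build();
            AtomicReference<StreamObserver<PulseRequest>> requestRef = new AtomicReference<>();

            StreamObserver<PulseResponse> responses = new StreamObserver<PulseResponse>() {
                @Override
                public void onNext(PulseResponse resp) {
                    // Ack every notice by id so PD can retire the delivered instruction.
                    requestRef.get().onNext(PulseRequest.newBuilder()
                            .setAckRequest(PulseAckRequest.newBuilder()
                                    .setObserverId(resp.getObserverId())
                                    .setNoticeId(resp.getNoticeId()))
                            .build());
                }
                @Override
                public void onError(Throwable t) {
                    t.printStackTrace();
                }
                @Override
                public void onCompleted() {
                    channel.shutdown();
                }
            };

            requestRef.set(HgPdPulseGrpc.newStub(channel).pulse(responses));
            // Open the stream by subscribing to partition-heartbeat pulses.
            requestRef.get().onNext(PulseRequest.newBuilder()
                    .setCreateRequest(PulseCreateRequest.newBuilder()
                            .setPulseType(PulseType.PULSE_TYPE_PARTITION_HEARTBEAT))
                    .build());
        }
    }
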
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_watch.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_watch.proto
new file mode 100644
index 000000000..febc41f52
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pd_watch.proto
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+
+import "metapb.proto";
+
+option java_multiple_files = true;
+option java_package = "org.apache.hugegraph.pd.grpc.watch";
+option java_outer_classname = "HgPdWatchProto";
+
+service HgPdWatch {
+  rpc Watch(stream WatchRequest) returns (stream WatchResponse);
+}
+
+message WatchRequest {
+  WatchCreateRequest create_request = 1;
+  WatchCancelRequest cancel_request = 2;
+}
+
+message WatchCreateRequest {
+  WatchType  watch_type = 1;
+}
+
+message WatchCancelRequest {
+  int64 watcher_id = 1;
+}
+
+message WatchResponse {
+  WatchType watch_type = 1;
+  int64 watcher_id = 2;
+  int32 status = 3;   // 0 = ok, 1 = fail
+  int64 notice_id = 4;
+  string msg = 5;
+  oneof response_union {
+    WatchPartitionResponse partition_response = 10;
+    WatchNodeResponse node_response = 11;
+    WatchGraphResponse graph_response = 12;
+    WatchShardGroupResponse shard_group_response = 13;
+  }
+}
+
+message WatchPartitionResponse {
+  string graph = 1;
+  int32 partition_id = 2;
+  WatchChangeType change_type = 3;
+}
+
+message WatchNodeResponse {
+  string graph = 1;
+  uint64 node_id = 2;
+  NodeEventType node_event_type = 3;
+}
+
+message WatchGraphResponse {
+  metapb.Graph graph = 1;
+  WatchType type = 2;
+}
+
+message WatchShardGroupResponse {
+  metapb.ShardGroup shard_group = 1;
+  WatchChangeType type = 2;
+  int32 shard_group_id = 3;
+}
+
+enum WatchType {
+  WATCH_TYPE_UNKNOWN = 0;
+  WATCH_TYPE_PARTITION_CHANGE = 1;
+  WATCH_TYPE_STORE_NODE_CHANGE = 2;
+  WATCH_TYPE_GRAPH_CHANGE = 3;
+  WATCH_TYPE_SHARD_GROUP_CHANGE = 4;
+}
+
+enum WatchChangeType {
+  WATCH_CHANGE_TYPE_UNKNOWN = 0;
+  WATCH_CHANGE_TYPE_ADD = 1;
+  WATCH_CHANGE_TYPE_ALTER = 2;
+  WATCH_CHANGE_TYPE_DEL = 3;
+  WATCH_CHANGE_TYPE_SPECIAL1 = 4;
+}
+
+enum NodeEventType {
+  NODE_EVENT_TYPE_UNKNOWN = 0;
+  NODE_EVENT_TYPE_NODE_ONLINE = 1;
+  NODE_EVENT_TYPE_NODE_OFFLINE = 2;
+  NODE_EVENT_TYPE_NODE_RAFT_CHANGE = 3;
+  // PD leader change
+  NODE_EVENT_TYPE_PD_LEADER_CHANGE = 4;
+}
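
HgPdWatch follows the same stream-and-cancel shape as Pulse but the client only consumes:
it sends a WatchCreateRequest choosing a WatchType and then dispatches incoming responses
on the response_union oneof. A minimal sketch of a partition-change watcher, assuming the
generated stubs and a placeholder endpoint:

    import org.apache.hugegraph.pd.grpc.watch.HgPdWatchGrpc;
    import org.apache.hugegraph.pd.grpc.watch.WatchCreateRequest;
    import org.apache.hugegraph.pd.grpc.watch.WatchRequest;
    import org.apache.hugegraph.pd.grpc.watch.WatchResponse;
    import org.apache.hugegraph.pd.grpc.watch.WatchType;

    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;
    import io.grpc.stub.StreamObserver;

    public class WatchClientSketch {

        public static void main(String[] args) {
            ManagedChannel channel = ManagedChannelBuilder.forTarget("127.0.0.1:8686")
                                                          .usePlaintext().build();

            StreamObserver<WatchRequest> requests = HgPdWatchGrpc.newStub(channel)
                    .watch(new StreamObserver<WatchResponse>() {
                        @Override
                        public void onNext(WatchResponse resp) {
                            // Dispatch on the oneof; only partition events handled here.
                            if (resp.hasPartitionResponse()) {
                                System.out.printf("graph=%s partition=%d change=%s%n",
                                        resp.getPartitionResponse().getGraph(),
                                        resp.getPartitionResponse().getPartitionId(),
                                        resp.getPartitionResponse().getChangeType());
                            }
                        }
                        @Override
                        public void onError(Throwable t) {
                            t.printStackTrace();
                        }
                        @Override
                        public void onCompleted() {
                            channel.shutdown();
                        }
                    });

            // Subscribe to partition-change events.
            requests.onNext(WatchRequest.newBuilder()
                    .setCreateRequest(WatchCreateRequest.newBuilder()
                            .setWatchType(WatchType.WATCH_TYPE_PARTITION_CHANGE))
                    .build());
        }
    }
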
diff --git a/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto b/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto
new file mode 100644
index 000000000..4e293ca08
--- /dev/null
+++ b/hugegraph-pd/hg-pd-grpc/src/main/proto/pdpb.proto
@@ -0,0 +1,607 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+syntax = "proto3";
+package pdpb;
+
+import "metapb.proto";
+import "metaTask.proto";
+
+option java_package = "org.apache.hugegraph.pd.grpc";
+
+service PD {
+  // Register a store; the first registration generates a new store_id, which is the store's unique identifier
+  rpc RegisterStore(RegisterStoreRequest) returns (RegisterStoreResponse) {}
+  rpc GetStore(GetStoreRequest) returns (GetStoreResponse) {}
+  // Modify store state and other information.
+  rpc SetStore(SetStoreRequest) returns (SetStoreResponse) {}
+  // Delete the specified store
+  rpc DelStore(DetStoreRequest) returns (DetStoreResponse) {}
+  rpc GetAllStores(GetAllStoresRequest) returns (GetAllStoresResponse) {}
+  rpc StoreHeartbeat(StoreHeartbeatRequest) returns (StoreHeartbeatResponse) {}
+
+  // Find the partition that a key belongs to
+  rpc GetPartition(GetPartitionRequest) returns (GetPartitionResponse) {}
+
+  // Find the partition by hash code
+  rpc GetPartitionByCode(GetPartitionByCodeRequest) returns (GetPartitionResponse) {}
+  // Return the partition by partition ID
+  rpc GetPartitionByID(GetPartitionByIDRequest) returns (GetPartitionResponse) {}
+  rpc ScanPartitions(ScanPartitionsRequest) returns (ScanPartitionsResponse) {}
+  // Update partition info, mainly the partition key range; call this interface with caution, otherwise data loss may occur.
+  rpc UpdatePartition(UpdatePartitionRequest) returns (UpdatePartitionResponse) {}
+  // Delete the specified partition
+  rpc DelPartition(DelPartitionRequest) returns (DelPartitionResponse) {}
+  // Query partition info by conditions such as store and graph
+  rpc QueryPartitions(QueryPartitionsRequest) returns (QueryPartitionsResponse){}
+  // Read graph info
+  rpc GetGraph(GetGraphRequest) returns (GetGraphResponse){}
+  // Modify graph info
+  rpc SetGraph(SetGraphRequest) returns (SetGraphResponse){}
+  rpc DelGraph(DelGraphRequest) returns (DelGraphResponse){}
+  // Globally unique auto-increment ID
+  rpc GetId(GetIdRequest) returns (GetIdResponse){}
+  rpc ResetId(ResetIdRequest) returns (ResetIdResponse){}
+  // List of PD cluster members
+  rpc GetMembers(GetMembersRequest) returns (GetMembersResponse) {}
+  rpc GetStoreStatus(GetAllStoresRequest) returns (GetAllStoresResponse) {}
+  rpc GetPDConfig(GetPDConfigRequest) returns (GetPDConfigResponse){}
+  rpc SetPDConfig(SetPDConfigRequest) returns (SetPDConfigResponse){}
+  rpc GetGraphSpace(GetGraphSpaceRequest) returns (GetGraphSpaceResponse){}
+  rpc SetGraphSpace(SetGraphSpaceRequest) returns (SetGraphSpaceResponse){}
+  // Get cluster health status
+  rpc GetClusterStats(GetClusterStatsRequest) returns (GetClusterStatsResponse){}
+  // Replace PD cluster nodes
+  rpc ChangePeerList(ChangePeerListRequest) returns (getChangePeerListResponse) {}
+  // Data split
+  rpc SplitData(SplitDataRequest) returns (SplitDataResponse){}
+
+  rpc SplitGraphData(SplitGraphDataRequest) returns (SplitDataResponse) {}
+  // Data migration
+  rpc MovePartition(MovePartitionRequest) returns (MovePartitionResponse){}
+  // Report the execution results of tasks such as partition splits
+  rpc ReportTask(ReportTaskRequest) returns (ReportTaskResponse){}
+
+  rpc GetPartitionStats(GetPartitionStatsRequest) returns (GetPartitionStatsResponse){}
+  // Balance the number of partition leaders across stores
+  rpc BalanceLeaders(BalanceLeadersRequest) returns (BalanceLeadersResponse){}
+
+  // Replace the license file
+  rpc PutLicense(PutLicenseRequest) returns (PutLicenseResponse){}
+
+  // Trigger rocksdb compaction
+  rpc DbCompaction(DbCompactionRequest) returns (DbCompactionResponse){}
+
+  // Merge partitions across the cluster
+  rpc CombineCluster(CombineClusterRequest) returns (CombineClusterResponse){}
+  // Scale in a single graph
+  rpc CombineGraph(CombineGraphRequest) returns (CombineGraphResponse) {}
+
+  // shard group
+  rpc GetShardGroup(GetShardGroupRequest) returns (GetShardGroupResponse){}
+  rpc UpdateShardGroup(UpdateShardGroupRequest) returns (UpdateShardGroupResponse){}
+  // Delete a shard group
+  rpc DeleteShardGroup(DeleteShardGroupRequest) returns (DeleteShardGroupResponse) {}
+  // Shard group maintenance operations
+  rpc UpdateShardGroupOp(ChangeShardRequest) returns (ChangeShardResponse){}
+  // change shard
+  rpc ChangeShard(ChangeShardRequest) returns (ChangeShardResponse) {}
+  // Update PD raft
+  rpc updatePdRaft(UpdatePdRaftRequest) returns (UpdatePdRaftResponse)  {}
+
+  rpc getCache(GetGraphRequest) returns (CacheResponse)  {}
+  rpc getPartitions(GetGraphRequest) returns (CachePartitionResponse)  {}
+}
+
+message RequestHeader {
+  // Cluster ID.
+  uint64 cluster_id = 1;
+  // Sender ID.
+  uint64 sender_id = 2;
+}
+
+message ResponseHeader {
+  // cluster_id is the ID of the cluster which sent the response.
+  uint64 cluster_id = 1;
+  Error error = 2;
+}
+
+enum ErrorType {
+  OK = 0;
+  UNKNOWN = 1;
+
+  NOT_LEADER = 100;
+  STORE_ID_NOT_EXIST = 101;
+  NO_ACTIVE_STORE = 102;
+  NOT_FOUND = 103;
+  PD_UNREACHABLE = 104;
+  LESS_ACTIVE_STORE = 105;
+  STORE_HAS_BEEN_REMOVED = 106;
+  STORE_PROHIBIT_DELETION = 111;
+  SET_CONFIG_SHARD_COUNT_ERROR = 112;
+  UPDATE_STORE_STATE_ERROR = 113;
+  STORE_PROHIBIT_DUPLICATE = 114;
+  ROCKSDB_READ_ERROR = 1002;
+  ROCKSDB_WRITE_ERROR = 1003;
+  ROCKSDB_DEL_ERROR = 1004;
+  ROCKSDB_SAVE_SNAPSHOT_ERROR = 1005;
+  ROCKSDB_LOAD_SNAPSHOT_ERROR = 1006;
+
+  // Splitting is forbidden in the current cluster state
+  Cluster_State_Forbid_Splitting = 1007;
+  // A split is already in progress
+  Split_Partition_Doing = 1008;
+  // The number of partitions on the store exceeds the upper limit
+  Too_Many_Partitions_Per_Store = 1009;
+  // License error
+  LICENSE_ERROR = 107;
+  // License verification error
+  LICENSE_VERIFY_ERROR = 108;
+
+  // Store decommissioning is in progress
+  Store_Tombstone_Doing = 1010;
+
+  // Invalid number of split partitions
+  Invalid_Split_Partition_Count = 1011;
+}
+
+message Error {
+  ErrorType type = 1;
+  string message = 2;
+}
+message GetStoreRequest {
+  RequestHeader header = 1;
+  uint64 store_id = 2;
+}
+
+message GetStoreResponse {
+  ResponseHeader header = 1;
+
+  metapb.Store store = 2;
+  metapb.StoreStats stats = 3;
+}
+
+message DetStoreRequest {
+  RequestHeader header = 1;
+  uint64 store_id = 2;
+}
+
+message DetStoreResponse {
+  ResponseHeader header = 1;
+  metapb.Store store = 2;
+}
+
+message RegisterStoreRequest {
+  RequestHeader header = 1;
+  metapb.Store store = 2;
+}
+
+
+message RegisterStoreResponse {
+  ResponseHeader header = 1;
+  // On first registration, a new store_id is returned
+  uint64 store_id = 2;
+}
+
+message SetStoreRequest {
+  RequestHeader header = 1;
+  metapb.Store store = 2;
+}
+
+message SetStoreResponse {
+  ResponseHeader header = 1;
+  // Returns the modified store
+  metapb.Store store = 2;
+}
+
+
+// Returns all stores containing graph_name; if graph_name is empty, returns all stores in the system
+message GetAllStoresRequest {
+  RequestHeader header = 1;
+  string graph_name = 2;
+  // Whether to exclude offline stores
+  bool exclude_offline_stores = 3;
+}
+
+message GetAllStoresResponse {
+  ResponseHeader header = 1;
+
+  repeated metapb.Store stores = 2;
+}
+
+
+message StoreHeartbeatRequest {
+  RequestHeader header = 1;
+
+  metapb.StoreStats stats = 2;
+}
+
+message StoreHeartbeatResponse {
+  ResponseHeader header = 1;
+  string cluster_version = 3;
+  metapb.ClusterStats clusterStats = 4;
+}
+
+message GetPartitionRequest {
+  RequestHeader header = 1;
+  string graph_name = 2;
+  bytes key = 3;
+}
+
+
+message GetPartitionByCodeRequest {
+  RequestHeader header = 1;
+  string graph_name = 2;
+  uint64 code = 3;
+}
+
+
+message GetPartitionResponse {
+  ResponseHeader header = 1;
+  metapb.Partition partition = 2;
+  metapb.Shard leader = 3;
+  // Offline shards
+  repeated metapb.Shard offline_shards = 4;
+}
+
+message GetPartitionByIDRequest {
+  RequestHeader header = 1;
+  string graph_name = 2;
+  uint32 partition_id = 3;
+}
+
+message DelPartitionRequest {
+  RequestHeader header = 1;
+  string graph_name = 2;
+  uint32 partition_id = 3;
+}
+message DelPartitionResponse {
+  ResponseHeader header = 1;
+  metapb.Partition partition = 2;
+}
+
+message UpdatePartitionRequest{
+  RequestHeader header = 1;
+  repeated metapb.Partition partition = 2;
+}
+
+message UpdatePartitionResponse{
+  ResponseHeader header = 1;
+  repeated metapb.Partition partition = 2;
+}
+// Use GetPartitionResponse as the response of GetPartitionByIDRequest.
+
+message ScanPartitionsRequest {
+  RequestHeader header = 1;
+  string graph_name = 2;
+  bytes start_key = 3;
+  bytes end_key = 4; // end_key is +inf when it is empty.
+}
+
+
+
+message ScanPartitionsResponse {
+  ResponseHeader header = 1;
+  repeated metapb.PartitionShard partitions = 4;
+}
+
+
+
+message QueryPartitionsRequest{
+  RequestHeader header = 1;
+  metapb.PartitionQuery query = 2;
+}
+
+message QueryPartitionsResponse {
+  ResponseHeader header = 1;
+  repeated metapb.Partition partitions = 4;
+}
+
+
+
+message GetGraphRequest{
+  RequestHeader header = 1;
+  string graph_name = 2;
+}
+
+message GetGraphResponse{
+  ResponseHeader header = 1;
+  metapb.Graph graph = 2;
+}
+
+message SetGraphRequest{
+  RequestHeader header = 1;
+  metapb.Graph graph = 2;
+}
+
+message SetGraphResponse{
+  ResponseHeader header = 1;
+  metapb.Graph graph = 2;
+}
+
+message DelGraphRequest{
+  RequestHeader header = 1;
+  string graph_name = 2;
+}
+
+message DelGraphResponse{
+  ResponseHeader header = 1;
+  metapb.Graph graph = 2;
+}
+
+message GetIdRequest{
+  RequestHeader header = 1;
+  string key = 2;
+  int32 delta = 3;
+}
+
+message GetIdResponse{
+  ResponseHeader header = 1;
+  int64 id = 2;
+  int32 delta = 3;
+}
+
+message ResetIdRequest{
+  RequestHeader header = 1;
+  string key = 2;
+}
+
+message ResetIdResponse{
+  ResponseHeader header = 1;
+  int32 result = 2;
+}
+
+message GetMembersRequest{
+  RequestHeader header = 1;
+}
+
+message GetMembersResponse{
+  ResponseHeader header = 1;
+  repeated metapb.Member members = 2;
+  metapb.Member leader = 3;
+}
+
+message GetPDConfigRequest{
+  RequestHeader header = 1;
+  uint64 version = 2;
+}
+
+message GetPDConfigResponse{
+  ResponseHeader header = 1;
+  metapb.PDConfig pd_config = 2;
+}
+
+message SetPDConfigRequest{
+  RequestHeader header = 1;
+  metapb.PDConfig pd_config = 2;
+}
+
+message SetPDConfigResponse{
+  ResponseHeader header = 1;
+}
+
+
+message GetGraphSpaceRequest{
+  RequestHeader header = 1;
+  string graph_Space_Name = 2;
+}
+
+message GetGraphSpaceResponse{
+  ResponseHeader header = 1;
+  repeated metapb.GraphSpace graph_space = 2;
+}
+
+message SetGraphSpaceRequest{
+  RequestHeader header = 1;
+  metapb.GraphSpace graph_space = 2;
+}
+
+message SetGraphSpaceResponse{
+  ResponseHeader header = 1;
+}
+
+message GetClusterStatsRequest{
+  RequestHeader header = 1;
+}
+
+message GetClusterStatsResponse{
+  ResponseHeader header = 1;
+  metapb.ClusterStats cluster = 2;
+}
+message ChangePeerListRequest{
+  RequestHeader header = 1;
+  string peer_List = 2;
+}
+message getChangePeerListResponse{
+  ResponseHeader header = 1;
+}
+
+enum OperationMode {
+  Auto = 0;
+  Expert = 1;
+}
+
+message SplitDataParam{
+  // ID of the source partition to be split
+  uint32 partition_id = 1;
+  // Target number of partitions
+  uint32 count = 2;
+}
+
+message SplitDataRequest{
+  RequestHeader header = 1;
+  // Operation mode
+  //   Auto: automatic split until each store reaches its maximum partition count
+  //   Expert: expert mode, splitParams must be specified
+  OperationMode mode = 2;
+  repeated SplitDataParam param = 3;
+}
+
+message SplitGraphDataRequest{
+  RequestHeader header = 1;
+  // Name of the graph to split
+  string graph_name = 2;
+  uint32 to_count = 3;
+}
+
+message SplitDataResponse{
+  ResponseHeader header = 1;
+}
+
+message MovePartitionParam{
+  uint32 partition_id = 1;
+  uint64 src_store_id = 2;
+  uint64 dst_store_id = 3;
+}
+
+message MovePartitionRequest{
+  RequestHeader header = 1;
+  // Operation mode
+  //   Auto: automatic migration until every store holds the same number of partitions
+  //   Expert: expert mode, transferParams must be specified
+  OperationMode mode = 2;
+  repeated MovePartitionParam param = 3;
+}
+
+message MovePartitionResponse{
+  ResponseHeader header = 1;
+}
+
+message ReportTaskRequest{
+  RequestHeader header = 1;
+  metaTask.Task task = 2;
+}
+
+message ReportTaskResponse{
+  ResponseHeader header = 1;
+}
+
+message GetPartitionStatsRequest{
+  RequestHeader header = 1;
+  uint32 partition_id = 2;
+  // If empty, returns the stats of this partition ID across all graphs
+  string graph_name = 4;
+}
+
+message GetPartitionStatsResponse{
+  ResponseHeader header = 1;
+  metapb.PartitionStats partition_stats = 2;
+}
+
+message BalanceLeadersRequest{
+  RequestHeader header = 1;
+}
+
+message BalanceLeadersResponse{
+  ResponseHeader header = 1;
+}
+
+message PutLicenseRequest{
+  RequestHeader header = 1;
+  bytes content = 2;
+}
+
+message PutLicenseResponse{
+  ResponseHeader header = 1;
+}
+
+message DbCompactionRequest{
+  RequestHeader header = 1;
+  string tableName = 2;
+}
+
+message DbCompactionResponse{
+  ResponseHeader header = 1;
+}
+
+message CombineClusterRequest {
+  RequestHeader header = 1;
+  uint32 toCount = 2;
+}
+
+message CombineClusterResponse {
+  ResponseHeader header = 1;
+}
+
+message CombineGraphRequest {
+  RequestHeader header = 1;
+  string graphName = 2;
+  uint32 toCount = 3;
+}
+
+message CombineGraphResponse {
+  ResponseHeader header = 1;
+}
+
+message DeleteShardGroupRequest {
+  RequestHeader header = 1;
+  uint32 groupId = 2;
+}
+
+message DeleteShardGroupResponse {
+  ResponseHeader header = 1;
+}
+
+message GetShardGroupRequest{
+  RequestHeader header = 1;
+  uint32 group_id = 2;
+}
+
+message GetShardGroupResponse{
+  ResponseHeader header = 1;
+  metapb.ShardGroup shardGroup = 2;
+}
+
+message UpdateShardGroupRequest{
+  RequestHeader header = 1;
+  metapb.ShardGroup shardGroup = 2;
+}
+
+message UpdateShardGroupResponse{
+  ResponseHeader header = 1;
+}
+
+message ChangeShardRequest{
+  RequestHeader header = 1;
+  uint32 groupId = 2;
+  repeated metapb.Shard shards = 3;
+}
+
+message ChangeShardResponse {
+  ResponseHeader header = 1;
+}
+
+message UpdatePdRaftRequest{
+  RequestHeader header = 1;
+  string config = 3;
+}
+
+message UpdatePdRaftResponse{
+  ResponseHeader header = 1;
+  string message = 2;
+}
+message CacheResponse {
+  ResponseHeader header = 1;
+  // Cached stores, shard groups and graphs
+  repeated metapb.Store stores = 2;
+  repeated metapb.ShardGroup shards = 3;
+  repeated metapb.Graph graphs = 4;
+}
+message CachePartitionResponse {
+  ResponseHeader header = 1;
+  repeated metapb.Partition partitions = 2;
+}
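
The PD service ties the other files together: a typical client first asks GetMembers for
the current leader, then routes key lookups through GetPartitionByCode using the hashed
key code described for metapb. A minimal sketch of that lookup, assuming the generated
blocking stub and the default outer class Pdpb for this file; the endpoint, cluster_id,
graph name, and code are placeholders:

    import org.apache.hugegraph.pd.grpc.PDGrpc;
    import org.apache.hugegraph.pd.grpc.Pdpb; // assumed outer class for pdpb.proto

    import io.grpc.ManagedChannel;
    import io.grpc.ManagedChannelBuilder;

    public class PdLookupSketch {

        public static void main(String[] args) {
            ManagedChannel channel = ManagedChannelBuilder.forTarget("127.0.0.1:8686")
                                                          .usePlaintext().build();
            PDGrpc.PDBlockingStub pd = PDGrpc.newBlockingStub(channel);

            Pdpb.RequestHeader header = Pdpb.RequestHeader.newBuilder()
                                                          .setClusterId(0) // placeholder
                                                          .build();

            // Route a pre-hashed key code to its partition and current leader shard.
            Pdpb.GetPartitionResponse resp = pd.getPartitionByCode(
                    Pdpb.GetPartitionByCodeRequest.newBuilder()
                        .setHeader(header)
                        .setGraphName("hugegraph/g") // illustrative graph name
                        .setCode(12345L)
                        .build());

            System.out.println("partition " + resp.getPartition().getId() +
                               " leader store " + resp.getLeader().getStoreId());
            channel.shutdown();
        }
    }
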