Posted to commits@kvrocks.apache.org by ti...@apache.org on 2022/10/15 01:12:56 UTC

[incubator-kvrocks] branch unstable updated: Move TCL test integration/cluster to Go case (#990)

This is an automated email from the ASF dual-hosted git repository.

tison pushed a commit to branch unstable
in repository https://gitbox.apache.org/repos/asf/incubator-kvrocks.git


The following commit(s) were added to refs/heads/unstable by this push:
     new d1e3787  Move TCL test integration/cluster to Go case (#990)
d1e3787 is described below

commit d1e378786aedc89aef7e11f928675bbcf358dfce
Author: tison <wa...@gmail.com>
AuthorDate: Sat Oct 15 09:12:51 2022 +0800

    Move TCL test integration/cluster to Go case (#990)
    
    Signed-off-by: tison <wa...@gmail.com>
---
 tests/gocase/integration/cluster/cluster_test.go   | 299 ++++++++++++++++++++
 .../integration/slotimport/slotimport_test.go      |   6 +-
 tests/gocase/util/slot.go                          |   8 +-
 tests/tcl/tests/integration/cluster.tcl            | 302 ---------------------
 tests/tcl/tests/test_helper.tcl                    |   1 -
 5 files changed, 304 insertions(+), 312 deletions(-)

diff --git a/tests/gocase/integration/cluster/cluster_test.go b/tests/gocase/integration/cluster/cluster_test.go
new file mode 100644
index 0000000..dd690c2
--- /dev/null
+++ b/tests/gocase/integration/cluster/cluster_test.go
@@ -0,0 +1,299 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*   http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing,
+* software distributed under the License is distributed on an
+* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+* KIND, either express or implied.  See the License for the
+* specific language governing permissions and limitations
+* under the License.
+ */
+
+package cluster
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/apache/incubator-kvrocks/tests/gocase/util"
+	"github.com/go-redis/redis/v9"
+	"github.com/stretchr/testify/require"
+)
+
+func TestDisableCluster(t *testing.T) {
+	srv := util.StartServer(t, map[string]string{})
+	defer srv.Close()
+
+	ctx := context.Background()
+	rdb := srv.NewClient()
+	defer func() { require.NoError(t, rdb.Close()) }()
+
+	t.Run("can't execute cluster command if disabled", func(t *testing.T) {
+		require.ErrorContains(t, rdb.ClusterNodes(ctx).Err(), "not enabled")
+	})
+}
+
+func TestClusterKeySlot(t *testing.T) {
+	srv := util.StartServer(t, map[string]string{"cluster-enabled": "yes"})
+	defer srv.Close()
+
+	ctx := context.Background()
+	rdb := srv.NewClient()
+	defer func() { require.NoError(t, rdb.Close()) }()
+
+	slotTableLen := len(util.SlotTable)
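+	// every SlotTable entry is the canonical key for its index, so CLUSTER KEYSLOT must return i for SlotTable[i]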
+	for i := 0; i < slotTableLen; i++ {
+		require.EqualValues(t, i, rdb.ClusterKeySlot(ctx, util.SlotTable[i]).Val())
+	}
+}
+
+func TestClusterNodes(t *testing.T) {
+	srv := util.StartServer(t, map[string]string{"cluster-enabled": "yes"})
+	defer srv.Close()
+
+	ctx := context.Background()
+	rdb := srv.NewClient()
+	defer func() { require.NoError(t, rdb.Close()) }()
+
+	nodeID := "07c37dfeb235213a872192d90877d0cd55635b91"
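+	// clusterx SETNODEID registers the ID this node uses to identify itself before any topology is applied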
+	require.NoError(t, rdb.Do(ctx, "clusterx", "SETNODEID", nodeID).Err())
+
+	t.Run("basic function of cluster", func(t *testing.T) {
+		// cluster is not initialized
+		util.ErrorRegexp(t, rdb.ClusterNodes(ctx).Err(), ".*CLUSTERDOWN.*not initialized.*")
+
+		// set cluster nodes info
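+		// SETNODES payload per line: <node-id> <host> <port> <role> <master-id or -> [slot ranges]; the trailing "2" is the topology version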
+		clusterNodes := fmt.Sprintf("%s %s %d master - 0-100", nodeID, srv.Host(), srv.Port())
+		require.NoError(t, rdb.Do(ctx, "clusterx", "SETNODES", clusterNodes, "2").Err())
+		require.EqualValues(t, "2", rdb.Do(ctx, "clusterx", "version").Val())
+
+		// get and check cluster nodes info
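+		// the node address is reported as host:port@cport, where cport is the base port plus 10000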
+		nodes := rdb.ClusterNodes(ctx).Val()
+		fields := strings.Split(nodes, " ")
+		require.Len(t, fields, 9)
+		require.Equal(t, fmt.Sprintf("%s@%d", srv.HostPort(), srv.Port()+10000), fields[1])
+		require.Equal(t, "myself,master", fields[2])
+		require.Equal(t, "0-100\n", fields[8])
+
+		// cluster slot command
+		slots := rdb.ClusterSlots(ctx).Val()
+		require.Len(t, slots, 1)
+		require.EqualValues(t, 0, slots[0].Start)
+		require.EqualValues(t, 100, slots[0].End)
+		require.EqualValues(t, []redis.ClusterNode{{ID: nodeID, Addr: srv.HostPort()}}, slots[0].Nodes)
+	})
+
+	t.Run("cluster topology is reset by old version", func(t *testing.T) {
+		// set cluster nodes info
+		clusterNodes := fmt.Sprintf("%s %s %d master - 0-200", nodeID, srv.Host(), srv.Port())
+		require.NoError(t, rdb.Do(ctx, "clusterx", "SETNODES", clusterNodes, "1", "force").Err())
+		require.EqualValues(t, "1", rdb.Do(ctx, "clusterx", "version").Val())
+		nodes := rdb.ClusterNodes(ctx).Val()
+		fields := strings.Split(nodes, " ")
+		require.Len(t, fields, 9)
+		require.Equal(t, "0-200\n", fields[8])
+	})
+
+	t.Run("errors of cluster subcommand", func(t *testing.T) {
+		require.ErrorContains(t, rdb.Do(ctx, "cluster", "no-subcommand").Err(), "CLUSTER")
+		require.ErrorContains(t, rdb.Do(ctx, "clusterx", "version", "a").Err(), "CLUSTER")
+		require.ErrorContains(t, rdb.Do(ctx, "cluster", "nodes", "a").Err(), "CLUSTER")
+		require.ErrorContains(t, rdb.Do(ctx, "clusterx", "setnodeid", "a").Err(), "CLUSTER")
+		require.ErrorContains(t, rdb.Do(ctx, "clusterx", "setnodes", "a").Err(), "CLUSTER")
+		require.ErrorContains(t, rdb.Do(ctx, "clusterx", "setnodes", "a", -1).Err(), "Invalid cluster version")
+		require.ErrorContains(t, rdb.Do(ctx, "clusterx", "setslot", "16384", "07c37dfeb235213a872192d90877d0cd55635b91", 1).Err(), "CLUSTER")
+		require.ErrorContains(t, rdb.Do(ctx, "clusterx", "setslot", "16384", "a", 1).Err(), "CLUSTER")
+	})
+}
+
+func TestClusterComplexTopology(t *testing.T) {
+	srv := util.StartServer(t, map[string]string{"cluster-enabled": "yes"})
+	defer srv.Close()
+
+	ctx := context.Background()
+	rdb := srv.NewClient()
+	defer func() { require.NoError(t, rdb.Close()) }()
+
+	nodeID := "07c37dfeb235213a872192d90877d0cd55635b91"
+	clusterNodes := fmt.Sprintf("%s %s %d master - ", nodeID, srv.Host(), srv.Port())
+	clusterNodes += "0-1 2 4-8191 8192 8193 10000 10002-11002 16381 16382-16383"
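+	// adjacent ranges and single slots above are expected to be merged in the CLUSTER NODES output (asserted below)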
+	require.NoError(t, rdb.Do(ctx, "clusterx", "SETNODES", clusterNodes, "1").Err())
+	require.NoError(t, rdb.Do(ctx, "clusterx", "SETNODEID", nodeID).Err())
+
+	slots := rdb.ClusterSlots(ctx).Val()
+	require.Len(t, slots, 5)
+	require.EqualValues(t, 10000, slots[2].Start)
+	require.EqualValues(t, 10000, slots[2].End)
+	require.EqualValues(t, []redis.ClusterNode{{ID: nodeID, Addr: srv.HostPort()}}, slots[2].Nodes)
+
+	nodes := rdb.ClusterNodes(ctx).Val()
+	require.Contains(t, nodes, "0-2 4-8193 10000 10002-11002 16381-16383")
+}
+
+func TestClusterSlotSet(t *testing.T) {
+	ctx := context.Background()
+
+	srv1 := util.StartServer(t, map[string]string{"cluster-enabled": "yes"})
+	defer srv1.Close()
+	rdb1 := srv1.NewClient()
+	defer func() { require.NoError(t, rdb1.Close()) }()
+	nodeID1 := "07c37dfeb235213a872192d90877d0cd55635b91"
+	require.NoError(t, rdb1.Do(ctx, "clusterx", "SETNODEID", nodeID1).Err())
+
+	srv2 := util.StartServer(t, map[string]string{"cluster-enabled": "yes"})
+	defer srv2.Close()
+	rdb2 := srv2.NewClient()
+	defer func() { require.NoError(t, rdb2.Close()) }()
+	nodeID2 := "07c37dfeb235213a872192d90877d0cd55635b92"
+	require.NoError(t, rdb2.Do(ctx, "clusterx", "SETNODEID", nodeID2).Err())
+
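+	// node1 initially owns all 16384 slots and node2 owns none; slots 0 and 1 are then moved to node2 via SETSLOT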
+	clusterNodes := fmt.Sprintf("%s %s %d master - 0-16383\n", nodeID1, srv1.Host(), srv1.Port())
+	clusterNodes += fmt.Sprintf("%s %s %d master -", nodeID2, srv2.Host(), srv2.Port())
+	require.NoError(t, rdb2.Do(ctx, "clusterx", "SETNODES", clusterNodes, "2").Err())
+	require.NoError(t, rdb1.Do(ctx, "clusterx", "SETNODES", clusterNodes, "2").Err())
+
+	slotKey := util.SlotTable[0]
+	require.NoError(t, rdb1.Set(ctx, slotKey, 0, 0).Err())
+	util.ErrorRegexp(t, rdb2.Set(ctx, slotKey, 0, 0).Err(), fmt.Sprintf(".*MOVED 0.*%d.*", srv1.Port()))
+
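+	// clusterx SETSLOT <slot> node <node-id> <version> reassigns a single slot; both nodes apply it with the bumped version 3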
+	require.NoError(t, rdb2.Do(ctx, "clusterx", "setslot", "0", "node", nodeID2, "3").Err())
+	require.NoError(t, rdb1.Do(ctx, "clusterx", "setslot", "0", "node", nodeID2, "3").Err())
+	require.EqualValues(t, "3", rdb2.Do(ctx, "clusterx", "version").Val())
+	require.EqualValues(t, "3", rdb1.Do(ctx, "clusterx", "version").Val())
+	slots := rdb2.ClusterSlots(ctx).Val()
+	require.EqualValues(t, slots, rdb1.ClusterSlots(ctx).Val())
+	require.Len(t, slots, 2)
+	require.EqualValues(t, 0, slots[0].Start)
+	require.EqualValues(t, 0, slots[0].End)
+	require.EqualValues(t, []redis.ClusterNode{{ID: nodeID2, Addr: srv2.HostPort()}}, slots[0].Nodes)
+	require.EqualValues(t, 1, slots[1].Start)
+	require.EqualValues(t, 16383, slots[1].End)
+	require.EqualValues(t, []redis.ClusterNode{{ID: nodeID1, Addr: srv1.HostPort()}}, slots[1].Nodes)
+
+	require.NoError(t, rdb2.Set(ctx, slotKey, 0, 0).Err())
+	util.ErrorRegexp(t, rdb1.Set(ctx, slotKey, 0, 0).Err(), fmt.Sprintf(".*MOVED 0.*%d.*", srv2.Port()))
+	require.NoError(t, rdb2.Do(ctx, "clusterx", "setslot", "1", "node", nodeID2, "4").Err())
+	require.NoError(t, rdb1.Do(ctx, "clusterx", "setslot", "1", "node", nodeID2, "4").Err())
+	slots = rdb2.ClusterSlots(ctx).Val()
+	require.EqualValues(t, slots, rdb1.ClusterSlots(ctx).Val())
+	require.Len(t, slots, 2)
+	require.EqualValues(t, 0, slots[0].Start)
+	require.EqualValues(t, 1, slots[0].End)
+	require.EqualValues(t, []redis.ClusterNode{{ID: nodeID2, Addr: srv2.HostPort()}}, slots[0].Nodes)
+	require.EqualValues(t, 2, slots[1].Start)
+	require.EqualValues(t, 16383, slots[1].End)
+	require.EqualValues(t, []redis.ClusterNode{{ID: nodeID1, Addr: srv1.HostPort()}}, slots[1].Nodes)
+
+	// a wrong version is rejected: both a skipped-ahead version (6) and a stale version (4) fail to update the slot distribution
+	require.ErrorContains(t, rdb2.Do(ctx, "clusterx", "setslot", "2", "node", nodeID2, "6").Err(), "version")
+	require.ErrorContains(t, rdb2.Do(ctx, "clusterx", "setslot", "2", "node", nodeID2, "4").Err(), "version")
+	require.EqualValues(t, "4", rdb2.Do(ctx, "clusterx", "version").Val())
+	require.EqualValues(t, "4", rdb1.Do(ctx, "clusterx", "version").Val())
+}
+
+func TestClusterMultiple(t *testing.T) {
+	ctx := context.Background()
+
+	var srv []*util.KvrocksServer
+	var rdb []*redis.Client
+	var nodeID []string
+
+	for i := 0; i < 4; i++ {
+		s := util.StartServer(t, map[string]string{"cluster-enabled": "yes"})
+		t.Cleanup(s.Close)
+		c := s.NewClient()
+		t.Cleanup(func() { require.NoError(t, c.Close()) })
+		srv = append(srv, s)
+		rdb = append(rdb, c)
+		nodeID = append(nodeID, fmt.Sprintf("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx%02d", i))
+	}
+
+	t.Run("requests on non-init-cluster", func(t *testing.T) {
+		util.ErrorRegexp(t, rdb[0].Set(ctx, util.SlotTable[0], 0, 0).Err(), ".*CLUSTERDOWN.*not served.*")
+		util.ErrorRegexp(t, rdb[2].Set(ctx, util.SlotTable[16383], 16383, 0).Err(), ".*CLUSTERDOWN.*not served.*")
+	})
+
+	clusterNodes := fmt.Sprintf("%s %s %d master - 0-1 3 5-8191\n", nodeID[1], srv[1].Host(), srv[1].Port())
+	clusterNodes += fmt.Sprintf("%s %s %d master - 8192-16383\n", nodeID[2], srv[2].Host(), srv[2].Port())
+	clusterNodes += fmt.Sprintf("%s %s %d slave %s", nodeID[3], srv[3].Host(), srv[3].Port(), nodeID[2])
+
+	// node0 doesn't serve any slot, just like a router
+	for i := 0; i < 4; i++ {
+		require.NoError(t, rdb[i].Do(ctx, "clusterx", "setnodes", clusterNodes, "1").Err())
+	}
+
+	t.Run("cluster info command", func(t *testing.T) {
+		r := rdb[1].ClusterInfo(ctx).Val()
+		require.Contains(t, r, "cluster_state:ok")
+		require.Contains(t, r, "cluster_slots_assigned:16382")
+		require.Contains(t, r, "cluster_slots_ok:16382")
+		require.Contains(t, r, "cluster_known_nodes:3")
+		require.Contains(t, r, "cluster_size:2")
+		require.Contains(t, r, "cluster_current_epoch:1")
+		require.Contains(t, r, "cluster_my_epoch:1")
+	})
+
+	t.Run("MOVED slot ip:port if needed", func(t *testing.T) {
+		// request node2, which doesn't serve slot 0; we will receive MOVED
+		util.ErrorRegexp(t, rdb[2].Set(ctx, util.SlotTable[0], 0, 0).Err(), fmt.Sprintf(".*MOVED 0.*%d.*", srv[1].Port()))
+		// request node3, which doesn't serve slot 0; we will receive MOVED
+		util.ErrorRegexp(t, rdb[3].Get(ctx, util.SlotTable[0]).Err(), fmt.Sprintf(".*MOVED 0.*%d.*", srv[1].Port()))
+		// request node1, which doesn't serve slot 16383; we will receive MOVED, and the MOVED target must be the master
+		util.ErrorRegexp(t, rdb[1].Get(ctx, util.SlotTable[16383]).Err(), fmt.Sprintf(".*MOVED 16383.*%d.*", srv[2].Port()))
+	})
+
+	t.Run("requests on cluster are ok", func(t *testing.T) {
+		// request node1 that serves slot 0, that's ok
+		require.NoError(t, rdb[1].Set(ctx, util.SlotTable[0], 0, 0).Err())
+		// request node2, which serves slot 16383; that's ok
+		require.NoError(t, rdb[2].Set(ctx, util.SlotTable[16383], 16383, 0).Err())
+		// sending a write command to the replica is wrong
+		require.ErrorContains(t, rdb[3].Set(ctx, util.SlotTable[16383], 16383, 0).Err(), "MOVED")
+		// a read-only command to node3, which serves slot 16383, is ok once replication has caught up
+		util.WaitForOffsetSync(t, rdb[2], rdb[3])
+		require.Equal(t, "16383", rdb[3].Get(ctx, util.SlotTable[16383]).Val())
+	})
+
+	t.Run("requests non-member of cluster, role is master", func(t *testing.T) {
+		util.ErrorRegexp(t, rdb[0].Set(ctx, util.SlotTable[0], 0, 0).Err(), fmt.Sprintf(".*MOVED 0.*%d.*", srv[1].Port()))
+		util.ErrorRegexp(t, rdb[0].Get(ctx, util.SlotTable[16383]).Err(), fmt.Sprintf(".*MOVED 16383.*%d.*", srv[2].Port()))
+	})
+
+	t.Run("cluster slot is not served", func(t *testing.T) {
+		util.ErrorRegexp(t, rdb[1].Set(ctx, util.SlotTable[2], 2, 0).Err(), ".*CLUSTERDOWN.*not served.*")
+	})
+
+	t.Run("multiple keys(cross slots) command is wrong", func(t *testing.T) {
+		require.ErrorContains(t, rdb[1].MSet(ctx, util.SlotTable[0], 0, util.SlotTable[1], 1).Err(), "CROSSSLOT")
+	})
+
+	t.Run("multiple keys(the same slots) command is right", func(t *testing.T) {
+		require.NoError(t, rdb[1].MSet(ctx, util.SlotTable[0], 0, util.SlotTable[0], 1).Err())
+	})
+
+	t.Run("cluster MULTI-exec cross slots and in one node", func(t *testing.T) {
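+		// keys in different slots can be written inside MULTI/EXEC here because both slots are served by node1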
+		require.NoError(t, rdb[1].Do(ctx, "MULTI").Err())
+		require.NoError(t, rdb[1].Set(ctx, util.SlotTable[0], 0, 0).Err())
+		require.NoError(t, rdb[1].Set(ctx, util.SlotTable[1], 0, 0).Err())
+		require.EqualValues(t, []interface{}{"OK", "OK"}, rdb[1].Do(ctx, "EXEC").Val())
+	})
+
+	t.Run("cluster MULTI-exec cross slots but not in one node", func(t *testing.T) {
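+		// a MOVED error raised inside MULTI aborts the transaction: EXEC fails with EXECABORT and the queued write is not applied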
+		require.NoError(t, rdb[1].Set(ctx, util.SlotTable[0], "no-multi", 0).Err())
+		require.NoError(t, rdb[1].Do(ctx, "MULTI").Err())
+		require.NoError(t, rdb[1].Set(ctx, util.SlotTable[0], "multi", 0).Err())
+		util.ErrorRegexp(t, rdb[1].Set(ctx, util.SlotTable[16383], 0, 0).Err(), fmt.Sprintf(".*MOVED 16383.*%d.*", srv[2].Port()))
+		require.ErrorContains(t, rdb[1].Do(ctx, "EXEC").Err(), "EXECABORT")
+		require.Equal(t, "no-multi", rdb[1].Get(ctx, util.SlotTable[0]).Val())
+	})
+}
diff --git a/tests/gocase/integration/slotimport/slotimport_test.go b/tests/gocase/integration/slotimport/slotimport_test.go
index 653b6d3..9abf237 100644
--- a/tests/gocase/integration/slotimport/slotimport_test.go
+++ b/tests/gocase/integration/slotimport/slotimport_test.go
@@ -82,7 +82,7 @@ func TestImportedServer(t *testing.T) {
 
 	t.Run("IMPORT - slot states in right order", func(t *testing.T) {
 		slotNum := 1
-		slotKey := util.GetKeyWithSlotNum(slotNum)
+		slotKey := util.SlotTable[slotNum]
 
 		// import start
 		require.Equal(t, "OK", rdb.Do(ctx, "cluster", "import", slotNum, 0).Val())
@@ -104,7 +104,7 @@ func TestImportedServer(t *testing.T) {
 
 	t.Run("IMPORT - slot state 'error'", func(t *testing.T) {
 		slotNum := 10
-		slotKey := util.GetKeyWithSlotNum(slotNum)
+		slotKey := util.SlotTable[slotNum]
 
 		require.Equal(t, "OK", rdb.Do(ctx, "cluster", "import", slotNum, 0).Val())
 		require.NoError(t, rdb.Set(ctx, slotKey, "slot10_again", 0).Err())
@@ -124,7 +124,7 @@ func TestImportedServer(t *testing.T) {
 
 	t.Run("IMPORT - connection broken", func(t *testing.T) {
 		slotNum := 11
-		slotKey := util.GetKeyWithSlotNum(slotNum)
+		slotKey := util.SlotTable[slotNum]
 		require.Equal(t, "OK", rdb.Do(ctx, "cluster", "import", slotNum, 0).Val())
 		require.NoError(t, rdb.Set(ctx, slotKey, "slot11", 0).Err())
 		require.Equal(t, "slot11", rdb.Get(ctx, slotKey).Val())
diff --git a/tests/gocase/util/slot.go b/tests/gocase/util/slot.go
index 18922d1..302a3aa 100644
--- a/tests/gocase/util/slot.go
+++ b/tests/gocase/util/slot.go
@@ -19,16 +19,12 @@
 
 package util
 
-func GetKeyWithSlotNum(slotNum int) string {
-	return slotTable[slotNum]
-}
-
-// A table of the shortest possible alphanumeric string that is mapped by
+// SlotTable is a table of the shortest possible alphanumeric string that is mapped by
 // redis's crc16 to any given redis cluster slot.
 //
 // The array indexes are slot numbers, so that given a desired slot, this string is guaranteed
 // to make redis cluster route a request to the shard holding this slot
-var slotTable = [...]string{
+var SlotTable = [...]string{
 	"06S", "Qi", "5L5", "4Iu", "4gY", "460", "1Y7", "1LV", "0QG", "ru", "7Ok", "4ji", "4DE", "65n", "2JH", "I8", "F9", "SX", "7nF", "4KD",
 	"4eh", "6PK", "2ke", "1Ng", "0Sv", "4L", "491", "4hX", "4Ft", "5C4", "2Hy", "09R", "021", "0cX", "4Xv", "6mU", "6Cy", "42R", "0Mt", "nF",
 	"cv", "1Pe", "5kK", "6NI", "74L", "4UF", "0nh", "MZ", "2TJ", "0ai", "4ZG", "6od", "6AH", "40c", "0OE", "lw", "aG", "0Bu", "5iz", "6Lx",
diff --git a/tests/tcl/tests/integration/cluster.tcl b/tests/tcl/tests/integration/cluster.tcl
deleted file mode 100644
index 2b2d31a..0000000
--- a/tests/tcl/tests/integration/cluster.tcl
+++ /dev/null
@@ -1,302 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-source "tests/helpers/crc16_slottable.tcl"
-
-start_server {tags {"disable-cluster"}} {
-    test {can't execute cluster command if disabled} {
-        catch {[r cluster nodes]} var
-        assert_match "*not enabled*" $var
-    }
-}
-
-start_server {tags {"cluster"} overrides {cluster-enabled yes}} {
-    test {CLUSTER KEYSLOT} {
-        set slot_table_len [llength $::CRC16_SLOT_TABLE]
-        for {set i 0} {$i < $slot_table_len} {incr i} {
-            assert_equal $i [r cluster keyslot [lindex $::CRC16_SLOT_TABLE $i]]
-        }
-    }
-}
-
-start_server {tags {"cluster"} overrides {cluster-enabled yes}} {
-    set nodeid "07c37dfeb235213a872192d90877d0cd55635b91"
-    r clusterx SETNODEID $nodeid
-
-    test {basic function of cluster} {
-        # Cluster is not initialized
-        catch {[r cluster nodes]} err
-        assert_match "*CLUSTERDOWN*not initialized*" $err
-
-        # Set cluster nodes info
-        set port [srv port]
-        set nodes_str "$nodeid 127.0.0.1 $port master - 0-100"
-        r clusterx setnodes $nodes_str 2
-        assert_equal 2 [r clusterx version]
-
-        # Get and check cluster nodes info
-        set output_nodes [r cluster nodes]
-        set fields [split $output_nodes " "]
-        assert_equal 9 [llength $fields]
-        assert_equal "127.0.0.1:$port@[expr $port + 10000]" [lindex $fields 1]
-        assert_equal "myself,master" [lindex $fields 2]
-        assert_equal "0-100\n" [lindex $fields 8]
-
-        # Cluster slot command
-        set ret [r cluster slots]
-        assert_equal ${ret} "{0 100 {127.0.0.1 ${port} ${nodeid}}}"
-    }
-
-    test {cluser topology is reset by old version} {
-        # Set cluster nodes info
-        set port [srv port]
-        set nodes_str "07c37dfeb235213a872192d90877d0cd55635b91 127.0.0.1 $port master - 0-200"
-        r clusterx setnodes $nodes_str 1 force
-        assert_equal 1 [r clusterx version]
-
-        set output_nodes [r cluster nodes]
-        assert_equal "0-200\n" [lindex [split $output_nodes " "] 8]
-    }
-
-    test {errors of cluster subcommand} {
-        catch {[r cluster no-subcommand]} err
-        assert_match "*CLUSTER*" $err
-
-        catch {[r clusterx version a]} err
-        assert_match "*CLUSTER*" $err
-
-        catch {[r cluster nodes a]} err
-        assert_match "*CLUSTER*" $err
-
-        catch {[r clusterx setnodeid a]} err
-        assert_match "*CLUSTER*" $err
-
-        catch {[r clusterx setnodes a]} err
-        assert_match "*CLUSTER*" $err
-
-        catch {[r clusterx setnodes a -1]} err
-        assert_match "*Invalid cluster version*" $err
-
-        catch {[r clusterx setslot 16384 07c37dfeb235213a872192d90877d0cd55635b91 1]} err
-        assert_match "*CLUSTER*" $err
-
-        catch {[r clusterx setslot 16383 a 1]} err
-        assert_match "*CLUSTER*" $err
-    }
-}
-
-start_server {tags {"cluster"} overrides {cluster-enabled yes}} {
-    set nodeid1 "07c37dfeb235213a872192d90877d0cd55635b91"
-    r clusterx SETNODEID $nodeid1
-    set port1 [srv port]
-
-    start_server {tags {"cluster"} overrides {cluster-enabled yes}} {
-        set nodeid2 "07c37dfeb235213a872192d90877d0cd55635b92"
-        r clusterx SETNODEID $nodeid2
-        set port2 [srv port]
-
-        test {cluster slotset command test} {
-            set nodes_str "$nodeid1 127.0.0.1 $port1 master - 0-16383"
-            set nodes_str "$nodes_str\n$nodeid2 127.0.0.1 $port2 master -"
-
-            r clusterx setnodes $nodes_str 2
-            r -1 clusterx setnodes $nodes_str 2
-
-            set slot_0_key "06S"
-            assert_equal {OK} [r -1 set $slot_0_key 0]
-            catch {[r set $slot_0_key 0]} err
-            assert_match "*MOVED 0*$port1*" $err
-
-            r clusterx setslot 0 node $nodeid2 3
-            r -1 clusterx setslot 0 node $nodeid2 3
-            assert_equal {3} [r clusterx version]
-            assert_equal {3} [r -1 clusterx version]
-            assert_equal [r cluster slots] [r -1 cluster slots]
-            assert_equal [r cluster slots] "{0 0 {127.0.0.1 $port2 $nodeid2}} {1 16383 {127.0.0.1 $port1 $nodeid1}}"
-
-            assert_equal {OK} [r set $slot_0_key 0]
-            catch {[r -1 set $slot_0_key 0]} err
-            assert_match "*MOVED 0*$port2*" $err
-
-            r clusterx setslot 1 node $nodeid2 4
-            r -1 clusterx setslot 1 node $nodeid2 4
-            assert_equal [r cluster slots] [r -1 cluster slots]
-            assert_equal [r cluster slots] "{0 1 {127.0.0.1 $port2 $nodeid2}} {2 16383 {127.0.0.1 $port1 $nodeid1}}"
-
-            # wrong version can't update slot distribution
-            catch {[r clusterx setslot 2 node $nodeid2 6]} err
-            assert_match "*version*" $err
-
-            catch {[r clusterx setslot 2 node $nodeid2 4]} err
-            assert_match "*version*" $err
-
-            assert_equal {4} [r clusterx version]
-            assert_equal {4} [r -1 clusterx version]
-        }
-    }
-}
-
-start_server {tags {"cluster"} overrides {cluster-enabled yes}} {
-    test {cluster slots and nodes about complex topology} {
-        set nodeid "07c37dfeb235213a872192d90877d0cd55635b91"
-        set host [srv host]
-        set port [srv port]
-        set cluster_nodes "$nodeid $host $port master -"
-        set cluster_nodes "${cluster_nodes} 0-1 2 4-8191 8192 8193 10000 10002-11002 16381 16382-16383"
-        r clusterx setnodes "$cluster_nodes" 1
-        r clusterx setnodeid $nodeid
-        set ret [r cluster slots]
-        assert_equal 5 [llength $ret]
-
-        set slot_1w [lindex $ret 2]
-        assert_equal ${slot_1w} "10000 10000 {${host} ${port} ${nodeid}}"
-
-        set ret [r cluster nodes]
-        assert_match "*0-2 4-8193 10000 10002-11002 16381-16383*" $ret
-    }
-}
-
-start_server {tags {"cluster"} overrides {cluster-enabled yes}} {
-    set r0 [srv 0 client]
-    set node0_host [srv host]
-    set node0_port [srv port]
-    set node0_id "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx00"
-    start_server {tags {"cluster"} overrides {cluster-enabled yes}} {
-        set r1 [srv 0 client]
-        set node1_host [srv host]
-        set node1_port [srv port]
-        set node1_id "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx01"
-        start_server {tags {"cluster"} overrides {cluster-enabled yes}} {
-            set r2 [srv 0 client]
-            set node2_host [srv host]
-            set node2_port [srv port]
-            set node2_id "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx02"
-            start_server {tags {"cluster"} overrides {cluster-enabled yes}} {
-                set r3 [srv 0 client]
-                set node3_host [srv host]
-                set node3_port [srv port]
-                set node3_id "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx03"
-
-                set slot_0_key      "06S"
-                set slot_1_key      "Qi"
-                set slot_2_key      "5L5"
-                set slot_16382_key  "4oi"
-                set slot_16383_key  "6ZJ"
-
-                test {requests on non-init-cluster} {
-                    catch {[$r0 set $slot_0_key 0]} err
-                    assert_match "*CLUSTERDOWN*not served*" $err
-
-                    catch {[$r2 set $slot_16383_key 16383]} err
-                    assert_match "*CLUSTERDOWN*not served*" $err
-                }
-
-                set cluster_nodes  "$node1_id $node1_host $node1_port master - 0-1 3 5-8191"
-                set cluster_nodes  "$cluster_nodes\n$node2_id $node2_host $node2_port master - 8192-16383"
-                set cluster_nodes  "$cluster_nodes\n$node3_id $node3_host $node3_port slave $node2_id"
-
-                # node0 doesn't serve any slot, just like a router
-                $r0 clusterx setnodes $cluster_nodes 1
-                $r1 clusterx setnodes $cluster_nodes 1
-                $r2 clusterx setnodes $cluster_nodes 1
-                $r3 clusterx setnodes $cluster_nodes 1
-
-                test {cluster info command} {
-                    set ret [$r1 cluster info]
-                    assert_match "*cluster_state:ok*" $ret
-                    assert_match "*cluster_slots_assigned:16382*" $ret
-                    assert_match "*cluster_slots_ok:16382*" $ret
-                    assert_match "*cluster_known_nodes:3*" $ret
-                    assert_match "*cluster_size:2*" $ret
-                    assert_match "*cluster_current_epoch:1*" $ret
-                    assert_match "*cluster_my_epoch:1*" $ret
-                }
-
-                test {MOVED slot ip:port if needed} {
-                    # Request node2 that doesn't serve slot 0, we will recieve MOVED
-                    catch {[$r2 set $slot_0_key 0]} err
-                    assert_match "*MOVED 0*$node1_port*" $err
-
-                    # Request node3 that doesn't serve slot 0, we will recieve MOVED
-                    catch {[$r3 get $slot_0_key]} err
-                    assert_match "*MOVED 0*$node1_port*" $err
-
-                    # Request node1 that doesn't serve slot 16383, we will recieve MOVED,
-                    # and the MOVED node must be master
-                    catch {[$r1 get $slot_16383_key]} err
-                    assert_match "*MOVED 16383*$node2_port*" $err
-                }
-
-                test {requests on cluster are ok} {
-                    # Request node1 that serves slot 0, that's ok
-                    assert_equal "OK" [$r1 set $slot_0_key 0]
-
-                    # Request node2 that serve slot 16383, that's ok
-                    assert_equal "OK" [$r2 set $slot_16383_key 16383]
-                    after 200
-
-                    # Request replicas a write command, it is wrong
-                    catch {[$r3 set $slot_16383_key 16384]} err
-                    assert_match "*MOVED*" $err
-
-                    # Request a read-only command to node3 that serve slot 16383, that's ok
-                    assert_equal "16383" [$r3 get $slot_16383_key]
-                }
-
-                test {requests non-member of cluster, role is master} {
-                    catch {[$r0 set $slot_0_key 0]} err
-                    assert_match "*MOVED 0*$node1_port*" $err
-
-                    catch {[$r0 get $slot_16383_key]} err
-                    assert_match "*MOVED 16383*$node2_port*" $err
-                }
-
-                test {cluster slot is not served } {
-                    catch {[$r1 set $slot_2_key 2]} err
-                    assert_match "*CLUSTERDOWN*not served*" $err
-                }
-
-                test {multiple keys(cross slots) command is wrong} {
-                    catch {[$r1 mset $slot_0_key 0 $slot_1_key 1]} err
-                    assert_match "*CROSSSLOT*" $err
-                }
-
-                test {multiple keys(the same slots) command is right} {
-                    $r1 mset $slot_0_key 0 $slot_0_key 1
-                } {OK}
-
-                test {cluster MULTI-exec cross slots and in one node } {
-                    $r1 multi
-                    $r1 set $slot_0_key 0
-                    $r1 set $slot_1_key 0
-                    $r1 exec
-                } {OK OK}
-
-                test {cluster MULTI-exec cross slots but not in one node } {
-                    $r1 set $slot_0_key no-multi
-                    $r1 multi
-                    $r1 set $slot_0_key multi
-                    catch {[$r1 set $slot_16383_key 0]} err
-                    assert_match "*MOVED*$node2_port*" $err
-                    catch {[$r1 exec]} err
-                    assert_match "*EXECABORT*" $err
-                    assert_match no-multi [$r1 get $slot_0_key]
-                }
-            }
-        }
-    }
-}
diff --git a/tests/tcl/tests/test_helper.tcl b/tests/tcl/tests/test_helper.tcl
index a8b70e7..80f7323 100644
--- a/tests/tcl/tests/test_helper.tcl
+++ b/tests/tcl/tests/test_helper.tcl
@@ -38,7 +38,6 @@ set ::all_tests {
     unit/geo
     integration/slotmigrate
     integration/replication
-    integration/cluster
 }
 
 # Index to the next test to run in the ::all_tests list.