Posted to commits@kvrocks.apache.org by ti...@apache.org on 2022/09/30 11:08:20 UTC

[incubator-kvrocks] branch unstable updated: Move TCL test unit/scan to Go case (#934)

This is an automated email from the ASF dual-hosted git repository.

tison pushed a commit to branch unstable
in repository https://gitbox.apache.org/repos/asf/incubator-kvrocks.git


The following commit(s) were added to refs/heads/unstable by this push:
     new a8838ac  Move TCL test unit/scan to Go case  (#934)
a8838ac is described below

commit a8838aca6d408c98d4d0cdf932855944e72f9bf8
Author: tison <wa...@gmail.com>
AuthorDate: Fri Sep 30 19:08:13 2022 +0800

    Move TCL test unit/scan to Go case  (#934)
    
    Signed-off-by: tison <wa...@gmail.com>
---
 tests/gocase/go.mod                            |   1 +
 tests/gocase/go.sum                            |   4 +-
 tests/gocase/unit/info/info_test.go            |  24 +-
 tests/gocase/unit/scan/scan_test.go            | 294 +++++++++++++++++++++++++
 tests/gocase/unit/scripting/scripting_test.go  |   6 +-
 tests/gocase/util/{conditions.go => client.go} |  26 ++-
 tests/gocase/util/server.go                    |   2 +-
 tests/gocase/util/tcp_client.go                |  14 +-
 tests/tcl/tests/test_helper.tcl                |   1 -
 tests/tcl/tests/unit/scan.tcl                  | 292 ------------------------
 10 files changed, 340 insertions(+), 324 deletions(-)

diff --git a/tests/gocase/go.mod b/tests/gocase/go.mod
index 77a79b6..95115f4 100644
--- a/tests/gocase/go.mod
+++ b/tests/gocase/go.mod
@@ -5,6 +5,7 @@ go 1.19
 require (
 	github.com/go-redis/redis/v9 v9.0.0-beta.2
 	github.com/stretchr/testify v1.8.0
+	golang.org/x/exp v0.0.0-20220929160808-de9c53c655b9
 	modernc.org/mathutil v1.5.0
 )
 
diff --git a/tests/gocase/go.sum b/tests/gocase/go.sum
index 5556753..6da69a2 100644
--- a/tests/gocase/go.sum
+++ b/tests/gocase/go.sum
@@ -21,8 +21,10 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+golang.org/x/exp v0.0.0-20220929160808-de9c53c655b9 h1:lNtcVz/3bOstm7Vebox+5m3nLh/BYWnhmc3AhXOW6oI=
+golang.org/x/exp v0.0.0-20220929160808-de9c53c655b9/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
 golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA=
-golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
 golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/tests/gocase/unit/info/info_test.go b/tests/gocase/unit/info/info_test.go
index 8aab000..a8923d1 100644
--- a/tests/gocase/unit/info/info_test.go
+++ b/tests/gocase/unit/info/info_test.go
@@ -55,21 +55,21 @@ func TestInfo(t *testing.T) {
 			time.Sleep(time.Second)
 		}
 
-		r := util.FindInfoEntry(t, ctx, rdb, "put_per_sec", "rocksdb")
+		r := util.FindInfoEntry(t, rdb, "put_per_sec", "rocksdb")
 		require.Greater(t, MustAtoi(t, r), 0)
-		r = util.FindInfoEntry(t, ctx, rdb, "get_per_sec", "rocksdb")
+		r = util.FindInfoEntry(t, rdb, "get_per_sec", "rocksdb")
 		require.Greater(t, MustAtoi(t, r), 0)
-		r = util.FindInfoEntry(t, ctx, rdb, "seek_per_sec", "rocksdb")
+		r = util.FindInfoEntry(t, rdb, "seek_per_sec", "rocksdb")
 		require.Greater(t, MustAtoi(t, r), 0)
-		r = util.FindInfoEntry(t, ctx, rdb, "next_per_sec", "rocksdb")
+		r = util.FindInfoEntry(t, rdb, "next_per_sec", "rocksdb")
 		require.Greater(t, MustAtoi(t, r), 0)
 	})
 
 	t.Run("get bgsave information by INFO", func(t *testing.T) {
-		require.Equal(t, "0", util.FindInfoEntry(t, ctx, rdb, "bgsave_in_progress", "persistence"))
-		require.Equal(t, "-1", util.FindInfoEntry(t, ctx, rdb, "last_bgsave_time", "persistence"))
-		require.Equal(t, "ok", util.FindInfoEntry(t, ctx, rdb, "last_bgsave_status", "persistence"))
-		require.Equal(t, "-1", util.FindInfoEntry(t, ctx, rdb, "last_bgsave_time_sec", "persistence"))
+		require.Equal(t, "0", util.FindInfoEntry(t, rdb, "bgsave_in_progress", "persistence"))
+		require.Equal(t, "-1", util.FindInfoEntry(t, rdb, "last_bgsave_time", "persistence"))
+		require.Equal(t, "ok", util.FindInfoEntry(t, rdb, "last_bgsave_status", "persistence"))
+		require.Equal(t, "-1", util.FindInfoEntry(t, rdb, "last_bgsave_time_sec", "persistence"))
 
 		r := rdb.Do(ctx, "bgsave")
 		v, err := r.Text()
@@ -77,14 +77,14 @@ func TestInfo(t *testing.T) {
 		require.Equal(t, "OK", v)
 
 		require.Eventually(t, func() bool {
-			e := MustAtoi(t, util.FindInfoEntry(t, ctx, rdb, "bgsave_in_progress", "persistence"))
+			e := MustAtoi(t, util.FindInfoEntry(t, rdb, "bgsave_in_progress", "persistence"))
 			return e == 0
 		}, 5*time.Second, 100*time.Millisecond)
 
-		lastBgsaveTime := MustAtoi(t, util.FindInfoEntry(t, ctx, rdb, "last_bgsave_time", "persistence"))
+		lastBgsaveTime := MustAtoi(t, util.FindInfoEntry(t, rdb, "last_bgsave_time", "persistence"))
 		require.Greater(t, lastBgsaveTime, 1640507660)
-		require.Equal(t, "ok", util.FindInfoEntry(t, ctx, rdb, "last_bgsave_status", "persistence"))
-		lastBgsaveTimeSec := MustAtoi(t, util.FindInfoEntry(t, ctx, rdb, "last_bgsave_time_sec", "persistence"))
+		require.Equal(t, "ok", util.FindInfoEntry(t, rdb, "last_bgsave_status", "persistence"))
+		lastBgsaveTimeSec := MustAtoi(t, util.FindInfoEntry(t, rdb, "last_bgsave_time_sec", "persistence"))
 		require.GreaterOrEqual(t, lastBgsaveTimeSec, 0)
 		require.Less(t, lastBgsaveTimeSec, 3)
 	})
diff --git a/tests/gocase/unit/scan/scan_test.go b/tests/gocase/unit/scan/scan_test.go
new file mode 100644
index 0000000..0803e85
--- /dev/null
+++ b/tests/gocase/unit/scan/scan_test.go
@@ -0,0 +1,294 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package scan
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/apache/incubator-kvrocks/tests/gocase/util"
+	"github.com/go-redis/redis/v9"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/exp/slices"
+)
+
+func TestScan(t *testing.T) {
+	srv := util.StartServer(t, map[string]string{})
+	defer srv.Close()
+
+	ctx := context.Background()
+	rdb := srv.NewClient()
+	defer func() { require.NoError(t, rdb.Close()) }()
+
+	t.Run("SCAN Basic", func(t *testing.T) {
+		require.NoError(t, rdb.FlushDB(ctx).Err())
+		util.Populate(t, rdb, "", 1000, 10)
+		keys := scanAll(t, rdb)
+		slices.Sort(keys)
+		keys = slices.Compact(keys)
+		require.Len(t, keys, 1000)
+	})
+
+	t.Run("SCAN COUNT", func(t *testing.T) {
+		require.NoError(t, rdb.FlushDB(ctx).Err())
+		util.Populate(t, rdb, "", 1000, 10)
+		keys := scanAll(t, rdb, "count", 5)
+		slices.Sort(keys)
+		keys = slices.Compact(keys)
+		require.Len(t, keys, 1000)
+	})
+
+	t.Run("SCAN MATCH", func(t *testing.T) {
+		require.NoError(t, rdb.FlushDB(ctx).Err())
+		util.Populate(t, rdb, "key:", 1000, 10)
+		keys := scanAll(t, rdb, "match", "key:*")
+		slices.Sort(keys)
+		keys = slices.Compact(keys)
+		require.Len(t, keys, 1000)
+	})
+
+	t.Run("SCAN guarantees check under write load", func(t *testing.T) {
+		require.NoError(t, rdb.FlushDB(ctx).Err())
+		util.Populate(t, rdb, "", 100, 10)
+
+		// We start scanning here, so keys from 0 to 99 should all be reported at the end of the iteration.
+		var keys []string
+		c := "0"
+		for {
+			cursor, keyList := scan(t, rdb, c)
+
+			c = cursor
+			keys = append(keys, keyList...)
+
+			if c == "0" {
+				break
+			}
+
+			// Write 10 random keys at every SCAN iteration.
+			for i := 0; i < 10; i++ {
+				require.NoError(t, rdb.Set(ctx, fmt.Sprintf("addedkey:%d", util.RandomInt(1000)), "foo", 0).Err())
+			}
+		}
+
+		var originKeys []string
+		for _, key := range keys {
+			if strings.Contains(key, "addedkey:") {
+				continue
+			}
+			originKeys = append(originKeys, key)
+		}
+		slices.Sort(originKeys)
+		originKeys = slices.Compact(originKeys)
+		require.Len(t, originKeys, 100)
+	})
+
+	t.Run("SCAN with multi namespace", func(t *testing.T) {
+		require.NoError(t, rdb.FlushDB(ctx).Err())
+		require.NoError(t, rdb.ConfigSet(ctx, "requirepass", "foobared").Err())
+
+		tokens := []string{"test_ns_token1", "test_ns_token2"}
+		keyPrefixes := []string{"key1*", "key2*"}
+		namespaces := []string{"test_ns1", "test_ns2"}
+
+		// Add namespaces and write keys
+		for i := 0; i < 2; i++ {
+			require.NoError(t, rdb.Do(ctx, "AUTH", "foobared").Err())
+			require.NoError(t, rdb.Do(ctx, "NAMESPACE", "ADD", namespaces[i], tokens[i]).Err())
+			require.NoError(t, rdb.Do(ctx, "AUTH", tokens[i]).Err())
+
+			for k := 0; k < 1000; k++ {
+				require.NoError(t, rdb.Set(ctx, fmt.Sprintf("%s:%d", keyPrefixes[i], k), "hello", 0).Err())
+			}
+			for k := 0; k < 100; k++ {
+				require.NoError(t, rdb.Set(ctx, strconv.Itoa(k), "hello", 0).Err())
+			}
+		}
+
+		// Check SCAN and SCAN MATCH in each namespace
+		for i := 0; i < 2; i++ {
+			require.NoError(t, rdb.Do(ctx, "AUTH", tokens[i]).Err())
+
+			// SCAN to get all keys
+			keys := scanAll(t, rdb)
+			require.Len(t, keys, 1100)
+
+			// SCAN MATCH
+			keys = scanAll(t, rdb, "match", keyPrefixes[i])
+			require.Len(t, keys, 1000)
+		}
+	})
+
+	t.Run("SSCAN with PATTERN", func(t *testing.T) {
+		require.NoError(t, rdb.Del(ctx, "mykey").Err())
+		require.NoError(t, rdb.SAdd(ctx, "mykey", "foo", "fab", "fiz", "foobar", 1, 2, 3, 4).Err())
+		keys, _, err := rdb.SScan(ctx, "mykey", 0, "foo*", 10000).Result()
+		require.NoError(t, err)
+		slices.Sort(keys)
+		keys = slices.Compact(keys)
+		require.Equal(t, []string{"foo", "foobar"}, keys)
+	})
+
+	t.Run("HSCAN with PATTERN", func(t *testing.T) {
+		require.NoError(t, rdb.Del(ctx, "mykey").Err())
+		require.NoError(t, rdb.HMSet(ctx, "mykey", "foo", 1, "fab", 2, "fiz", 3, "foobar", 10, 1, "a", 2, "b", 3, "c", 4, "d").Err())
+		keys, _, err := rdb.HScan(ctx, "mykey", 0, "foo*", 10000).Result()
+		require.NoError(t, err)
+		slices.Sort(keys)
+		keys = slices.Compact(keys)
+		require.Equal(t, []string{"1", "10", "foo", "foobar"}, keys)
+	})
+
+	t.Run("ZSCAN with PATTERN", func(t *testing.T) {
+		members := []redis.Z{
+			{Score: 1, Member: "foo"},
+			{Score: 2, Member: "fab"},
+			{Score: 3, Member: "fiz"},
+			{Score: 10, Member: "foobar"},
+		}
+		require.NoError(t, rdb.Del(ctx, "mykey").Err())
+		require.NoError(t, rdb.ZAdd(ctx, "mykey", members...).Err())
+		keys, _, err := rdb.ZScan(ctx, "mykey", 0, "foo*", 10000).Result()
+		require.NoError(t, err)
+		slices.Sort(keys)
+		keys = slices.Compact(keys)
+		require.Equal(t, []string{"1", "10", "foo", "foobar"}, keys)
+	})
+
+	for _, test := range []struct {
+		name   string
+		keyGen func(int) interface{}
+	}{
+		{"SSCAN with encoding intset", func(i int) interface{} { return i }},
+		{"SSCAN with encoding hashtable", func(i int) interface{} { return fmt.Sprintf("ele:%d", i) }},
+	} {
+		t.Run(test.name, func(t *testing.T) {
+			require.NoError(t, rdb.Del(ctx, "set").Err())
+			var elements []interface{}
+			for i := 0; i < 100; i++ {
+				elements = append(elements, test.keyGen(i))
+			}
+			require.NoError(t, rdb.SAdd(ctx, "set", elements...).Err())
+			keys, _, err := rdb.SScan(ctx, "set", 0, "", 10000).Result()
+			require.NoError(t, err)
+			slices.Sort(keys)
+			keys = slices.Compact(keys)
+			require.Len(t, keys, 100)
+		})
+	}
+
+	for _, test := range []struct {
+		name  string
+		count int
+	}{
+		{"HSCAN with encoding ziplist", 30},
+		{"HSCAN with encoding hashtable", 1000},
+	} {
+		t.Run(test.name, func(t *testing.T) {
+			require.NoError(t, rdb.Del(ctx, "hash").Err())
+			var elements []interface{}
+			for i := 0; i < test.count; i++ {
+				elements = append(elements, fmt.Sprintf("key:%d", i), i)
+			}
+			require.NoError(t, rdb.HMSet(ctx, "hash", elements...).Err())
+			keys, _, err := rdb.HScan(ctx, "hash", 0, "", 10000).Result()
+			require.NoError(t, err)
+			var hashKeys []string
+
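+			// HSCAN replies with fields and values interleaved, so take each
+			// field (hashKey) together with the value that follows it and check
+			// that the field is exactly "key:<value>".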
+			var hashKey string
+			for _, key := range keys {
+				if hashKey != "" {
+					require.Equal(t, fmt.Sprintf("key:%s", key), hashKey)
+					hashKeys = append(hashKeys, hashKey)
+					hashKey = ""
+				} else {
+					hashKey = key
+				}
+			}
+			require.Len(t, hashKeys, test.count)
+		})
+	}
+
+	for _, test := range []struct {
+		name  string
+		count int
+	}{
+		{"ZSCAN with encoding ziplist", 30},
+		{"ZSCAN with encoding skiplist", 1000},
+	} {
+		t.Run(test.name, func(t *testing.T) {
+			require.NoError(t, rdb.Del(ctx, "zset").Err())
+			var elements []redis.Z
+			for i := 0; i < test.count; i++ {
+				elements = append(elements, redis.Z{
+					Score:  float64(i),
+					Member: fmt.Sprintf("key:%d", i),
+				})
+			}
+			require.NoError(t, rdb.ZAdd(ctx, "zset", elements...).Err())
+			keys, _, err := rdb.ZScan(ctx, "zset", 0, "", 10000).Result()
+			require.NoError(t, err)
+			var zsetKeys []string
+
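+			// ZSCAN replies with members and scores interleaved, so take each
+			// member (zsetKey) together with the score that follows it and check
+			// that the member is exactly "key:<score>".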
+			var zsetKey string
+			for _, key := range keys {
+				if zsetKey != "" {
+					require.Equal(t, fmt.Sprintf("key:%s", key), zsetKey)
+					zsetKeys = append(zsetKeys, zsetKey)
+					zsetKey = ""
+				} else {
+					zsetKey = key
+				}
+			}
+			require.Len(t, zsetKeys, test.count)
+		})
+	}
+}
+
+// SCAN of Kvrocks returns _cursor instead of cursor, so redis.Client's built-in Scan
+// can fail when its internal `cursor, err := rd.ReadInt()` call returns an error.
+//
+// This helper provides an alternative that works around it.
+func scan(t testing.TB, rdb *redis.Client, c string, args ...interface{}) (cursor string, keys []string) {
+	args = append([]interface{}{"SCAN", c}, args...)
+	r := rdb.Do(context.Background(), args...)
+	require.NoError(t, r.Err())
+	require.Len(t, r.Val(), 2)
+
+	rs := r.Val().([]interface{})
+	cursor = rs[0].(string)
+
+	for _, key := range rs[1].([]interface{}) {
+		keys = append(keys, key.(string))
+	}
+
+	return
+}
+
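+// scanAll drains SCAN starting from cursor "0" until the cursor wraps back to "0",
+// collecting every key reported along the way; extra args (e.g. MATCH, COUNT) are
+// passed through to each SCAN call.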
+func scanAll(t testing.TB, rdb *redis.Client, args ...interface{}) (keys []string) {
+	c := "0"
+	for {
+		cursor, keyList := scan(t, rdb, c, args...)
+
+		c = cursor
+		keys = append(keys, keyList...)
+
+		if c == "0" {
+			return
+		}
+	}
+}
diff --git a/tests/gocase/unit/scripting/scripting_test.go b/tests/gocase/unit/scripting/scripting_test.go
index c5e7aba..2fbee2f 100644
--- a/tests/gocase/unit/scripting/scripting_test.go
+++ b/tests/gocase/unit/scripting/scripting_test.go
@@ -488,17 +488,17 @@ func TestScriptingMasterSlave(t *testing.T) {
 	ctx := context.Background()
 
 	require.NoError(t, slaveClient.SlaveOf(ctx, master.Host(), fmt.Sprintf("%d", master.Port())).Err())
-	util.WaitForSync(t, ctx, slaveClient)
+	util.WaitForSync(t, slaveClient)
 
 	t.Run("SCRIPTING: script load on master, read on slave", func(t *testing.T) {
 		sha := masterClient.ScriptLoad(ctx, `return 'script loaded'`).Val()
 		require.Equal(t, "4167ea82ed9c381c7659f7cf93f394219147e8c4", sha)
-		util.WaitForOffsetSync(t, ctx, masterClient, slaveClient)
+		util.WaitForOffsetSync(t, masterClient, slaveClient)
 		require.Equal(t, []bool{true}, masterClient.ScriptExists(ctx, sha).Val())
 		require.Equal(t, []bool{true}, slaveClient.ScriptExists(ctx, sha).Val())
 
 		require.NoError(t, masterClient.ScriptFlush(ctx).Err())
-		util.WaitForOffsetSync(t, ctx, masterClient, slaveClient)
+		util.WaitForOffsetSync(t, masterClient, slaveClient)
 		require.Equal(t, []bool{false}, masterClient.ScriptExists(ctx, sha).Val())
 		require.Equal(t, []bool{false}, slaveClient.ScriptExists(ctx, sha).Val())
 	})
diff --git a/tests/gocase/util/conditions.go b/tests/gocase/util/client.go
similarity index 64%
rename from tests/gocase/util/conditions.go
rename to tests/gocase/util/client.go
index d6ac7a8..d999c11 100644
--- a/tests/gocase/util/conditions.go
+++ b/tests/gocase/util/client.go
@@ -31,25 +31,37 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-func FindInfoEntry(t *testing.T, ctx context.Context, rdb *redis.Client, key string, section ...string) string {
-	r := rdb.Info(ctx, section...)
+func FindInfoEntry(t testing.TB, rdb *redis.Client, key string, section ...string) string {
+	r := rdb.Info(context.Background(), section...)
 	p := regexp.MustCompile(fmt.Sprintf("%s:(.+)", key))
 	ms := p.FindStringSubmatch(r.Val())
 	require.Len(t, ms, 2)
 	return strings.TrimSpace(ms[1])
 }
 
-func WaitForSync(t *testing.T, ctx context.Context, slave *redis.Client) {
+func WaitForSync(t testing.TB, slave *redis.Client) {
 	require.Eventually(t, func() bool {
-		r := FindInfoEntry(t, ctx, slave, "master_link_status")
+		r := FindInfoEntry(t, slave, "master_link_status")
 		return r == "up"
 	}, 5*time.Second, 100*time.Millisecond)
 }
 
-func WaitForOffsetSync(t *testing.T, ctx context.Context, master, slave *redis.Client) {
+func WaitForOffsetSync(t testing.TB, master, slave *redis.Client) {
 	require.Eventually(t, func() bool {
-		o1 := FindInfoEntry(t, ctx, master, "master_repl_offset")
-		o2 := FindInfoEntry(t, ctx, slave, "master_repl_offset")
+		o1 := FindInfoEntry(t, master, "master_repl_offset")
+		o2 := FindInfoEntry(t, slave, "master_repl_offset")
 		return o1 == o2
 	}, 5*time.Second, 100*time.Millisecond)
 }
+
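+// Populate writes n string keys named "<prefix>0" .. "<prefix>n-1", each holding a
+// value of size repeated "A" characters, issued in a single pipelined batch.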
+func Populate(t testing.TB, rdb *redis.Client, prefix string, n, size int) {
+	ctx := context.Background()
+	p := rdb.Pipeline()
+
+	for i := 0; i < n; i++ {
+		p.Do(ctx, "SET", fmt.Sprintf("%s%d", prefix, i), strings.Repeat("A", size))
+	}
+
+	_, err := p.Exec(ctx)
+	require.NoError(t, err)
+}
diff --git a/tests/gocase/util/server.go b/tests/gocase/util/server.go
index d6f5285..11bb69e 100644
--- a/tests/gocase/util/server.go
+++ b/tests/gocase/util/server.go
@@ -61,7 +61,7 @@ func (s *KvrocksServer) NewClientWithOption(options *redis.Options) *redis.Clien
 	return redis.NewClient(options)
 }
 
-func (s *KvrocksServer) NewTCPClient() *tcpClient {
+func (s *KvrocksServer) NewTCPClient() *TCPClient {
 	c, err := net.Dial(s.addr.Network(), s.addr.String())
 	require.NoError(s.t, err)
 	return newTCPClient(c)
diff --git a/tests/gocase/util/tcp_client.go b/tests/gocase/util/tcp_client.go
index 2a69ffe..af6d45a 100644
--- a/tests/gocase/util/tcp_client.go
+++ b/tests/gocase/util/tcp_client.go
@@ -27,25 +27,25 @@ import (
 	"strings"
 )
 
-type tcpClient struct {
+type TCPClient struct {
 	c net.Conn
 	r *bufio.Reader
 	w *bufio.Writer
 }
 
-func newTCPClient(c net.Conn) *tcpClient {
-	return &tcpClient{
+func newTCPClient(c net.Conn) *TCPClient {
+	return &TCPClient{
 		c: c,
 		r: bufio.NewReader(c),
 		w: bufio.NewWriter(c),
 	}
 }
 
-func (c *tcpClient) Close() error {
+func (c *TCPClient) Close() error {
 	return c.c.Close()
 }
 
-func (c *tcpClient) ReadLine() (string, error) {
+func (c *TCPClient) ReadLine() (string, error) {
 	r, err := c.r.ReadString('\n')
 	if err != nil {
 		return "", err
@@ -53,7 +53,7 @@ func (c *tcpClient) ReadLine() (string, error) {
 	return strings.TrimSuffix(r, "\r\n"), nil
 }
 
-func (c *tcpClient) Write(s string) error {
+func (c *TCPClient) Write(s string) error {
 	_, err := c.w.WriteString(s)
 	if err != nil {
 		return err
@@ -61,7 +61,7 @@ func (c *tcpClient) Write(s string) error {
 	return c.w.Flush()
 }
 
-func (c *tcpClient) WriteArgs(args ...string) error {
+func (c *TCPClient) WriteArgs(args ...string) error {
 	if args == nil {
 		return errors.New("args cannot be nil")
 	}
diff --git a/tests/tcl/tests/test_helper.tcl b/tests/tcl/tests/test_helper.tcl
index 0596bf7..8808a74 100644
--- a/tests/tcl/tests/test_helper.tcl
+++ b/tests/tcl/tests/test_helper.tcl
@@ -33,7 +33,6 @@ source tests/support/test.tcl
 source tests/support/util.tcl
 
 set ::all_tests {
-    unit/scan
     unit/type/string
     unit/type/list
     unit/type/zset
diff --git a/tests/tcl/tests/unit/scan.tcl b/tests/tcl/tests/unit/scan.tcl
deleted file mode 100644
index 7653ca7..0000000
--- a/tests/tcl/tests/unit/scan.tcl
+++ /dev/null
@@ -1,292 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-# Copyright (c) 2006-2020, Salvatore Sanfilippo
-# See bundled license file licenses/LICENSE.redis for details.
-
-# This file is copied and modified from the Redis project,
-# which started out as: https://github.com/redis/redis/blob/dbcc0a8/tests/unit/scan.tcl
-
-start_server {tags {"scan network"}} {
-    test "SCAN basic" {
-        r flushdb
-        populate 1000 "" 10
-
-        set cur 0
-        set keys {}
-        while 1 {
-            set res [r scan $cur]
-            set cur [lindex $res 0]
-            set k [lindex $res 1]
-            lappend keys {*}$k
-            if {$cur == 0} break
-        }
-
-        set keys [lsort -unique $keys]
-        assert_equal 1000 [llength $keys]
-    }
-
-    test "SCAN COUNT" {
-        r flushdb
-        populate 1000 "" 10
-
-        set cur 0
-        set keys {}
-        while 1 {
-            set res [r scan $cur count 5]
-            set cur [lindex $res 0]
-            set k [lindex $res 1]
-            lappend keys {*}$k
-            if {$cur == 0} break
-        }
-
-        set keys [lsort -unique $keys]
-        assert_equal 1000 [llength $keys]
-    }
-
-    test "SCAN MATCH" {
-        r flushdb
-        populate 1000 "key:" 10
-
-        set cur 0
-        set keys {}
-        while 1 {
-            set res [r scan $cur match "key:*"]
-            set cur [lindex $res 0]
-            set k [lindex $res 1]
-            lappend keys {*}$k
-            if {$cur == 0} break
-        }
-
-        set keys [lsort -unique $keys]
-        assert_equal 1000 [llength $keys]
-    }
-
-    foreach enc {intset hashtable} {
-        test "SSCAN with encoding $enc" {
-            # Create the Set
-            r del set
-            if {$enc eq {intset}} {
-                set prefix ""
-            } else {
-                set prefix "ele:"
-            }
-            set elements {}
-            for {set j 0} {$j < 100} {incr j} {
-                lappend elements ${prefix}${j}
-            }
-            r sadd set {*}$elements
-
-            # Test SSCAN
-            set cur 0
-            set keys {}
-            while 1 {
-                set res [r sscan set $cur]
-                set cur [lindex $res 0]
-                set k [lindex $res 1]
-                lappend keys {*}$k
-                if {$cur == 0} break
-            }
-
-            set keys [lsort -unique $keys]
-            assert_equal 100 [llength $keys]
-        }
-    }
-
-    foreach enc {ziplist hashtable} {
-        test "HSCAN with encoding $enc" {
-            # Create the Hash
-            r del hash
-            if {$enc eq {ziplist}} {
-                set count 30
-            } else {
-                set count 1000
-            }
-            set elements {}
-            for {set j 0} {$j < $count} {incr j} {
-                lappend elements key:$j $j
-            }
-            r hmset hash {*}$elements
-
-            # Test HSCAN
-            set cur 0
-            set keys {}
-            while 1 {
-                set res [r hscan hash $cur]
-                set cur [lindex $res 0]
-                set k [lindex $res 1]
-                lappend keys {*}$k
-                if {$cur == 0} break
-            }
-
-            set keys2 {}
-            foreach {k v} $keys {
-                assert {$k eq "key:$v"}
-                lappend keys2 $k
-            }
-
-            set keys2 [lsort -unique $keys2]
-            assert_equal $count [llength $keys2]
-        }
-    }
-
-    foreach enc {ziplist skiplist} {
-        test "ZSCAN with encoding $enc" {
-            # Create the Sorted Set
-            r del zset
-            if {$enc eq {ziplist}} {
-                set count 30
-            } else {
-                set count 1000
-            }
-            set elements {}
-            for {set j 0} {$j < $count} {incr j} {
-                lappend elements $j key:$j
-            }
-            r zadd zset {*}$elements
-
-            # Test ZSCAN
-            set cur 0
-            set keys {}
-            while 1 {
-                set res [r zscan zset $cur]
-                set cur [lindex $res 0]
-                set k [lindex $res 1]
-                lappend keys {*}$k
-                if {$cur == 0} break
-            }
-
-            set keys2 {}
-            foreach {k v} $keys {
-                assert {$k eq "key:$v"}
-                lappend keys2 $k
-            }
-
-            set keys2 [lsort -unique $keys2]
-            assert_equal $count [llength $keys2]
-        }
-    }
-
-    test "SCAN guarantees check under write load" {
-        r flushdb
-        populate 100 "" 10
-
-        # We start scanning here, so keys from 0 to 99 should all be
-        # reported at the end of the iteration.
-        set keys {}
-        while 1 {
-            set res [r scan $cur]
-            set cur [lindex $res 0]
-            set k [lindex $res 1]
-            lappend keys {*}$k
-            if {$cur == 0} break
-            # Write 10 random keys at every SCAN iteration.
-            for {set j 0} {$j < 10} {incr j} {
-                r set addedkey:[randomInt 1000] foo
-            }
-        }
-
-        set keys2 {}
-        foreach k $keys {
-            if {[string length $k] > 6} continue
-            lappend keys2 $k
-        }
-
-        set keys2 [lsort -unique $keys2]
-        assert_equal 100 [llength $keys2]
-    }
-
-    test "SSCAN with PATTERN" {
-        r del mykey
-        r sadd mykey foo fab fiz foobar 1 2 3 4
-        set res [r sscan mykey 0 MATCH foo* COUNT 10000]
-        lsort -unique [lindex $res 1]
-    } {foo foobar}
-
-    test "HSCAN with PATTERN" {
-        r del mykey
-        r hmset mykey foo 1 fab 2 fiz 3 foobar 10 1 a 2 b 3 c 4 d
-        set res [r hscan mykey 0 MATCH foo* COUNT 10000]
-        lsort -unique [lindex $res 1]
-    } {1 10 foo foobar}
-
-    test "ZSCAN with PATTERN" {
-        r del mykey
-        r zadd mykey 1 foo 2 fab 3 fiz 10 foobar
-        set res [r zscan mykey 0 MATCH foo* COUNT 10000]
-        lsort -unique [lindex $res 1]
-    }
-
-    test {SCAN with multi namespace} {
-        r flushdb
-        r config set requirepass foobared
-
-        set namespaces {test_ns1 test_ns2}
-        set tokens {test_ns_token1 test_ns_token2}
-        set key_prefixs {key1* key2*}
-
-        # Add namespaces and write key
-        set index 0
-        foreach ns $namespaces {
-            r auth foobared
-            r namespace add $ns [lindex $tokens $index]
-
-            r auth [lindex $tokens $index]
-            set prefix [lindex $key_prefixs $index]
-            for {set j 0} {$j < 1000} {incr j} {
-                r set $prefix$j hello
-            }
-            for {set j 0} {$j < 100} {incr j} {
-                r set $j hello
-            }
-
-            incr index
-        }
-
-        # Check SCAN and SCAN MATCH in different namespace
-        set index 0
-        foreach token $tokens {
-            r auth $token
-
-            # SCAN to get all keys
-            set cur 0
-            set keys {}
-            while 1 {
-                set res [r scan $cur count 1]
-                set cur [lindex $res 0]
-                set k [lindex $res 1]
-                lappend keys {*}$k
-                if {$cur == 0} break
-            }
-            assert_equal 1100 [llength $keys]
-
-            # SCAN MATCH
-            set cur 0
-            set keys {}
-            while 1 {
-                set res [r scan $cur match [lindex $key_prefixs $index]]
-                set cur [lindex $res 0]
-                set k [lindex $res 1]
-                lappend keys {*}$k
-                if {$cur == 0} break
-            }
-            assert_equal 1000 [llength $keys]
-
-            incr index
-        }
-    }
-}