Posted to commits@tvm.apache.org by tq...@apache.org on 2021/05/02 11:43:40 UTC

[tvm] branch main updated: Replace 0.0.0.0 with 127.0.0.1 for client connections (#7766)

This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new f4a680d  Replace 0.0.0.0 with 127.0.0.1 for client connections (#7766)
f4a680d is described below

commit f4a680d80cba38a503227de508c61faed29db458
Author: Robert Kimball <bo...@gmail.com>
AuthorDate: Sun May 2 04:43:12 2021 -0700

    Replace 0.0.0.0 with 127.0.0.1 for client connections (#7766)
    
    * Replace 0.0.0.0 with 127.0.0.1 for client connections, and change remaining client-side references to localhost to 127.0.0.1 so that all client addresses are consistent. 0.0.0.0 is a wildcard bind address and is not the same as localhost; servers and trackers keep 0.0.0.0 as their default bind address.
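
    For readers skimming the diff below, here is a minimal sketch of the distinction the patch enforces. It uses plain Python sockets rather than TVM's RPC API, and the port number is arbitrary: a server binds to the wildcard address 0.0.0.0 to listen on all interfaces, while a client must connect to a concrete address such as the loopback 127.0.0.1.

    ```python
    import socket
    import threading

    PORT = 9091  # assumed free port, chosen for illustration only

    # A server binds to 0.0.0.0 (the wildcard) so it accepts connections
    # arriving on any local interface.
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind(("0.0.0.0", PORT))
    srv.listen(1)

    def answer_one_client():
        conn, _ = srv.accept()
        conn.sendall(b"hello")
        conn.close()

    threading.Thread(target=answer_one_client, daemon=True).start()

    # A client needs a concrete, routable address; 0.0.0.0 is only a bind
    # address, so a local client connects to the loopback 127.0.0.1.
    cli = socket.create_connection(("127.0.0.1", PORT))
    print(cli.recv(5))  # b'hello'
    cli.close()
    srv.close()
    ```

    The changes below follow the same split: server and tracker defaults stay on 0.0.0.0, while every address passed to rpc.connect, connect_tracker, tracker_addr, or a tutorial's client-side host is normalized to 127.0.0.1.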
---
 apps/benchmark/arm_cpu_imagenet_bench.py           |  2 +-
 apps/benchmark/mobile_gpu_imagenet_bench.py        |  2 +-
 docs/deploy/arm_compute_lib.rst                    |  2 +-
 jvm/README.md                                      |  2 +-
 .../org/apache/tvm/contrib/GraphExecutorTest.java  |  2 +-
 .../src/test/java/org/apache/tvm/rpc/RPCTest.java  |  4 +-
 jvm/core/src/test/scripts/test_rpc_proxy_server.py |  2 +-
 python/tvm/auto_scheduler/measure.py               |  8 +--
 python/tvm/autotvm/database.py                     |  2 +-
 python/tvm/autotvm/graph_tuner/base_graph_tuner.py |  2 +-
 python/tvm/autotvm/measure/measure_methods.py      |  7 +-
 python/tvm/error.py                                |  2 +-
 python/tvm/exec/measure_peak.py                    |  6 +-
 python/tvm/exec/query_rpc_tracker.py               |  2 +-
 python/tvm/exec/rpc_proxy.py                       |  2 +-
 python/tvm/exec/rpc_server.py                      |  4 +-
 python/tvm/exec/rpc_tracker.py                     |  4 +-
 python/tvm/rpc/proxy.py                            |  2 +-
 python/tvm/rpc/server.py                           |  5 +-
 python/tvm/rpc/tracker.py                          |  8 +--
 .../contrib/test_arm_compute_lib/infrastructure.py |  2 +-
 .../contrib/test_arm_compute_lib/test_config.json  |  2 +-
 tests/python/contrib/test_bnns/infrastructure.py   |  2 +-
 tests/python/contrib/test_coreml_runtime.py        |  2 +-
 tests/python/contrib/test_edgetpu_runtime.py       |  2 +-
 tests/python/contrib/test_random.py                |  4 +-
 tests/python/contrib/test_rpc_tracker.py           | 14 ++--
 tests/python/contrib/test_tflite_runtime.py        |  2 +-
 tests/python/unittest/test_runtime_graph.py        |  2 +-
 tests/python/unittest/test_runtime_graph_debug.py  |  2 +-
 .../test_runtime_module_based_interface.py         |  2 +-
 tests/python/unittest/test_runtime_rpc.py          | 79 +++++++++++-----------
 tutorials/auto_scheduler/tune_network_arm.py       |  4 +-
 tutorials/auto_scheduler/tune_network_mali.py      | 10 +--
 tutorials/autotvm/tune_relay_arm.py                |  4 +-
 tutorials/autotvm/tune_relay_cuda.py               |  6 +-
 tutorials/autotvm/tune_relay_mobile_gpu.py         |  4 +-
 tutorials/frontend/deploy_model_on_android.py      |  2 +-
 vta/python/vta/exec/rpc_server.py                  |  4 +-
 vta/tutorials/autotvm/tune_relay_vta.py            |  2 +-
 web/tests/python/webgpu_rpc_test.py                |  2 +-
 web/tests/python/websock_rpc_test.py               |  2 +-
 42 files changed, 114 insertions(+), 111 deletions(-)

diff --git a/apps/benchmark/arm_cpu_imagenet_bench.py b/apps/benchmark/arm_cpu_imagenet_bench.py
index 656735e..30607f7 100644
--- a/apps/benchmark/arm_cpu_imagenet_bench.py
+++ b/apps/benchmark/arm_cpu_imagenet_bench.py
@@ -98,7 +98,7 @@ if __name__ == "__main__":
         help="The model of the test device. If your device is not listed in "
         "the choices list, pick the most similar one as argument.",
     )
-    parser.add_argument("--host", type=str, default="localhost")
+    parser.add_argument("--host", type=str, default="127.0.0.1")
     parser.add_argument("--port", type=int, default=9190)
     parser.add_argument("--rpc-key", type=str, required=True)
     parser.add_argument("--repeat", type=int, default=10)
diff --git a/apps/benchmark/mobile_gpu_imagenet_bench.py b/apps/benchmark/mobile_gpu_imagenet_bench.py
index 4eff259..01d1f13 100644
--- a/apps/benchmark/mobile_gpu_imagenet_bench.py
+++ b/apps/benchmark/mobile_gpu_imagenet_bench.py
@@ -98,7 +98,7 @@ if __name__ == "__main__":
         help="The model of the test device. If your device is not listed in "
         "the choices list, pick the most similar one as argument.",
     )
-    parser.add_argument("--host", type=str, default="localhost")
+    parser.add_argument("--host", type=str, default="127.0.0.1")
     parser.add_argument("--port", type=int, default=9190)
     parser.add_argument("--rpc-key", type=str, required=True)
     parser.add_argument("--repeat", type=int, default=30)
diff --git a/docs/deploy/arm_compute_lib.rst b/docs/deploy/arm_compute_lib.rst
index 4e43682..1abc31b 100644
--- a/docs/deploy/arm_compute_lib.rst
+++ b/docs/deploy/arm_compute_lib.rst
@@ -178,7 +178,7 @@ An example configuration for `test_config.json`:
 
     {
       "connection_type": "local",
-      "host": "localhost",
+      "host": "127.0.0.1",
       "port": 9090,
       "target": "llvm -mtriple=aarch64-linux-gnu -mattr=+neon",
       "device_key": "",
diff --git a/jvm/README.md b/jvm/README.md
index 6e71adf..a481756 100644
--- a/jvm/README.md
+++ b/jvm/README.md
@@ -164,7 +164,7 @@ server.start();
 This will open a socket and wait for remote requests. You can use Java, Python, or any other frontend to make an RPC call. Here's an example for calling remote function `test.rpc.strcat` in Java.
 
 ```java
-RPCSession client = Client.connect("localhost", port.value);
+RPCSession client = Client.connect("127.0.0.1", port.value);
 Function func = client.getFunction("test.rpc.strcat");
 String result = func.call("abc", 11L).asString();
 ```
diff --git a/jvm/core/src/test/java/org/apache/tvm/contrib/GraphExecutorTest.java b/jvm/core/src/test/java/org/apache/tvm/contrib/GraphExecutorTest.java
index 0a5fa9a..f79253e 100644
--- a/jvm/core/src/test/java/org/apache/tvm/contrib/GraphExecutorTest.java
+++ b/jvm/core/src/test/java/org/apache/tvm/contrib/GraphExecutorTest.java
@@ -86,7 +86,7 @@ public class GraphExecutorTest {
     Server server = null;
     try {
       server = TestUtils.startServer(port);
-      RPCSession remote = Client.connect("localhost", port.value);
+      RPCSession remote = Client.connect("127.0.0.1", port.value);
       Device dev = remote.cpu();
 
       remote.upload(new File(libPath));
diff --git a/jvm/core/src/test/java/org/apache/tvm/rpc/RPCTest.java b/jvm/core/src/test/java/org/apache/tvm/rpc/RPCTest.java
index 0718164..641633d 100644
--- a/jvm/core/src/test/java/org/apache/tvm/rpc/RPCTest.java
+++ b/jvm/core/src/test/java/org/apache/tvm/rpc/RPCTest.java
@@ -47,7 +47,7 @@ public class RPCTest {
     Server server = null;
     try {
       server = TestUtils.startServer(port);
-      RPCSession client = Client.connect("localhost", port.value);
+      RPCSession client = Client.connect("127.0.0.1", port.value);
       Function func = client.getFunction("test.rpc.addone");
       assertEquals(11L, func.call(10).asLong());
     } finally {
@@ -73,7 +73,7 @@ public class RPCTest {
     Server server = null;
     try {
       server = TestUtils.startServer(port);
-      RPCSession client = Client.connect("localhost", port.value);
+      RPCSession client = Client.connect("127.0.0.1", port.value);
       Function func = client.getFunction("test.rpc.strcat");
       assertEquals("abc:11", func.call("abc", 11L).asString());
     } finally {
diff --git a/jvm/core/src/test/scripts/test_rpc_proxy_server.py b/jvm/core/src/test/scripts/test_rpc_proxy_server.py
index d3e23e1..664b65d 100644
--- a/jvm/core/src/test/scripts/test_rpc_proxy_server.py
+++ b/jvm/core/src/test/scripts/test_rpc_proxy_server.py
@@ -19,7 +19,7 @@ from tvm.rpc import proxy
 
 
 def start_proxy_server(port, timeout):
-    prox = proxy.Proxy("localhost", port=port, port_end=port + 1)
+    prox = proxy.Proxy("127.0.0.1", port=port, port_end=port + 1)
     if timeout > 0:
         import time
 
diff --git a/python/tvm/auto_scheduler/measure.py b/python/tvm/auto_scheduler/measure.py
index e777214..55a9114 100644
--- a/python/tvm/auto_scheduler/measure.py
+++ b/python/tvm/auto_scheduler/measure.py
@@ -552,20 +552,18 @@ class LocalRPCMeasureContext:
         if dev.exist:
             cuda_arch = "sm_" + "".join(dev.compute_version.split("."))
             set_cuda_target_arch(cuda_arch)
-        host = "0.0.0.0"
-        self.tracker = Tracker(host, port=9000, port_end=10000, silent=True)
+        self.tracker = Tracker(port=9000, port_end=10000, silent=True)
         device_key = "$local$device$%d" % self.tracker.port
         self.server = Server(
-            host,
             port=self.tracker.port,
             port_end=10000,
             key=device_key,
             silent=True,
-            tracker_addr=(self.tracker.host, self.tracker.port),
+            tracker_addr=("127.0.0.1", self.tracker.port),
         )
         self.runner = RPCRunner(
             device_key,
-            host,
+            "127.0.0.1",
             self.tracker.port,
             priority,
             n_parallel,
diff --git a/python/tvm/autotvm/database.py b/python/tvm/autotvm/database.py
index 6bb02e8..4c4ed64 100644
--- a/python/tvm/autotvm/database.py
+++ b/python/tvm/autotvm/database.py
@@ -111,7 +111,7 @@ class RedisDatabase(Database):
         import redis
 
         if db_index == RedisDatabase.REDIS_TEST:
-            host = "localhost"
+            host = "127.0.0.1"
         else:
             host = os.environ.get("TVM_FLEET_HOST")
         self.db = redis.StrictRedis(host=host, port=6379, db=db_index)
diff --git a/python/tvm/autotvm/graph_tuner/base_graph_tuner.py b/python/tvm/autotvm/graph_tuner/base_graph_tuner.py
index b307130..780e6c9 100644
--- a/python/tvm/autotvm/graph_tuner/base_graph_tuner.py
+++ b/python/tvm/autotvm/graph_tuner/base_graph_tuner.py
@@ -368,7 +368,7 @@ class BaseGraphTuner(object):
         timeout=10,
         use_rpc=False,
         device_key=None,
-        host="localhost",
+        host="127.0.0.1",
         port=9190,
         n_parallel=1,
         build_func="default",
diff --git a/python/tvm/autotvm/measure/measure_methods.py b/python/tvm/autotvm/measure/measure_methods.py
index 6d01140..60a26ec 100644
--- a/python/tvm/autotvm/measure/measure_methods.py
+++ b/python/tvm/autotvm/measure/measure_methods.py
@@ -399,18 +399,17 @@ class LocalRunner(RPCRunner):
         from ...rpc.server import Server
 
         self.task = task
-        tracker = Tracker("0.0.0.0", port=9000, port_end=10000, silent=True)
+        tracker = Tracker(port=9000, port_end=10000, silent=True)
         device_key = "$local$device$%d" % tracker.port
         server = Server(
-            "0.0.0.0",
             port=9000,
             port_end=10000,
             key=device_key,
             silent=True,
-            tracker_addr=(tracker.host, tracker.port),
+            tracker_addr=("127.0.0.1", tracker.port),
         )
         self.key = device_key
-        self.host = tracker.host
+        self.host = "127.0.0.1"
         self.port = tracker.port
 
         super(LocalRunner, self).set_task(task)
diff --git a/python/tvm/error.py b/python/tvm/error.py
index 5502fe8..9755270 100644
--- a/python/tvm/error.py
+++ b/python/tvm/error.py
@@ -63,7 +63,7 @@ register_error("IndexError", IndexError)
 
 
 @register_error
-class RPCError(RuntimeError):
+class RPCError(TVMError):
     """Error thrown by the remote server handling the RPC call."""
 
 
diff --git a/python/tvm/exec/measure_peak.py b/python/tvm/exec/measure_peak.py
index d8840fa..6db6108 100644
--- a/python/tvm/exec/measure_peak.py
+++ b/python/tvm/exec/measure_peak.py
@@ -17,7 +17,7 @@
 """measure bandwidth and compute peak
 
 e.g.
-python3 -m tvm.exec.measure_peak --target cuda --rpc-host 0.0.0.0 --rpc-port 9090
+python3 -m tvm.exec.measure_peak --target cuda --rpc-host 127.0.0.1 --rpc-port 9090
 python3 -m tvm.exec.measure_peak --target opencl --target-host "llvm -mtriple=aarch64-linux-gnu" \
         --rpc-host $TVM_OPENCL_DEVICE_HOST --rpc-port 9090
 """
@@ -30,14 +30,14 @@ from ..contrib.peak import measure_peak_all
 
 
 def main():
-    """Main funciton"""
+    """Main function"""
     parser = argparse.ArgumentParser()
     parser.add_argument("--target", type=str, default="llvm", help="The build target")
     parser.add_argument(
         "--target-host", type=str, default=None, help="The host code compilation target"
     )
     parser.add_argument(
-        "--rpc-host", type=str, default="0.0.0.0", help="the hostname of the server"
+        "--rpc-host", type=str, default="127.0.0.1", help="the hostname of the server"
     )
     parser.add_argument("--rpc-port", type=int, default=9090, help="The port of the RPC")
 
diff --git a/python/tvm/exec/query_rpc_tracker.py b/python/tvm/exec/query_rpc_tracker.py
index 3603251..febd093 100644
--- a/python/tvm/exec/query_rpc_tracker.py
+++ b/python/tvm/exec/query_rpc_tracker.py
@@ -33,7 +33,7 @@ def main():
 
     # default to local host or environment variable
     if not args.host:
-        args.host = os.environ.get("TVM_TRACKER_HOST", "localhost")
+        args.host = os.environ.get("TVM_TRACKER_HOST", "127.0.0.1")
 
     if not args.port:
         args.port = int(os.environ.get("TVM_TRACKER_PORT", "9190"))
diff --git a/python/tvm/exec/rpc_proxy.py b/python/tvm/exec/rpc_proxy.py
index bf315fd..3aa93e6 100644
--- a/python/tvm/exec/rpc_proxy.py
+++ b/python/tvm/exec/rpc_proxy.py
@@ -69,7 +69,7 @@ def main(args):
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument("--host", type=str, default="localhost", help="the hostname of the server")
+    parser.add_argument("--host", type=str, default="127.0.0.1", help="the hostname of the server")
     parser.add_argument("--port", type=int, default=9090, help="The port of the RPC")
     parser.add_argument(
         "--web-port", type=int, default=8888, help="The port of the http/websocket server"
diff --git a/python/tvm/exec/rpc_server.py b/python/tvm/exec/rpc_server.py
index 6b3e93e..ceab65c 100644
--- a/python/tvm/exec/rpc_server.py
+++ b/python/tvm/exec/rpc_server.py
@@ -54,7 +54,9 @@ def main(args):
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument("--host", type=str, default="0.0.0.0", help="the hostname of the server")
+    parser.add_argument(
+        "--host", type=str, default="0.0.0.0", help="The host IP address the tracker binds to"
+    )
     parser.add_argument("--port", type=int, default=9090, help="The port of the RPC")
     parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC")
     parser.add_argument(
diff --git a/python/tvm/exec/rpc_tracker.py b/python/tvm/exec/rpc_tracker.py
index 4a1a964..05809e0 100644
--- a/python/tvm/exec/rpc_tracker.py
+++ b/python/tvm/exec/rpc_tracker.py
@@ -33,7 +33,9 @@ def main(args):
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser()
-    parser.add_argument("--host", type=str, default="0.0.0.0", help="the hostname of the tracker")
+    parser.add_argument(
+        "--host", type=str, default="0.0.0.0", help="The host IP address the tracker binds to"
+    )
     parser.add_argument("--port", type=int, default=9190, help="The port of the RPC")
     parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC")
     parser.add_argument(
diff --git a/python/tvm/rpc/proxy.py b/python/tvm/rpc/proxy.py
index 7e02bd7..e5ec73d 100644
--- a/python/tvm/rpc/proxy.py
+++ b/python/tvm/rpc/proxy.py
@@ -512,7 +512,7 @@ class PopenProxyServerState(object):
                 self.port = my_port
                 break
             except socket.error as sock_err:
-                if sock_err.errno in [98, 48]:
+                if sock_err.errno in [errno.EADDRINUSE]:
                     continue
                 raise sock_err
         if not self.port:
diff --git a/python/tvm/rpc/server.py b/python/tvm/rpc/server.py
index 3fd6996..bfde74f 100644
--- a/python/tvm/rpc/server.py
+++ b/python/tvm/rpc/server.py
@@ -34,6 +34,7 @@ import logging
 import threading
 import multiprocessing
 import time
+import errno
 import tvm._ffi
 
 from tvm._ffi.base import py_str
@@ -333,7 +334,7 @@ class PopenRPCServerState(object):
                     self.port = my_port
                     break
                 except socket.error as sock_err:
-                    if sock_err.errno in [98, 48]:
+                    if sock_err.errno in [errno.EADDRINUSE]:
                         continue
                     raise sock_err
             if not self.port:
@@ -423,7 +424,7 @@ class Server(object):
 
     def __init__(
         self,
-        host,
+        host="0.0.0.0",
         port=9091,
         port_end=9199,
         is_proxy=False,
diff --git a/python/tvm/rpc/tracker.py b/python/tvm/rpc/tracker.py
index e1c366e..9dc4139 100644
--- a/python/tvm/rpc/tracker.py
+++ b/python/tvm/rpc/tracker.py
@@ -367,7 +367,7 @@ def _tracker_server(listen_sock, stop_key):
 
 
 class Tracker(object):
-    """Start RPC tracker on a seperate process.
+    """Start RPC tracker on a separate process.
 
     Python implementation based on multi-processing.
 
@@ -386,7 +386,7 @@ class Tracker(object):
         Whether run in silent mode
     """
 
-    def __init__(self, host, port=9190, port_end=9199, silent=False):
+    def __init__(self, host="0.0.0.0", port=9190, port_end=9199, silent=False):
         if silent:
             logger.setLevel(logging.WARN)
 
@@ -399,7 +399,7 @@ class Tracker(object):
                 self.port = my_port
                 break
             except socket.error as sock_err:
-                if sock_err.errno in [98, 48]:
+                if sock_err.errno in [errno.EADDRINUSE]:
                     continue
                 raise sock_err
         if not self.port:
@@ -414,7 +414,7 @@ class Tracker(object):
 
     def _stop_tracker(self):
         sock = socket.socket(base.get_addr_family((self.host, self.port)), socket.SOCK_STREAM)
-        sock.connect((self.host, self.port))
+        sock.connect(("127.0.0.1", self.port))
         sock.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
         magic = struct.unpack("<i", base.recvall(sock, 4))[0]
         assert magic == base.RPC_TRACKER_MAGIC
diff --git a/tests/python/contrib/test_arm_compute_lib/infrastructure.py b/tests/python/contrib/test_arm_compute_lib/infrastructure.py
index 35f345c..7e05194 100644
--- a/tests/python/contrib/test_arm_compute_lib/infrastructure.py
+++ b/tests/python/contrib/test_arm_compute_lib/infrastructure.py
@@ -69,7 +69,7 @@ class Device:
     """
 
     connection_type = "local"
-    host = "localhost"
+    host = "127.0.0.1"
     port = 9090
     target = "llvm -mtriple=aarch64-linux-gnu -mattr=+neon"
     device_key = ""
diff --git a/tests/python/contrib/test_arm_compute_lib/test_config.json b/tests/python/contrib/test_arm_compute_lib/test_config.json
index c8168ae..5c75e65 100644
--- a/tests/python/contrib/test_arm_compute_lib/test_config.json
+++ b/tests/python/contrib/test_arm_compute_lib/test_config.json
@@ -1,6 +1,6 @@
 {
   "connection_type": "local",
-  "host": "localhost",
+  "host": "127.0.0.1",
   "port": 9090,
   "target": "llvm -mtriple=aarch64-linux-gnu -mattr=+neon",
   "device_key": "",
diff --git a/tests/python/contrib/test_bnns/infrastructure.py b/tests/python/contrib/test_bnns/infrastructure.py
index d046ee9..ebdf815 100644
--- a/tests/python/contrib/test_bnns/infrastructure.py
+++ b/tests/python/contrib/test_bnns/infrastructure.py
@@ -67,7 +67,7 @@ class Device:
     """
 
     connection_type = "local"
-    host = "localhost"
+    host = "127.0.0.1"
     port = 9090
     target = "llvm"
     device_key = ""
diff --git a/tests/python/contrib/test_coreml_runtime.py b/tests/python/contrib/test_coreml_runtime.py
index 447b412..4a2ce48 100644
--- a/tests/python/contrib/test_coreml_runtime.py
+++ b/tests/python/contrib/test_coreml_runtime.py
@@ -23,7 +23,7 @@ from tvm.contrib import utils, xcode, coreml_runtime
 import pytest
 import os
 
-proxy_host = os.environ.get("TVM_IOS_RPC_PROXY_HOST", "localhost")
+proxy_host = os.environ.get("TVM_IOS_RPC_PROXY_HOST", "127.0.0.1")
 proxy_port = os.environ.get("TVM_IOS_RPC_PROXY_PORT", 9090)
 destination = os.environ.get("TVM_IOS_RPC_DESTINATION", "")
 key = "iphone"
diff --git a/tests/python/contrib/test_edgetpu_runtime.py b/tests/python/contrib/test_edgetpu_runtime.py
index a3a0160..0af2cf2 100644
--- a/tests/python/contrib/test_edgetpu_runtime.py
+++ b/tests/python/contrib/test_edgetpu_runtime.py
@@ -65,7 +65,7 @@ def skipped_test_tflite_runtime():
         tflite_output = interpreter.get_tensor(output_details[0]["index"])
 
         # inference via remote tvm tflite runtime
-        server = rpc.Server("localhost")
+        server = rpc.Server("127.0.0.1")
         remote = rpc.connect(server.host, server.port)
         dev = remote.cpu(0)
 
diff --git a/tests/python/contrib/test_random.py b/tests/python/contrib/test_random.py
index 0ebf255..e7a1a5e 100644
--- a/tests/python/contrib/test_random.py
+++ b/tests/python/contrib/test_random.py
@@ -118,7 +118,9 @@ def test_random_fill():
             return
         if not tvm.testing.device_enabled("rpc") or not tvm.runtime.enabled("llvm"):
             return
-        server = rpc.Server("localhost")
+
+        np_ones = np.ones((512, 512), dtype=dtype)
+        server = rpc.Server("127.0.0.1")
         remote = rpc.connect(server.host, server.port)
         value = tvm.nd.empty((512, 512), dtype, remote.cpu())
         random_fill = remote.get_function("tvm.contrib.random.random_fill")
diff --git a/tests/python/contrib/test_rpc_tracker.py b/tests/python/contrib/test_rpc_tracker.py
index 83cc52f..105d1aa 100644
--- a/tests/python/contrib/test_rpc_tracker.py
+++ b/tests/python/contrib/test_rpc_tracker.py
@@ -37,18 +37,18 @@ def check_server_drop():
             base.sendjson(tclient._sock, value)
             base.recvjson(tclient._sock)
 
-        tserver = tracker.Tracker("localhost", 8888)
-        tproxy = proxy.Proxy("localhost", 8881, tracker_addr=("localhost", tserver.port))
-        tclient = rpc.connect_tracker("localhost", tserver.port)
+        tserver = tracker.Tracker("127.0.0.1", 8888)
+        tproxy = proxy.Proxy("127.0.0.1", 8881, tracker_addr=("127.0.0.1", tserver.port))
+        tclient = rpc.connect_tracker("127.0.0.1", tserver.port)
 
         server0 = rpc.Server(
-            "localhost", port=9099, tracker_addr=("localhost", tserver.port), key="abc"
+            "127.0.0.1", port=9099, tracker_addr=("127.0.0.1", tserver.port), key="abc"
         )
         server1 = rpc.Server(
-            "localhost", port=9099, tracker_addr=("localhost", tserver.port), key="xyz"
+            "127.0.0.1", port=9099, tracker_addr=("127.0.0.1", tserver.port), key="xyz"
         )
-        server2 = rpc.Server("localhost", tproxy.port, is_proxy=True, key="xyz")
-        server3 = rpc.Server("localhost", tproxy.port, is_proxy=True, key="xyz1")
+        server2 = rpc.Server("127.0.0.1", tproxy.port, is_proxy=True, key="xyz")
+        server3 = rpc.Server("127.0.0.1", tproxy.port, is_proxy=True, key="xyz1")
 
         # Fault tolerence to un-handled requested value
         _put(tclient, [TrackerCode.REQUEST, "abc", "", 1])
diff --git a/tests/python/contrib/test_tflite_runtime.py b/tests/python/contrib/test_tflite_runtime.py
index 222dcd4..f3369ac 100644
--- a/tests/python/contrib/test_tflite_runtime.py
+++ b/tests/python/contrib/test_tflite_runtime.py
@@ -128,7 +128,7 @@ def test_remote():
     tflite_output = interpreter.get_tensor(output_details[0]["index"])
 
     # inference via remote tvm tflite runtime
-    server = rpc.Server("localhost")
+    server = rpc.Server("127.0.0.1")
     remote = rpc.connect(server.host, server.port)
     a = remote.upload(tflite_model_path)
 
diff --git a/tests/python/unittest/test_runtime_graph.py b/tests/python/unittest/test_runtime_graph.py
index 5f0c783..aac7e49 100644
--- a/tests/python/unittest/test_runtime_graph.py
+++ b/tests/python/unittest/test_runtime_graph.py
@@ -67,7 +67,7 @@ def test_graph_simple():
 
     def check_remote():
         mlib = tvm.build(s, [A, B], "llvm", name="myadd")
-        server = rpc.Server("localhost")
+        server = rpc.Server("127.0.0.1")
         remote = rpc.connect(server.host, server.port)
         temp = utils.tempdir()
         dev = remote.cpu(0)
diff --git a/tests/python/unittest/test_runtime_graph_debug.py b/tests/python/unittest/test_runtime_graph_debug.py
index 6cab75d..b7df33c 100644
--- a/tests/python/unittest/test_runtime_graph_debug.py
+++ b/tests/python/unittest/test_runtime_graph_debug.py
@@ -162,7 +162,7 @@ def test_graph_simple():
 
     def check_remote():
         mlib = tvm.build(s, [A, B], "llvm", name="myadd")
-        server = rpc.Server("localhost")
+        server = rpc.Server("127.0.0.1")
         remote = rpc.connect(server.host, server.port)
         temp = utils.tempdir()
         dev = remote.cpu(0)
diff --git a/tests/python/unittest/test_runtime_module_based_interface.py b/tests/python/unittest/test_runtime_module_based_interface.py
index 088f0a2..1d80c60 100644
--- a/tests/python/unittest/test_runtime_module_based_interface.py
+++ b/tests/python/unittest/test_runtime_module_based_interface.py
@@ -275,7 +275,7 @@ def test_mod_export():
 
         from tvm import rpc
 
-        server = rpc.Server("localhost", port=9094)
+        server = rpc.Server("127.0.0.1", port=9094)
         remote = rpc.connect(server.host, server.port)
         remote.upload(path_lib)
         loaded_lib = remote.load_module(path_lib)
diff --git a/tests/python/unittest/test_runtime_rpc.py b/tests/python/unittest/test_runtime_rpc.py
index a74f893..1fb8446 100644
--- a/tests/python/unittest/test_runtime_rpc.py
+++ b/tests/python/unittest/test_runtime_rpc.py
@@ -45,7 +45,8 @@ if __name__ == "__main__":
 # run individual functions. Somewhere along the way, the imports are being
 # lost, so the server ends up not registering the functions.
 pytestmark = pytest.mark.skipif(
-    multiprocessing.get_start_method() != "fork",
+    # Windows does not support fork so we can enable Windows for testing
+    sys.platform.startswith("win") == False and multiprocessing.get_start_method() != "fork",
     reason=(
         "pytest + multiprocessing spawn method causes tvm.register_func to "
         "not work on the rpc.Server."
@@ -87,13 +88,13 @@ def test_bigendian_rpc():
 
 @tvm.testing.requires_rpc
 def test_rpc_simple():
-    server = rpc.Server("localhost", key="x1")
-    client = rpc.connect(server.host, server.port, key="x1")
+    server = rpc.Server(key="x1")
+    client = rpc.connect("127.0.0.1", server.port, key="x1")
     f1 = client.get_function("rpc.test.addone")
     assert f1(10) == 11
     f3 = client.get_function("rpc.test.except")
 
-    with pytest.raises(tvm.error.RPCError):
+    with pytest.raises(tvm._ffi.base.TVMError):
         f3("abc")
 
     f2 = client.get_function("rpc.test.strcat")
@@ -102,8 +103,8 @@ def test_rpc_simple():
 
 @tvm.testing.requires_rpc
 def test_rpc_runtime_string():
-    server = rpc.Server("localhost", key="x1")
-    client = rpc.connect(server.host, server.port, key="x1")
+    server = rpc.Server(key="x1")
+    client = rpc.connect("127.0.0.1", server.port, key="x1")
     func = client.get_function("rpc.test.runtime_str_concat")
     x = tvm.runtime.container.String("abc")
     y = tvm.runtime.container.String("def")
@@ -114,8 +115,8 @@ def test_rpc_runtime_string():
 def test_rpc_array():
     x = np.ones((3, 4))
 
-    server = rpc.Server("localhost")
-    remote = rpc.connect(server.host, server.port)
+    server = rpc.Server()
+    remote = rpc.connect("127.0.0.1", server.port)
     r_cpu = tvm.nd.array(x, remote.cpu(0))
     assert str(r_cpu.device).startswith("remote")
     np.testing.assert_equal(r_cpu.asnumpy(), x)
@@ -126,8 +127,8 @@ def test_rpc_array():
 @tvm.testing.requires_rpc
 def test_rpc_large_array():
     # testcase of large array creation
-    server = rpc.Server("localhost")
-    remote = rpc.connect(server.host, server.port)
+    server = rpc.Server()
+    remote = rpc.connect("127.0.0.1", server.port)
     dev = remote.cpu(0)
     a_np = np.ones((5041, 720)).astype("float32")
     b_np = np.ones((720, 192)).astype("float32")
@@ -155,8 +156,8 @@ def test_rpc_echo():
             f3 = remote.system_lib()["notexist"]
 
     temp = rpc.server._server_env([])
-    server = rpc.Server("localhost")
-    client = rpc.connect(server.host, server.port)
+    server = rpc.Server()
+    client = rpc.connect("127.0.0.1", server.port)
     check(rpc.LocalSession())
 
     check(client)
@@ -170,9 +171,9 @@ def test_rpc_echo():
         tvm.rpc.with_minrpc(cc.create_executable)(minrpc_exec, [])
         check(rpc.PopenSession(minrpc_exec))
         # minrpc on the remote
-        server = rpc.Server("localhost")
+        server = rpc.Server()
         client = rpc.connect(
-            server.host,
+            "127.0.0.1",
             server.port,
             session_constructor_args=["rpc.PopenSession", open(minrpc_exec, "rb").read()],
         )
@@ -183,8 +184,8 @@ def test_rpc_echo():
 
 @tvm.testing.requires_rpc
 def test_rpc_file_exchange():
-    server = rpc.Server("localhost")
-    remote = rpc.connect(server.host, server.port)
+    server = rpc.Server()
+    remote = rpc.connect("127.0.0.1", server.port)
     blob = bytearray(np.random.randint(0, 10, size=(10)))
     remote.upload(blob, "dat.bin")
     rev = remote.download("dat.bin")
@@ -200,14 +201,14 @@ def test_rpc_remote_module():
     B = te.compute(A.shape, lambda *i: A(*i) + 1.0, name="B")
     s = te.create_schedule(B.op)
 
-    server0 = rpc.Server("localhost", key="x0")
-    server1 = rpc.Server("localhost", key="x1")
+    server0 = rpc.Server(key="x0")
+    server1 = rpc.Server(key="x1")
 
     client = rpc.connect(
-        server0.host,
+        "127.0.0.1",
         server0.port,
         key="x0",
-        session_constructor_args=["rpc.Connect", server1.host, server1.port, "x1"],
+        session_constructor_args=["rpc.Connect", "127.0.0.1", server1.port, "x1"],
     )
 
     def check_remote(remote):
@@ -318,8 +319,8 @@ def test_rpc_remote_module():
 
 @tvm.testing.requires_rpc
 def test_rpc_return_func():
-    server = rpc.Server("localhost", key="x1")
-    client = rpc.connect(server.host, server.port, key="x1")
+    server = rpc.Server(key="x1")
+    client = rpc.connect("127.0.0.1", server.port, key="x1")
     f1 = client.get_function("rpc.test.add_to_lhs")
     fadd = f1(10)
     assert fadd(12) == 22
@@ -328,16 +329,16 @@ def test_rpc_return_func():
 @tvm.testing.requires_rpc
 def test_rpc_session_constructor_args():
     # start server
-    server0 = rpc.Server("localhost", key="x0")
-    server1 = rpc.Server("localhost", key="x1")
+    server0 = rpc.Server(key="x0")
+    server1 = rpc.Server(key="x1")
 
     def check_multi_hop():
         # use server0 as proxy to connect to server1
         client = rpc.connect(
-            server0.host,
+            "127.0.0.1",
             server0.port,
             key="x0",
-            session_constructor_args=["rpc.Connect", server1.host, server1.port, "x1"],
+            session_constructor_args=["rpc.Connect", "127.0.0.1", server1.port, "x1"],
         )
 
         fecho = client.get_function("testing.echo")
@@ -352,7 +353,7 @@ def test_rpc_session_constructor_args():
     def check_error_handling():
         with pytest.raises(tvm.error.RPCError):
             client = rpc.connect(
-                server0.host,
+                "127.0.0.1",
                 server0.port,
                 key="x0",
                 session_constructor_args=["rpc.NonExistingConstructor"],
@@ -365,8 +366,8 @@ def test_rpc_session_constructor_args():
 @tvm.testing.requires_rpc
 def test_rpc_return_ndarray():
     # start server
-    server = rpc.Server("localhost", key="x1")
-    client = rpc.connect(server.host, server.port, key="x1")
+    server = rpc.Server(key="x1")
+    client = rpc.connect("127.0.0.1", server.port, key="x1")
 
     m = client.get_function("rpc.test.remote_return_nd")
     get_arr = m("get_arr")
@@ -398,17 +399,16 @@ def test_local_func():
 @tvm.testing.requires_rpc
 def test_rpc_tracker_register():
     # test registration
-    tracker = Tracker("localhost", port=9000, port_end=10000)
+    tracker = Tracker(port=9000, port_end=10000)
     device_key = "test_device"
     server = rpc.Server(
-        "localhost",
         port=9000,
         port_end=10000,
         key=device_key,
-        tracker_addr=(tracker.host, tracker.port),
+        tracker_addr=("127.0.0.1", tracker.port),
     )
-    client = rpc.connect_tracker(tracker.host, tracker.port)
     time.sleep(1)
+    client = rpc.connect_tracker("127.0.0.1", tracker.port)
 
     summary = client.summary()
     assert summary["queue_info"][device_key]["free"] == 1
@@ -443,22 +443,19 @@ def _target(host, port, device_key, timeout):
 @tvm.testing.requires_rpc
 def test_rpc_tracker_request():
     # test concurrent request
-    tracker = Tracker("localhost", port=9000, port_end=10000)
+    tracker = Tracker(port=9000, port_end=10000)
     device_key = "test_device"
     server = rpc.Server(
-        "localhost",
         port=9000,
         port_end=10000,
         key=device_key,
-        tracker_addr=(tracker.host, tracker.port),
+        tracker_addr=("127.0.0.1", tracker.port),
     )
-    client = rpc.connect_tracker(tracker.host, tracker.port)
+    client = rpc.connect_tracker("127.0.0.1", tracker.port)
 
-    proc1 = multiprocessing.Process(
-        target=_target, args=(tracker.host, tracker.port, device_key, 4)
-    )
+    proc1 = multiprocessing.Process(target=_target, args=("127.0.0.1", tracker.port, device_key, 4))
     proc2 = multiprocessing.Process(
-        target=_target, args=(tracker.host, tracker.port, device_key, 200)
+        target=_target, args=("127.0.0.1", tracker.port, device_key, 200)
     )
     proc1.start()
     time.sleep(0.5)
diff --git a/tutorials/auto_scheduler/tune_network_arm.py b/tutorials/auto_scheduler/tune_network_arm.py
index 46d95c3..d6d8097 100644
--- a/tutorials/auto_scheduler/tune_network_arm.py
+++ b/tutorials/auto_scheduler/tune_network_arm.py
@@ -25,7 +25,7 @@ Auto-tuning for specific devices and workloads is critical for getting the
 best performance. This is a tutorial on how to tune a whole neural
 network for ARM CPU with the auto-scheduler via RPC.
 
-To auto-tune a neural network, we partition the network into small subgraphs and 
+To auto-tune a neural network, we partition the network into small subgraphs and
 tune them independently. Each subgraph is treated as one search task.
 A task scheduler slices the time and dynamically allocates time resources to
 these tasks. The task scheduler predicts the impact of each task on the end-to-end
@@ -234,7 +234,7 @@ target = tvm.target.Target("llvm -mtriple=aarch64-linux-gnu -mattr=+neon")
 
 # Also replace this with the device key, rpc host and rpc port in your tracker
 device_key = "rasp4b-64"
-rpc_host = "0.0.0.0"
+rpc_host = "127.0.0.1"
 rpc_port = 9191
 
 # Set this to True if you use ndk tools for cross compiling
diff --git a/tutorials/auto_scheduler/tune_network_mali.py b/tutorials/auto_scheduler/tune_network_mali.py
index 35751fa..8275f96 100644
--- a/tutorials/auto_scheduler/tune_network_mali.py
+++ b/tutorials/auto_scheduler/tune_network_mali.py
@@ -23,7 +23,7 @@ Auto-tuning for specific devices and workloads is critical for getting the
 best performance. This is a tutorial on how to tune a whole neural
 network for mali GPU with the auto-scheduler.
 
-To auto-tune a neural network, we partition the network into small subgraphs and 
+To auto-tune a neural network, we partition the network into small subgraphs and
 tune them independently. Each subgraph is treated as one search task.
 A task scheduler slices the time and dynamically allocates time resources to
 these tasks. The task scheduler predicts the impact of each task on the end-to-end
@@ -180,11 +180,11 @@ for idx, task in enumerate(tasks):
 #   .. code-block:: python
 #
 #     from tvm.auto_scheduler.utils import request_remote
-#     remote = request_remote(device_key, "0.0.0.0", 9190)
+#     remote = request_remote(device_key, "127.0.0.1", 9190)
 #     dev = remote.cl()
 #     max_shared_memory_per_block = dev.max_shared_memory_per_block
 #     # There is no explicit local memory limition
-#     # so we can use INT32_MAX to disalbe the check on local_memory.
+#     # so we can use INT32_MAX to disable the check on local_memory.
 #     max_local_memory_per_block = 2147483647 # INT32_MAX
 #     max_threads_per_block = dev.max_threads_per_block
 #     max_vthread_extent = int(dev.warp_size / 4) if int(dev.warp_size / 4) > 1 else dev.warp_size
@@ -228,7 +228,7 @@ def tune_and_evaluate():
         num_measure_trials=200,  # change this to 20000 to achieve the best performance
         builder=auto_scheduler.LocalBuilder(build_func="ndk" if use_ndk else "default"),
         runner=auto_scheduler.RPCRunner(
-            device_key, host="0.0.0.0", port=9190, repeat=3, timeout=50
+            device_key, host="127.0.0.1", port=9190, repeat=3, timeout=50
         ),
         measure_callbacks=[auto_scheduler.RecordToFile(log_file)],
     )
@@ -247,7 +247,7 @@ def tune_and_evaluate():
     print("=============== Request Remote ===============")
     from tvm.auto_scheduler.utils import request_remote
 
-    remote = request_remote(device_key, "0.0.0.0", 9190)
+    remote = request_remote(device_key, "127.0.0.1", 9190)
     dev = remote.cl()
     from tvm.contrib import utils, ndk
 
diff --git a/tutorials/autotvm/tune_relay_arm.py b/tutorials/autotvm/tune_relay_arm.py
index 9223eb3..68d263b 100644
--- a/tutorials/autotvm/tune_relay_arm.py
+++ b/tutorials/autotvm/tune_relay_arm.py
@@ -224,7 +224,7 @@ tuning_option = {
         builder=autotvm.LocalBuilder(build_func="ndk" if use_android else "default"),
         runner=autotvm.RPCRunner(
             device_key,
-            host="0.0.0.0",
+            host="127.0.0.1",
             port=9190,
             number=5,
             timeout=10,
@@ -343,7 +343,7 @@ def tune_and_evaluate(tuning_opt):
 
         # upload module to device
         print("Upload...")
-        remote = autotvm.measure.request_remote(device_key, "0.0.0.0", 9190, timeout=10000)
+        remote = autotvm.measure.request_remote(device_key, "127.0.0.1", 9190, timeout=10000)
         remote.upload(tmp.relpath(filename))
         rlib = remote.load_module(filename)
 
diff --git a/tutorials/autotvm/tune_relay_cuda.py b/tutorials/autotvm/tune_relay_cuda.py
index 50485c4..65991cc 100644
--- a/tutorials/autotvm/tune_relay_cuda.py
+++ b/tutorials/autotvm/tune_relay_cuda.py
@@ -345,13 +345,13 @@ def tune_and_evaluate(tuning_opt):
 #
 # .. code-block:: bash
 #
-#     python -m tvm.exec.rpc_server --tracker=0.0.0.0:9190 --key=1080ti
+#     python -m tvm.exec.rpc_server --tracker=127.0.0.1:9190 --key=1080ti
 #
 # After registering devices, we can confirm it by querying rpc_tracker
 #
 # .. code-block:: bash
 #
-#   python -m tvm.exec.query_rpc_tracker --host=0.0.0.0 --port=9190
+#   python -m tvm.exec.query_rpc_tracker --host=127.0.0.1 --port=9190
 #
 # For example, if we have four 1080ti, two titanx and one gfx900, the output can be
 #
@@ -378,7 +378,7 @@ tuning_option = {
         builder=autotvm.LocalBuilder(timeout=10),
         runner=autotvm.RPCRunner(
             "1080ti",  # change the device key to your key
-            "0.0.0.0",
+            "127.0.0.1",
             9190,
             number=20,
             repeat=3,
diff --git a/tutorials/autotvm/tune_relay_mobile_gpu.py b/tutorials/autotvm/tune_relay_mobile_gpu.py
index 2b10987..790c2ff 100644
--- a/tutorials/autotvm/tune_relay_mobile_gpu.py
+++ b/tutorials/autotvm/tune_relay_mobile_gpu.py
@@ -225,7 +225,7 @@ tuning_option = {
         builder=autotvm.LocalBuilder(build_func="ndk" if use_android else "default"),
         runner=autotvm.RPCRunner(
             device_key,
-            host="0.0.0.0",
+            host="127.0.0.1",
             port=9190,
             number=10,
             timeout=5,
@@ -340,7 +340,7 @@ def tune_and_evaluate(tuning_opt):
 
         # upload module to device
         print("Upload...")
-        remote = autotvm.measure.request_remote(device_key, "0.0.0.0", 9190, timeout=10000)
+        remote = autotvm.measure.request_remote(device_key, "127.0.0.1", 9190, timeout=10000)
         remote.upload(tmp.relpath(filename))
         rlib = remote.load_module(filename)
 
diff --git a/tutorials/frontend/deploy_model_on_android.py b/tutorials/frontend/deploy_model_on_android.py
index 158280f..864e813 100644
--- a/tutorials/frontend/deploy_model_on_android.py
+++ b/tutorials/frontend/deploy_model_on_android.py
@@ -289,7 +289,7 @@ lib.export_library(lib_fname, fcompile)
 # With RPC, you can deploy the model remotely from your host machine
 # to the remote android device.
 
-tracker_host = os.environ.get("TVM_TRACKER_HOST", "0.0.0.0")
+tracker_host = os.environ.get("TVM_TRACKER_HOST", "127.0.0.1")
 tracker_port = int(os.environ.get("TVM_TRACKER_PORT", 9190))
 key = "android"
 
diff --git a/vta/python/vta/exec/rpc_server.py b/vta/python/vta/exec/rpc_server.py
index cd51913..b7a9c79 100644
--- a/vta/python/vta/exec/rpc_server.py
+++ b/vta/python/vta/exec/rpc_server.py
@@ -127,7 +127,9 @@ def server_start():
 def main():
     """Main funciton"""
     parser = argparse.ArgumentParser()
-    parser.add_argument("--host", type=str, default="0.0.0.0", help="the hostname of the server")
+    parser.add_argument(
+        "--host", type=str, default="0.0.0.0", help="The host IP address the server binds to"
+    )
     parser.add_argument("--port", type=int, default=9091, help="The port of the RPC")
     parser.add_argument("--port-end", type=int, default=9199, help="The end search port of the RPC")
     parser.add_argument(
diff --git a/vta/tutorials/autotvm/tune_relay_vta.py b/vta/tutorials/autotvm/tune_relay_vta.py
index 7deb740..38633b0 100644
--- a/vta/tutorials/autotvm/tune_relay_vta.py
+++ b/vta/tutorials/autotvm/tune_relay_vta.py
@@ -180,7 +180,7 @@ def compile_network(env, target, model, start_pack, stop_pack):
 # Here we use an Pynq-Z1 board as an example.
 
 # Tracker host and port can be set by your environment
-tracker_host = os.environ.get("TVM_TRACKER_HOST", "0.0.0.0")
+tracker_host = os.environ.get("TVM_TRACKER_HOST", "127.0.0.1")
 tracker_port = int(os.environ.get("TVM_TRACKER_PORT", 9190))
 
 # Load VTA parameters from the 3rdparty/vta-hw/config/vta_config.json file
diff --git a/web/tests/python/webgpu_rpc_test.py b/web/tests/python/webgpu_rpc_test.py
index 80e358b..e269370 100644
--- a/web/tests/python/webgpu_rpc_test.py
+++ b/web/tests/python/webgpu_rpc_test.py
@@ -26,7 +26,7 @@ from tvm import rpc
 from tvm.contrib import utils, emcc
 import numpy as np
 
-proxy_host = "localhost"
+proxy_host = "127.0.0.1"
 proxy_port = 9090
 
 
diff --git a/web/tests/python/websock_rpc_test.py b/web/tests/python/websock_rpc_test.py
index 9c1876a..84a0c9f 100644
--- a/web/tests/python/websock_rpc_test.py
+++ b/web/tests/python/websock_rpc_test.py
@@ -26,7 +26,7 @@ from tvm import rpc
 from tvm.contrib import utils, emcc
 import numpy as np
 
-proxy_host = "localhost"
+proxy_host = "127.0.0.1"
 proxy_port = 9090