Posted to commits@tvm.apache.org by jr...@apache.org on 2020/10/24 22:50:30 UTC

[incubator-tvm] branch main updated: Update include and src dir CHECK* to ICHECK* (#6745)

This is an automated email from the ASF dual-hosted git repository.

jroesch pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 1831c17  Update include and src dir CHECK* to ICHECK* (#6745)
1831c17 is described below

commit 1831c17998b29f3797f364410980809bfef554ca
Author: Robert Kimball <bo...@gmail.com>
AuthorDate: Sat Oct 24 15:47:24 2020 -0700

    Update include and src dir CHECK* to ICHECK* (#6745)
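
    Context for the rename: CHECK and ICHECK are both dmlc-style streaming
    assertion macros; the I- prefix marks a check on an internal invariant,
    so a failure reads as a TVM bug to be reported rather than as a user
    error. The sketch below shows the general shape of such a macro. It is
    a minimal illustration only (ICHECK_DEMO and FatalLogger are invented
    names for this sketch); TVM's actual ICHECK lives in
    include/tvm/support/logging.h and additionally prefixes the message
    with a note asking the user to file a bug report.

        #include <cstdlib>
        #include <iostream>
        #include <sstream>

        // Illustrative stand-in for the fatal log sink behind CHECK/ICHECK.
        class FatalLogger {
         public:
          FatalLogger(const char* file, int line) { stream_ << file << ":" << line << ": "; }
          std::ostringstream& stream() { return stream_; }
          // Runs after all user-supplied `<<` message parts are appended.
          ~FatalLogger() {
            std::cerr << stream_.str() << std::endl;
            std::abort();
          }

         private:
          std::ostringstream stream_;
        };

        // On failure the dangling `<<` lets the call site append context, e.g.
        //   ICHECK_DEMO(p != nullptr) << "ValueError: cannot index a null array";
        #define ICHECK_DEMO(cond)                  \
          if (!(cond))                             \
          FatalLogger(__FILE__, __LINE__).stream() \
              << "InternalError: Check failed: (" << #cond << ") is false: "
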
---
 include/tvm/arith/analyzer.h                       |   4 +-
 include/tvm/ir/attrs.h                             |   8 +-
 include/tvm/ir/diagnostic.h                        |   4 +-
 include/tvm/ir/env_func.h                          |   4 +-
 include/tvm/ir/expr.h                              |  16 +-
 include/tvm/ir/module.h                            |   2 +-
 include/tvm/ir/op.h                                |   6 +-
 include/tvm/ir/transform.h                         |   8 +-
 include/tvm/ir/type_functor.h                      |   2 +-
 include/tvm/node/attr_registry_map.h               |   6 +-
 include/tvm/node/container.h                       |  16 +-
 include/tvm/node/functor.h                         |   8 +-
 include/tvm/node/reflection.h                      |   4 +-
 include/tvm/parser/source_map.h                    |   2 +-
 include/tvm/relay/base.h                           |  20 +-
 include/tvm/relay/dataflow_pattern_functor.h       |   2 +-
 include/tvm/relay/expr_functor.h                   |   4 +-
 include/tvm/relay/pattern_functor.h                |   2 +-
 include/tvm/runtime/container.h                    |  58 +--
 include/tvm/runtime/data_type.h                    |   8 +-
 include/tvm/runtime/ndarray.h                      |  38 +-
 include/tvm/runtime/packed_func.h                  |  42 +--
 include/tvm/runtime/vm/bytecode.h                  |   1 +
 include/tvm/support/logging.h                      |  12 +-
 include/tvm/target/target_kind.h                   |   4 +-
 include/tvm/tir/data_layout.h                      |   4 +-
 include/tvm/tir/expr_functor.h                     |   2 +-
 include/tvm/topi/broadcast.h                       |   6 +-
 include/tvm/topi/cuda/dense.h                      |   8 +-
 include/tvm/topi/cuda/reduction.h                  |   4 +-
 include/tvm/topi/detail/broadcast.h                |  14 +-
 include/tvm/topi/detail/constant_utils.h           |   4 +-
 include/tvm/topi/detail/extern.h                   |   4 +-
 include/tvm/topi/detail/ravel_unravel.h            |   4 +-
 include/tvm/topi/elemwise.h                        |   2 +-
 include/tvm/topi/nn.h                              |  28 +-
 include/tvm/topi/nn/bnn.h                          |  10 +-
 include/tvm/topi/nn/dense.h                        |   6 +-
 include/tvm/topi/nn/dilate.h                       |   6 +-
 include/tvm/topi/nn/local_response_norm.h          |   6 +-
 include/tvm/topi/nn/pooling.h                      |  44 +--
 include/tvm/topi/nn/softmax.h                      |   4 +-
 include/tvm/topi/reduction.h                       |  10 +-
 include/tvm/topi/rocm/dense.h                      |   8 +-
 include/tvm/topi/transform.h                       | 104 +++---
 src/arith/analyzer.cc                              |   6 +-
 src/arith/canonical_simplify.cc                    |  32 +-
 src/arith/const_fold.h                             |  20 +-
 src/arith/const_int_bound.cc                       |  30 +-
 src/arith/domain_touched.cc                        |   2 +-
 src/arith/int_constraints.cc                       |  16 +-
 src/arith/int_set.cc                               |  12 +-
 src/arith/ir_mutator_with_analyzer.cc              |   2 +-
 src/arith/ir_visitor_with_analyzer.h               |   2 +-
 src/arith/iter_affine_map.cc                       |   8 +-
 src/arith/modular_set.cc                           |  12 +-
 src/arith/pattern_match.h                          |   8 +-
 src/arith/rewrite_simplify.cc                      |  12 +-
 src/arith/solve_linear_equation.cc                 |   4 +-
 src/arith/solve_linear_inequality.cc               |   4 +-
 src/auto_scheduler/compute_dag.cc                  |  24 +-
 src/auto_scheduler/cost_model.cc                   |   6 +-
 src/auto_scheduler/feature.cc                      |  14 +-
 src/auto_scheduler/loop_state.cc                   |   6 +-
 src/auto_scheduler/measure.cc                      |   2 +-
 src/auto_scheduler/measure_record.cc               |  34 +-
 src/auto_scheduler/search_policy/empty_policy.cc   |   2 +-
 src/auto_scheduler/search_policy/search_policy.cc  |   2 +-
 src/auto_scheduler/search_policy/sketch_policy.cc  |   6 +-
 .../search_policy/sketch_policy_rules.cc           |  41 +--
 src/auto_scheduler/search_policy/utils.cc          |  22 +-
 src/auto_scheduler/search_policy/utils.h           |  26 +-
 src/auto_scheduler/search_task.cc                  |   2 +-
 src/auto_scheduler/transform_step.cc               | 129 +++----
 src/auto_scheduler/utils.h                         |   8 +-
 src/autotvm/feature_visitor.cc                     |   2 +-
 src/autotvm/touch_extractor.cc                     |   6 +-
 src/contrib/hybrid/codegen_hybrid.cc               |  26 +-
 src/contrib/tf_op/tvm_dso_op_kernels.cc            |   2 +-
 src/driver/driver_api.cc                           |  14 +-
 src/ir/diagnostic.cc                               |   2 +-
 src/ir/env_func.cc                                 |   4 +-
 src/ir/error.cc                                    |   6 +-
 src/ir/expr.cc                                     |  10 +-
 src/ir/module.cc                                   |  30 +-
 src/ir/op.cc                                       |   4 +-
 src/ir/span.cc                                     |   4 +-
 src/ir/transform.cc                                |  12 +-
 src/node/attr_registry.h                           |   8 +-
 src/node/container.cc                              |  40 +--
 src/node/reflection.cc                             |   8 +-
 src/node/serialization.cc                          |  20 +-
 src/node/structural_equal.cc                       |  12 +-
 src/node/structural_hash.cc                        |  22 +-
 src/parser/meta_ref.cc                             |   4 +-
 src/parser/parser.cc                               |  18 +-
 src/parser/source_map.cc                           |   2 +-
 src/parser/tokenizer.h                             |  28 +-
 src/printer/doc.cc                                 |   2 +-
 src/printer/meta_data.h                            |   2 +-
 src/printer/relay_text_printer.cc                  |   4 +-
 src/printer/tir_text_printer.cc                    |   2 +-
 src/printer/tvmscript_printer.cc                   |   6 +-
 src/relay/analysis/annotated_region_set.cc         |  12 +-
 src/relay/analysis/annotated_region_set.h          |  14 +-
 src/relay/analysis/call_graph.cc                   |  24 +-
 src/relay/analysis/call_graph.h                    |  22 +-
 src/relay/analysis/context_analysis.cc             |  36 +-
 src/relay/analysis/dependency_graph.cc             |   2 +-
 src/relay/analysis/feature.cc                      |   6 +-
 src/relay/analysis/get_calibration_data.cc         |  10 +-
 src/relay/analysis/mac_count.cc                    |  28 +-
 src/relay/analysis/match_exhaustion.cc             |   6 +-
 src/relay/analysis/type_solver.cc                  |  16 +-
 src/relay/analysis/type_solver.h                   |   2 +-
 src/relay/analysis/util.cc                         |  12 +-
 src/relay/analysis/well_formed.cc                  |  14 +-
 src/relay/backend/build_module.cc                  |  22 +-
 src/relay/backend/compile_engine.cc                |  86 ++---
 src/relay/backend/compile_engine.h                 |   4 +-
 .../backend/contrib/arm_compute_lib/codegen.cc     |  44 +--
 src/relay/backend/contrib/codegen_c/codegen.cc     |  16 +-
 src/relay/backend/contrib/codegen_c/codegen_c.h    |   6 +-
 .../backend/contrib/codegen_json/codegen_json.h    |  16 +-
 src/relay/backend/contrib/dnnl/codegen.cc          |  34 +-
 src/relay/backend/contrib/ethosn/codegen.cc        |  14 +-
 src/relay/backend/contrib/tensorrt/codegen.cc      |  16 +-
 src/relay/backend/graph_plan_memory.cc             |  30 +-
 src/relay/backend/graph_runtime_codegen.cc         |  24 +-
 src/relay/backend/interpreter.cc                   |  46 +--
 src/relay/backend/param_dict.cc                    |  12 +-
 src/relay/backend/utils.h                          |  24 +-
 src/relay/backend/vm/compiler.cc                   | 102 +++---
 src/relay/backend/vm/lambda_lift.cc                |   7 +-
 src/relay/ir/dataflow_matcher.cc                   |   8 +-
 src/relay/ir/expr.cc                               |   6 +-
 src/relay/ir/expr_functor.cc                       |  14 +-
 src/relay/ir/function.cc                           |   4 +-
 src/relay/ir/indexed_graph.h                       |   4 +-
 src/relay/ir/transform.cc                          |   2 +-
 src/relay/op/algorithm/argsort.cc                  |   4 +-
 src/relay/op/algorithm/topk.cc                     |   6 +-
 src/relay/op/dyn/algorithm/topk.cc                 |  14 +-
 src/relay/op/dyn/image/resize.cc                   |   6 +-
 src/relay/op/dyn/nn/pad.cc                         |  12 +-
 src/relay/op/dyn/nn/upsampling.cc                  |  16 +-
 src/relay/op/dyn/nn/upsampling.h                   |   2 +-
 src/relay/op/dyn/tensor/transform.cc               |  49 +--
 src/relay/op/image/dilation2d.cc                   |  10 +-
 src/relay/op/image/grid_sample.cc                  |  14 +-
 src/relay/op/image/resize.cc                       |  16 +-
 src/relay/op/memory/memory.cc                      |  42 +--
 src/relay/op/nn/bitserial.cc                       |  20 +-
 src/relay/op/nn/convolution.h                      | 214 +++++------
 src/relay/op/nn/correlation.cc                     |   6 +-
 src/relay/op/nn/nn.cc                              |  84 ++---
 src/relay/op/nn/nn.h                               |  10 +-
 src/relay/op/nn/pad.cc                             |  52 +--
 src/relay/op/nn/pooling.cc                         | 146 ++++----
 src/relay/op/nn/sparse.cc                          |  10 +-
 src/relay/op/nn/upsampling.cc                      |  12 +-
 src/relay/op/nn/upsampling.h                       |   2 +-
 src/relay/op/op_common.h                           |   6 +-
 src/relay/op/tensor/binary.cc                      |   2 +-
 src/relay/op/tensor/reduce.cc                      |  50 +--
 src/relay/op/tensor/transform.cc                   | 400 ++++++++++-----------
 src/relay/op/tensor/transform.h                    |   6 +-
 src/relay/op/tensor/unary.cc                       |  12 +-
 src/relay/op/type_relations.cc                     |  12 +-
 src/relay/op/vision/multibox_op.cc                 |  24 +-
 src/relay/op/vision/nms.cc                         |  10 +-
 src/relay/op/vision/rcnn_op.cc                     |  34 +-
 src/relay/op/vision/yolo.cc                        |   8 +-
 src/relay/op/vm/vm.cc                              |  22 +-
 src/relay/qnn/op/concatenate.cc                    |  28 +-
 src/relay/qnn/op/convolution.cc                    |  36 +-
 src/relay/qnn/op/dense.cc                          |  20 +-
 src/relay/qnn/op/dequantize.cc                     |  22 +-
 src/relay/qnn/op/op_common.h                       |  22 +-
 src/relay/qnn/op/quantize.cc                       |  22 +-
 src/relay/qnn/op/requantize.cc                     |  42 +--
 src/relay/qnn/utils.cc                             |   4 +-
 src/relay/qnn/utils.h                              |  29 +-
 src/relay/quantize/annotate.cc                     |   2 +-
 src/relay/quantize/calibrate.cc                    |   8 +-
 src/relay/quantize/quantize.cc                     |   6 +-
 src/relay/quantize/realize.cc                      |  58 +--
 src/relay/transforms/alter_op_layout.cc            |   2 +-
 src/relay/transforms/annotate_target.cc            |  12 +-
 src/relay/transforms/canonicalize_cast.cc          |   6 +-
 src/relay/transforms/canonicalize_ops.cc           |   2 +-
 src/relay/transforms/combine_parallel_conv2d.cc    |  10 +-
 src/relay/transforms/combine_parallel_dense.cc     |  16 +-
 src/relay/transforms/combine_parallel_op.cc        |   4 +-
 src/relay/transforms/convert_layout.cc             |   2 +-
 src/relay/transforms/convert_sparse_dense.cc       |   4 +-
 src/relay/transforms/de_duplicate.cc               |  10 +-
 src/relay/transforms/dead_code.cc                  |   2 +-
 src/relay/transforms/defunctionalization.cc        |  38 +-
 src/relay/transforms/device_annotation.cc          |  16 +-
 src/relay/transforms/dynamic_to_static.cc          |  48 +--
 src/relay/transforms/eliminate_common_subexpr.cc   |   4 +-
 src/relay/transforms/eta_expand.cc                 |   2 +-
 src/relay/transforms/fold_constant.cc              |   8 +-
 src/relay/transforms/fold_scale_axis.cc            |  60 ++--
 src/relay/transforms/forward_rewrite.cc            |   4 +-
 src/relay/transforms/fuse_ops.cc                   |  44 +--
 src/relay/transforms/gradient.cc                   |  40 +--
 src/relay/transforms/infer_layout_utils.h          |   6 +-
 src/relay/transforms/inline.cc                     |  10 +-
 src/relay/transforms/lazy_gradient_init.cc         |   4 +-
 src/relay/transforms/legalize.cc                   |   2 +-
 src/relay/transforms/let_list.h                    |   6 +-
 src/relay/transforms/merge_compiler_regions.cc     |   6 +-
 src/relay/transforms/merge_composite.cc            |   2 +-
 src/relay/transforms/partial_eval.cc               |  70 ++--
 src/relay/transforms/partition_graph.cc            |  16 +-
 src/relay/transforms/pattern_utils.h               |  20 +-
 src/relay/transforms/simplify_fc_transpose.cc      |   4 +-
 src/relay/transforms/simplify_inference.cc         |  16 +-
 src/relay/transforms/to_a_normal_form.cc           |  14 +-
 src/relay/transforms/to_basic_block_normal_form.cc |   2 +-
 src/relay/transforms/to_cps.cc                     |   8 +-
 src/relay/transforms/transform_layout.h            |  14 +-
 src/relay/transforms/type_infer.cc                 |  38 +-
 src/runtime/c_runtime_api.cc                       |  32 +-
 src/runtime/container.cc                           |   2 +-
 .../contrib/arm_compute_lib/acl_allocator.cc       |   2 +-
 src/runtime/contrib/arm_compute_lib/acl_runtime.cc |  12 +-
 src/runtime/contrib/arm_compute_lib/acl_utils.cc   |   7 +-
 src/runtime/contrib/cblas/cblas.cc                 |   8 +-
 src/runtime/contrib/cblas/gemm_common.h            |  60 ++--
 src/runtime/contrib/cblas/mkl.cc                   |  12 +-
 src/runtime/contrib/cblas/mkldnn.cc                |   4 +-
 src/runtime/contrib/coreml/coreml_runtime.mm       |   7 +-
 src/runtime/contrib/cublas/cublas.cc               |  80 ++---
 src/runtime/contrib/cublas/cublas_utils.h          |  10 +-
 src/runtime/contrib/cudnn/cudnn_utils.h            |  10 +-
 src/runtime/contrib/cudnn/softmax.cc               |   2 +-
 src/runtime/contrib/dnnl/dnnl_json_runtime.cc      |  18 +-
 src/runtime/contrib/json/json_node.h               |  22 +-
 src/runtime/contrib/json/json_runtime.h            |  24 +-
 src/runtime/contrib/miopen/miopen_utils.h          |  10 +-
 src/runtime/contrib/mps/conv.mm                    |  18 +-
 src/runtime/contrib/mps/gemm.mm                    |  26 +-
 src/runtime/contrib/mps/mps_utils.h                |   2 +-
 src/runtime/contrib/nnpack/convolution.cc          |  90 ++---
 src/runtime/contrib/nnpack/fully_connected.cc      |  24 +-
 src/runtime/contrib/nnpack/nnpack_utils.cc         |   4 +-
 src/runtime/contrib/nnpack/nnpack_utils.h          |   2 +-
 src/runtime/contrib/onnx/onnx_module.cc            |   4 +-
 src/runtime/contrib/random/mt_random_engine.cc     |  14 +-
 src/runtime/contrib/random/random.cc               |   6 +-
 src/runtime/contrib/rocblas/rocblas.cc             |  32 +-
 src/runtime/contrib/sort/sort.cc                   |  22 +-
 src/runtime/contrib/tensorrt/tensorrt_builder.cc   |  28 +-
 src/runtime/contrib/tensorrt/tensorrt_logger.h     |   2 +-
 src/runtime/contrib/tensorrt/tensorrt_ops.cc       | 162 ++++-----
 src/runtime/contrib/tensorrt/tensorrt_runtime.cc   |  14 +-
 src/runtime/contrib/tflite/tflite_runtime.cc       |   4 +-
 src/runtime/contrib/tflite/tflite_runtime.h        |   2 +-
 src/runtime/contrib/thrust/thrust.cu               |   4 +-
 src/runtime/cpu_device_api.cc                      |   2 +-
 src/runtime/cuda/cuda_common.h                     |   9 +-
 src/runtime/cuda/cuda_device_api.cc                |   2 +-
 src/runtime/cuda/cuda_module.cc                    |  10 +-
 src/runtime/dso_library.cc                         |   6 +-
 src/runtime/file_utils.cc                          |  10 +-
 src/runtime/graph/debug/graph_runtime_debug.cc     |  14 +-
 src/runtime/graph/graph_runtime.cc                 |  74 ++--
 src/runtime/graph/graph_runtime.h                  |  62 ++--
 src/runtime/graph/graph_runtime_factory.cc         |  30 +-
 src/runtime/hexagon/hexagon_device_api.cc          |  30 +-
 src/runtime/hexagon/hexagon_module.cc              |  18 +-
 src/runtime/hexagon/hexagon_module.h               |   2 +-
 src/runtime/hexagon/sim/hexagon_device_sim.cc      |  68 ++--
 src/runtime/hexagon/target/hexagon_dsprpcapi.cc    |   4 +-
 src/runtime/hexagon/target/hexagon_dsprpcapi.h     |   4 +-
 src/runtime/hexagon/target/hexagon_stubapi.cc      |   4 +-
 src/runtime/hexagon/target/hexagon_stubapi.h       |   2 +-
 src/runtime/library_module.cc                      |  20 +-
 src/runtime/metadata_module.cc                     |  24 +-
 src/runtime/metal/metal_common.h                   |  10 +-
 src/runtime/metal/metal_device_api.mm              |  16 +-
 src/runtime/metal/metal_module.mm                  |  18 +-
 src/runtime/micro/micro_session.cc                 |  18 +-
 src/runtime/minrpc/minrpc_server.h                 |   2 +-
 src/runtime/module.cc                              |  12 +-
 src/runtime/ndarray.cc                             |  42 +--
 src/runtime/object.cc                              |  26 +-
 src/runtime/opencl/opencl_common.h                 |   8 +-
 src/runtime/opencl/opencl_device_api.cc            |  10 +-
 src/runtime/opencl/opencl_module.cc                |  12 +-
 src/runtime/pack_args.h                            |   4 +-
 src/runtime/registry.cc                            |   4 +-
 src/runtime/rocm/rocm_common.h                     |   8 +-
 src/runtime/rocm/rocm_device_api.cc                |   4 +-
 src/runtime/rocm/rocm_module.cc                    |   8 +-
 src/runtime/rpc/rpc_device_api.cc                  |   6 +-
 src/runtime/rpc/rpc_endpoint.cc                    |  48 +--
 src/runtime/rpc/rpc_module.cc                      |  38 +-
 src/runtime/rpc/rpc_pipe_impl.cc                   |   4 +-
 src/runtime/rpc/rpc_server_env.cc                  |   2 +-
 src/runtime/rpc/rpc_session.cc                     |   4 +-
 src/runtime/rpc/rpc_socket_impl.cc                 |  14 +-
 src/runtime/stackvm/stackvm.cc                     |  18 +-
 src/runtime/stackvm/stackvm.h                      |   8 +-
 src/runtime/stackvm/stackvm_module.cc              |   6 +-
 src/runtime/thread_pool.cc                         |  10 +-
 src/runtime/threading_backend.cc                   |   6 +-
 src/runtime/vm/bytecode.cc                         |   1 -
 src/runtime/vm/executable.cc                       |  28 +-
 src/runtime/vm/memory_manager.cc                   |  12 +-
 src/runtime/vm/profiler/vm.cc                      |  16 +-
 src/runtime/vm/serialize_utils.h                   |   8 +-
 src/runtime/vm/vm.cc                               |  62 ++--
 src/runtime/vulkan/vulkan.cc                       |  34 +-
 src/runtime/vulkan/vulkan_common.h                 |  10 +-
 src/runtime/vulkan/vulkan_shader.h                 |   2 +-
 src/runtime/vulkan/vulkan_stream.h                 |   2 +-
 src/runtime/workspace_pool.cc                      |   6 +-
 src/support/base64.h                               |  16 +-
 src/support/parallel_for.cc                        |  12 +-
 src/support/pipe.h                                 |  12 +-
 src/support/ring_buffer.h                          |   4 +-
 src/support/socket.h                               |  22 +-
 src/target/build_common.h                          |   2 +-
 src/target/codegen.cc                              |   4 +-
 src/target/datatype/registry.cc                    |   6 +-
 src/target/generic_func.cc                         |   8 +-
 src/target/intrin_rule.cc                          |  14 +-
 src/target/intrin_rule.h                           |   6 +-
 src/target/llvm/codegen_amdgpu.cc                  |  28 +-
 src/target/llvm/codegen_arm.cc                     |   2 +-
 src/target/llvm/codegen_cpu.cc                     |  64 ++--
 src/target/llvm/codegen_hexagon.cc                 |  46 +--
 src/target/llvm/codegen_llvm.cc                    |  78 ++--
 src/target/llvm/codegen_nvptx.cc                   |  22 +-
 src/target/llvm/codegen_x86_64.cc                  |   6 +-
 src/target/llvm/intrin_rule_llvm.cc                |  10 +-
 src/target/llvm/intrin_rule_llvm.h                 |   4 +-
 src/target/llvm/intrin_rule_nvptx.cc               |   9 +-
 src/target/llvm/intrin_rule_rocm.cc                |  14 +-
 src/target/llvm/llvm_common.cc                     |   4 +-
 src/target/llvm/llvm_module.cc                     |  48 +--
 src/target/opt/build_cuda_on.cc                    |   6 +-
 src/target/source/codegen_aocl.cc                  |   4 +-
 src/target/source/codegen_c.cc                     |  57 +--
 src/target/source/codegen_c_host.cc                |  16 +-
 src/target/source/codegen_cuda.cc                  |  56 +--
 src/target/source/codegen_metal.cc                 |  19 +-
 src/target/source/codegen_opencl.cc                |  10 +-
 src/target/source/codegen_source_base.cc           |   6 +-
 src/target/source/codegen_vhls.cc                  |   8 +-
 src/target/source/intrin_rule_cuda.cc              |   6 +-
 src/target/source/intrin_rule_opencl.cc            |   6 +-
 src/target/source/source_module.cc                 |   8 +-
 src/target/spirv/build_vulkan.cc                   |  18 +-
 src/target/spirv/codegen_spirv.cc                  |  74 ++--
 src/target/spirv/codegen_spirv.h                   |   2 +-
 src/target/spirv/intrin_rule_spirv.cc              |   2 +-
 src/target/spirv/ir_builder.cc                     |  62 ++--
 src/target/spirv/ir_builder.h                      |   6 +-
 src/target/stackvm/codegen_stackvm.cc              |  48 +--
 src/target/tag.cc                                  |   2 +-
 src/target/target.cc                               |  10 +-
 src/target/target_kind.cc                          |   6 +-
 src/te/autodiff/ad_simplify.cc                     |  22 +-
 src/te/autodiff/jacobian.cc                        |  11 +-
 src/te/operation/compute_op.cc                     |  44 +--
 src/te/operation/cross_thread_reduction.cc         |   9 +-
 src/te/operation/extern_op.cc                      |  16 +-
 src/te/operation/hybrid_op.cc                      |  38 +-
 src/te/operation/op_utils.cc                       |  20 +-
 src/te/operation/placeholder_op.cc                 |   4 +-
 src/te/operation/scan_op.cc                        |  44 +--
 src/te/operation/tensor_compute_op.cc              |  16 +-
 src/te/operation/tensorize.cc                      |  92 ++---
 src/te/schedule/bound.cc                           |  26 +-
 src/te/schedule/graph.cc                           |  10 +-
 src/te/schedule/message_passing.cc                 |  64 ++--
 src/te/schedule/operation_inline.cc                |   6 +-
 src/te/schedule/schedule_dataflow_rewrite.cc       |  65 ++--
 src/te/schedule/schedule_lang.cc                   |  80 ++---
 src/te/schedule/schedule_ops.cc                    |  49 +--
 .../schedule_postproc_rewrite_for_tensor_core.cc   |  40 +--
 src/te/schedule/schedule_postproc_to_primfunc.cc   |   4 +-
 src/te/tensor.cc                                   |   4 +-
 src/tir/analysis/verify_gpu_code.cc                |   4 +-
 src/tir/analysis/verify_memory.cc                  |   2 +-
 src/tir/analysis/verify_ssa.cc                     |   2 +-
 src/tir/ir/buffer.cc                               |  14 +-
 src/tir/ir/data_layout.cc                          |  62 ++--
 src/tir/ir/expr.cc                                 | 152 ++++----
 src/tir/ir/stmt.cc                                 |  62 ++--
 src/tir/ir/transform.cc                            |   2 +-
 src/tir/op/op.cc                                   |  70 ++--
 src/tir/transforms/arg_binder.cc                   |  18 +-
 src/tir/transforms/bf16_legalize.cc                |  10 +-
 src/tir/transforms/combine_context_call.cc         |   4 +-
 src/tir/transforms/coproc_sync.cc                  |  24 +-
 src/tir/transforms/hoist_if_then_else.cc           |   2 +-
 src/tir/transforms/inject_copy_intrin.cc           |  14 +-
 src/tir/transforms/inject_double_buffer.cc         |  18 +-
 src/tir/transforms/inject_prefetch.cc              |   2 +-
 src/tir/transforms/inject_virtual_thread.cc        |  18 +-
 src/tir/transforms/ir_utils.cc                     |  18 +-
 src/tir/transforms/ir_utils.h                      |   4 +-
 src/tir/transforms/lift_attr_scope.cc              |   2 +-
 src/tir/transforms/loop_partition.cc               |  10 +-
 src/tir/transforms/lower_custom_datatypes.cc       |  24 +-
 .../transforms/lower_device_storage_access_info.cc |  12 +-
 src/tir/transforms/lower_intrin.cc                 |   8 +-
 src/tir/transforms/lower_thread_allreduce.cc       |  36 +-
 src/tir/transforms/lower_tvm_builtin.cc            |  22 +-
 src/tir/transforms/lower_warp_memory.cc            |  28 +-
 src/tir/transforms/make_packed_api.cc              |  10 +-
 src/tir/transforms/narrow_datatype.cc              |  22 +-
 src/tir/transforms/remap_thread_axis.cc            |   6 +-
 src/tir/transforms/remove_no_op.cc                 |   2 +-
 src/tir/transforms/split_host_device.cc            |  20 +-
 src/tir/transforms/storage_access.cc               |  16 +-
 src/tir/transforms/storage_flatten.cc              |  48 +--
 src/tir/transforms/storage_rewrite.cc              |  56 +--
 src/tir/transforms/tensorcore_infer_fragment.cc    |  60 ++--
 src/tir/transforms/thread_storage_sync.cc          |  14 +-
 src/tir/transforms/unroll_loop.cc                  |   4 +-
 src/tir/transforms/vectorize_loop.cc               |  18 +-
 src/topi/transform.cc                              |   2 +-
 429 files changed, 4251 insertions(+), 4234 deletions(-)

diff --git a/include/tvm/arith/analyzer.h b/include/tvm/arith/analyzer.h
index a9a0bed..cd20bdc 100644
--- a/include/tvm/arith/analyzer.h
+++ b/include/tvm/arith/analyzer.h
@@ -320,10 +320,10 @@ class CanonicalSimplifier {
  *  arith::Analyzer analyzer;
  *  {
  *    With<arith::ConstraintContext> scope(&analyzer, x % 3 == 0);
- *    CHECK_EQ(analyzer.modular_set(x)->coeff, 3);
+ *    ICHECK_EQ(analyzer.modular_set(x)->coeff, 3);
  *  }
  *  // constraint no longer in effect.
- *  CHECK_NE(analyzer.modular_set(x)->coeff, 3);
+ *  ICHECK_NE(analyzer.modular_set(x)->coeff, 3);
  *
  * \endcode
  */
diff --git a/include/tvm/ir/attrs.h b/include/tvm/ir/attrs.h
index e92baf1..afb8ef0 100644
--- a/include/tvm/ir/attrs.h
+++ b/include/tvm/ir/attrs.h
@@ -428,7 +428,7 @@ inline void SetValue<double>(double* ptr, const TVMArgValue& val) {
     *ptr = val.operator double();
   } else {
     ObjectRef expr = val;
-    CHECK(expr.defined());
+    ICHECK(expr.defined());
     if (const IntImmNode* op = expr.as<IntImmNode>()) {
       *ptr = static_cast<double>(op->value);
     } else if (const FloatImmNode* op = expr.as<FloatImmNode>()) {
@@ -664,7 +664,7 @@ class AttrsNode : public BaseAttrsNode {
   }
 
   void InitByPackedArgs(const runtime::TVMArgs& args, bool allow_unknown) final {
-    CHECK_EQ(args.size() % 2, 0);
+    ICHECK_EQ(args.size() % 2, 0);
     const int kLinearSearchBound = 16;
     int hit_count = 0;
     // applies two strategies to lookup
@@ -672,7 +672,7 @@ class AttrsNode : public BaseAttrsNode {
       // linear search.
       auto ffind = [&args](const char* key, runtime::TVMArgValue* val) {
         for (int i = 0; i < args.size(); i += 2) {
-          CHECK_EQ(args.type_codes[i], kTVMStr);
+          ICHECK_EQ(args.type_codes[i], kTVMStr);
           if (!std::strcmp(key, args.values[i].v_str)) {
             *val = args[i + 1];
             return true;
@@ -687,7 +687,7 @@ class AttrsNode : public BaseAttrsNode {
       // construct a map then do lookup.
       std::unordered_map<std::string, runtime::TVMArgValue> kwargs;
       for (int i = 0; i < args.size(); i += 2) {
-        CHECK_EQ(args.type_codes[i], kTVMStr);
+        ICHECK_EQ(args.type_codes[i], kTVMStr);
         kwargs[args[i].operator std::string()] = args[i + 1];
       }
       auto ffind = [&kwargs](const char* key, runtime::TVMArgValue* val) {
diff --git a/include/tvm/ir/diagnostic.h b/include/tvm/ir/diagnostic.h
index 2a2a6cd..2053a29 100644
--- a/include/tvm/ir/diagnostic.h
+++ b/include/tvm/ir/diagnostic.h
@@ -149,7 +149,7 @@ class DiagnosticRenderer : public ObjectRef {
   void Render(const DiagnosticContext& ctx);
 
   DiagnosticRendererNode* operator->() {
-    CHECK(get() != nullptr);
+    ICHECK(get() != nullptr);
     return static_cast<DiagnosticRendererNode*>(get_mutable());
   }
 
@@ -203,7 +203,7 @@ class DiagnosticContext : public ObjectRef {
   void Render();
 
   DiagnosticContextNode* operator->() {
-    CHECK(get() != nullptr);
+    ICHECK(get() != nullptr);
     return static_cast<DiagnosticContextNode*>(get_mutable());
   }
 
diff --git a/include/tvm/ir/env_func.h b/include/tvm/ir/env_func.h
index 65653b7..386666a 100644
--- a/include/tvm/ir/env_func.h
+++ b/include/tvm/ir/env_func.h
@@ -83,7 +83,7 @@ class EnvFunc : public ObjectRef {
   template <typename... Args>
   runtime::TVMRetValue operator()(Args&&... args) const {
     const EnvFuncNode* n = operator->();
-    CHECK(n != nullptr);
+    ICHECK(n != nullptr);
     return n->func(std::forward<Args>(args)...);
   }
   /*!
@@ -137,7 +137,7 @@ class TypedEnvFunc<R(Args...)> : public ObjectRef {
    */
   R operator()(Args... args) const {
     const EnvFuncNode* n = operator->();
-    CHECK(n != nullptr);
+    ICHECK(n != nullptr);
     return runtime::detail::typed_packed_call_dispatcher<R>::run(n->func,
                                                                  std::forward<Args>(args)...);
   }
diff --git a/include/tvm/ir/expr.h b/include/tvm/ir/expr.h
index d6cfc5a..c982c5c 100644
--- a/include/tvm/ir/expr.h
+++ b/include/tvm/ir/expr.h
@@ -386,7 +386,7 @@ class Integer : public IntImm {
    * \brief convert to int64_t
    */
   operator int64_t() const {
-    CHECK(data_ != nullptr) << " Trying to reference a null Integer";
+    ICHECK(data_ != nullptr) << " Trying to reference a null Integer";
     return (*this)->value;
   }
   // comparators
@@ -461,9 +461,9 @@ class Range : public ObjectRef {
 
 // implementations
 inline const Type& RelayExprNode::checked_type() const {
-  CHECK(checked_type_.defined()) << "internal error: the type checker has "
-                                 << "not populated the checked_type "
-                                 << "field for " << GetRef<RelayExpr>(this);
+  ICHECK(checked_type_.defined()) << "internal error: the type checker has "
+                                  << "not populated the checked_type "
+                                  << "field for " << GetRef<RelayExpr>(this);
   return this->checked_type_;
 }
 
@@ -471,11 +471,11 @@ template <typename TTypeNode>
 inline const TTypeNode* RelayExprNode::type_as() const {
   static_assert(std::is_base_of<TypeNode, TTypeNode>::value,
                 "TType must be a special case of type");
-  CHECK(checked_type_.defined())
+  ICHECK(checked_type_.defined())
       << "Type inference for this Expr has not completed. Try to call infer_type pass.";
   const TTypeNode* node = checked_type_.as<TTypeNode>();
-  CHECK(node != nullptr) << "Expected type to be " << TTypeNode::_type_key << ", but get "
-                         << checked_type_->GetTypeKey();
+  ICHECK(node != nullptr) << "Expected type to be " << TTypeNode::_type_key << ", but get "
+                          << checked_type_->GetTypeKey();
   return node;
 }
 
@@ -522,7 +522,7 @@ struct PackedFuncValueConverter<tvm::Bool> {
     }
     if (val.type_code() == kTVMArgInt) {
       int v = val.operator int();
-      CHECK(v == 0 || v == 1) << "ValueError: boolean value can only be 0 or 1, but get " << v;
+      ICHECK(v == 0 || v == 1) << "ValueError: boolean value can only be 0 or 1, but get " << v;
       return Bool(static_cast<bool>(v));
     }
     return val.AsObjectRef<tvm::Bool>();
diff --git a/include/tvm/ir/module.h b/include/tvm/ir/module.h
index b3f8438..d6fb6a2 100644
--- a/include/tvm/ir/module.h
+++ b/include/tvm/ir/module.h
@@ -300,7 +300,7 @@ class IRModule : public ObjectRef {
   /*! \return mutable pointers to the node. */
   IRModuleNode* operator->() const {
     auto* ptr = get_mutable();
-    CHECK(ptr != nullptr);
+    ICHECK(ptr != nullptr);
     return static_cast<IRModuleNode*>(ptr);
   }
 
diff --git a/include/tvm/ir/op.h b/include/tvm/ir/op.h
index e7b3577..c73be3c 100644
--- a/include/tvm/ir/op.h
+++ b/include/tvm/ir/op.h
@@ -146,7 +146,7 @@ class OpNode : public RelayExprNode {
   // Internal function to compute if it is primitive op
   bool IsPrimitiveOp_() const {
     const auto& fn_ty = this->op_type;
-    CHECK(fn_ty.get() != nullptr);
+    ICHECK(fn_ty.get() != nullptr);
     if (fn_ty->type_constraints.size() != 1) return false;
     const TypeRelationNode* rel = fn_ty->type_constraints[0].as<TypeRelationNode>();
     if (rel == nullptr) return false;
@@ -462,7 +462,7 @@ inline OpRegEntry& OpRegEntry::set_support_level(int32_t n) {  // NOLINT(*)
 template <typename ValueType>
 inline OpRegEntry& OpRegEntry::set_attr(  // NOLINT(*)
     const std::string& attr_name, const ValueType& value, int plevel) {
-  CHECK_GT(plevel, 0) << "plevel in set_attr must be greater than 0";
+  ICHECK_GT(plevel, 0) << "plevel in set_attr must be greater than 0";
   runtime::TVMRetValue rv;
   rv = value;
   UpdateAttr(attr_name, rv, plevel);
@@ -473,7 +473,7 @@ inline OpRegEntry& OpRegEntry::set_attr(  // NOLINT(*)
 
 template <typename ValueType>
 inline ValueType OpAttrMap<ValueType>::get(const RelayExpr& expr, ValueType def_value) const {
-  CHECK(expr.defined());
+  ICHECK(expr.defined());
   if (const OpNode* op = expr.as<OpNode>()) {
     return this->map_.get(GetRef<Op>(op), def_value);
   } else {
diff --git a/include/tvm/ir/transform.h b/include/tvm/ir/transform.h
index 2bbf283..d293112 100644
--- a/include/tvm/ir/transform.h
+++ b/include/tvm/ir/transform.h
@@ -166,7 +166,7 @@ class PassContext : public ObjectRef {
    * \return const access pointer.
    */
   const PassContextNode* operator->() const {
-    CHECK(get() != nullptr);
+    ICHECK(get() != nullptr);
     return static_cast<const PassContextNode*>(get());
   }
   /*!
@@ -174,7 +174,7 @@ class PassContext : public ObjectRef {
    * \return mutable access pointer.
    */
   PassContextNode* operator->() {
-    CHECK(get() != nullptr);
+    ICHECK(get() != nullptr);
     return static_cast<PassContextNode*>(get_mutable());
   }
 
@@ -344,7 +344,7 @@ class Pass : public ObjectRef {
    */
   IRModule operator()(IRModule mod) const {
     const PassNode* node = operator->();
-    CHECK(node != nullptr);
+    ICHECK(node != nullptr);
     return node->operator()(std::move(mod));
   }
   /*!
@@ -357,7 +357,7 @@ class Pass : public ObjectRef {
    */
   IRModule operator()(IRModule mod, const PassContext& pass_ctx) const {
     const PassNode* node = operator->();
-    CHECK(node != nullptr);
+    ICHECK(node != nullptr);
     return node->operator()(std::move(mod), pass_ctx);
   }
 
diff --git a/include/tvm/ir/type_functor.h b/include/tvm/ir/type_functor.h
index 2a6314c..11bf7d4 100644
--- a/include/tvm/ir/type_functor.h
+++ b/include/tvm/ir/type_functor.h
@@ -71,7 +71,7 @@ class TypeFunctor<R(const Type& n, Args...)> {
    * \return The result of the call
    */
   virtual R VisitType(const Type& n, Args... args) {
-    CHECK(n.defined());
+    ICHECK(n.defined());
     static FType vtable = InitVTable();
     return vtable(n, this, std::forward<Args>(args)...);
   }
diff --git a/include/tvm/node/attr_registry_map.h b/include/tvm/node/attr_registry_map.h
index 9c554af..552aa71 100644
--- a/include/tvm/node/attr_registry_map.h
+++ b/include/tvm/node/attr_registry_map.h
@@ -56,9 +56,9 @@ class AttrRegistryMapContainerMap {
    * \return the const reference to the content value.
    */
   const runtime::TVMRetValue& operator[](const KeyType& key) const {
-    CHECK(key.defined());
+    ICHECK(key.defined());
     const uint32_t idx = key->AttrRegistryIndex();
-    CHECK(idx < data_.size() && data_[idx].second != 0)
+    ICHECK(idx < data_.size() && data_[idx].second != 0)
         << "Attribute " << attr_name_ << " has not been registered for " << key->name;
     return data_[idx].first;
   }
@@ -71,7 +71,7 @@ class AttrRegistryMapContainerMap {
    */
   template <typename ValueType>
   ValueType get(const KeyType& key, ValueType def_value) const {
-    CHECK(key.defined());
+    ICHECK(key.defined());
     const uint32_t idx = key->AttrRegistryIndex();
     if (idx < data_.size() && data_[idx].second != 0) {
       return data_[idx].first;
diff --git a/include/tvm/node/container.h b/include/tvm/node/container.h
index 74dabc1..209bb9e 100644
--- a/include/tvm/node/container.h
+++ b/include/tvm/node/container.h
@@ -351,7 +351,7 @@ class SmallMapNode : public MapNode,
    */
   const mapped_type& at(const key_type& key) const {
     iterator itr = find(key);
-    CHECK(itr.index < size_) << "IndexError: key is not in Map";
+    ICHECK(itr.index < size_) << "IndexError: key is not in Map";
     return itr->second;
   }
   /*!
@@ -361,7 +361,7 @@ class SmallMapNode : public MapNode,
    */
   mapped_type& at(const key_type& key) {
     iterator itr = find(key);
-    CHECK(itr.index < size_) << "IndexError: key is not in Map";
+    ICHECK(itr.index < size_) << "IndexError: key is not in Map";
     return itr->second;
   }
   /*! \return begin iterator */
@@ -466,7 +466,7 @@ class SmallMapNode : public MapNode,
     }
     uint64_t next_size = std::max(map_node->slots_ * 2, uint64_t(kInitSize));
     next_size = std::min(next_size, uint64_t(kMaxSize));
-    CHECK_GT(next_size, map_node->slots_);
+    ICHECK_GT(next_size, map_node->slots_);
     ObjectPtr<Object> new_map = CreateFromRange(next_size, map_node->begin(), map_node->end());
     InsertMaybeReHash(kv, &new_map);
     *map = std::move(new_map);
@@ -656,7 +656,7 @@ class DenseMapNode : public MapNode {
    */
   mapped_type& At(const key_type& key) const {
     ListNode iter = Search(key);
-    CHECK(!iter.IsNone()) << "IndexError: key is not in Map";
+    ICHECK(!iter.IsNone()) << "IndexError: key is not in Map";
     return iter.Val();
   }
   /*!
@@ -823,7 +823,7 @@ class DenseMapNode : public MapNode {
    * \return The object created
    */
   static ObjectPtr<DenseMapNode> Empty(uint32_t fib_shift, uint64_t n_slots) {
-    CHECK_GT(n_slots, uint64_t(SmallMapNode::kMaxSize));
+    ICHECK_GT(n_slots, uint64_t(SmallMapNode::kMaxSize));
     ObjectPtr<DenseMapNode> p = make_object<DenseMapNode>();
     uint64_t n_blocks = CalcNumBlocks(n_slots - 1);
     Block* block = p->data_ = new Block[n_blocks];
@@ -855,7 +855,7 @@ class DenseMapNode : public MapNode {
       for (int j = 0; j < kBlockCap;
            ++j, ++meta_ptr_from, ++data_ptr_from, ++meta_ptr_to, ++data_ptr_to) {
         uint8_t& meta = *meta_ptr_to = *meta_ptr_from;
-        CHECK(meta != kProtectedSlot);
+        ICHECK(meta != kProtectedSlot);
         if (meta != uint8_t(kEmptySlot)) {
           new (data_ptr_to) KVType(*data_ptr_from);
         }
@@ -876,7 +876,7 @@ class DenseMapNode : public MapNode {
       iter.Val() = kv.second;
       return;
     }
-    CHECK_GT(map_node->slots_, uint64_t(SmallMapNode::kMaxSize));
+    ICHECK_GT(map_node->slots_, uint64_t(SmallMapNode::kMaxSize));
     // Otherwise, start rehash
     ObjectPtr<Object> p = Empty(map_node->fib_shift_ - 1, map_node->slots_ * 2 + 2);
     // Insert the given `kv` into the new hash map
@@ -963,7 +963,7 @@ class DenseMapNode : public MapNode {
       shift -= 1;
       slots <<= 1;
     }
-    CHECK_GT(slots, cap);
+    ICHECK_GT(slots, cap);
     if (slots < cap * 2) {
       *fib_shift = shift - 1;
       *n_slots = slots << 1;
diff --git a/include/tvm/node/functor.h b/include/tvm/node/functor.h
index 0837f35..9920500 100644
--- a/include/tvm/node/functor.h
+++ b/include/tvm/node/functor.h
@@ -92,8 +92,8 @@ class NodeFunctor<R(const ObjectRef& n, Args...)> {
    * \return The result.
    */
   R operator()(const ObjectRef& n, Args... args) const {
-    CHECK(can_dispatch(n)) << "NodeFunctor calls un-registered function on type "
-                           << n->GetTypeKey();
+    ICHECK(can_dispatch(n)) << "NodeFunctor calls un-registered function on type "
+                            << n->GetTypeKey();
     return (*func_[n->type_index()])(n, std::forward<Args>(args)...);
   }
   /*!
@@ -108,7 +108,7 @@ class NodeFunctor<R(const ObjectRef& n, Args...)> {
     if (func_.size() <= tindex) {
       func_.resize(tindex + 1, nullptr);
     }
-    CHECK(func_[tindex] == nullptr) << "Dispatch for " << TNode::_type_key << " is already set";
+    ICHECK(func_[tindex] == nullptr) << "Dispatch for " << TNode::_type_key << " is already set";
     func_[tindex] = f;
     return *this;
   }
@@ -121,7 +121,7 @@ class NodeFunctor<R(const ObjectRef& n, Args...)> {
   template <typename TNode>
   TSelf& clear_dispatch() {  // NOLINT(*)
     uint32_t tindex = TNode::RuntimeTypeIndex();
-    CHECK_LT(tindex, func_.size()) << "clear_dispatch: index out of range";
+    ICHECK_LT(tindex, func_.size()) << "clear_dispatch: index out of range";
     func_[tindex] = nullptr;
     return *this;
   }
diff --git a/include/tvm/node/reflection.h b/include/tvm/node/reflection.h
index e8ff26b..d842c33 100644
--- a/include/tvm/node/reflection.h
+++ b/include/tvm/node/reflection.h
@@ -208,7 +208,7 @@ class ReflectionVTable::Registry {
    * \return reference to self.
    */
   Registry& set_creator(FCreate f) {  // NOLINT(*)
-    CHECK_LT(type_index_, parent_->fcreate_.size());
+    ICHECK_LT(type_index_, parent_->fcreate_.size());
     parent_->fcreate_[type_index_] = f;
     return *this;
   }
@@ -218,7 +218,7 @@ class ReflectionVTable::Registry {
    * \return reference to self.
    */
   Registry& set_repr_bytes(FReprBytes f) {  // NOLINT(*)
-    CHECK_LT(type_index_, parent_->frepr_bytes_.size());
+    ICHECK_LT(type_index_, parent_->frepr_bytes_.size());
     parent_->frepr_bytes_[type_index_] = f;
     return *this;
   }
diff --git a/include/tvm/parser/source_map.h b/include/tvm/parser/source_map.h
index 1153deb..424af5c 100644
--- a/include/tvm/parser/source_map.h
+++ b/include/tvm/parser/source_map.h
@@ -108,7 +108,7 @@ class SourceMap : public ObjectRef {
   void Add(const Source& source);
 
   SourceMapNode* operator->() {
-    CHECK(get() != nullptr);
+    ICHECK(get() != nullptr);
     return static_cast<SourceMapNode*>(get_mutable());
   }
 
diff --git a/include/tvm/relay/base.h b/include/tvm/relay/base.h
index 76a6a22..e94bd27 100644
--- a/include/tvm/relay/base.h
+++ b/include/tvm/relay/base.h
@@ -42,18 +42,18 @@ namespace tvm {
  */
 namespace relay {
 
-#define RELAY_DEBUG(...)                                               \
-  {                                                                    \
-    auto fdebug = runtime::Registry::Get("relay.debug");               \
-    CHECK(fdebug) << "Could not find Relay Python debugger function."; \
-    (*fdebug)("RELAY_DEBUG", __FILE__, __LINE__, __VA_ARGS__);         \
+#define RELAY_DEBUG(...)                                                \
+  {                                                                     \
+    auto fdebug = runtime::Registry::Get("relay.debug");                \
+    ICHECK(fdebug) << "Could not find Relay Python debugger function."; \
+    (*fdebug)("RELAY_DEBUG", __FILE__, __LINE__, __VA_ARGS__);          \
   }
 
-#define RELAY_DEBUG_INTERP(...)                                        \
-  {                                                                    \
-    auto fdebug = runtime::Registry::Get("relay.debug_interp");        \
-    CHECK(fdebug) << "Could not find Relay Python debugger function."; \
-    (*fdebug)("RELAY_DEBUG", __FILE__, __LINE__, __VA_ARGS__);         \
+#define RELAY_DEBUG_INTERP(...)                                         \
+  {                                                                     \
+    auto fdebug = runtime::Registry::Get("relay.debug_interp");         \
+    ICHECK(fdebug) << "Could not find Relay Python debugger function."; \
+    (*fdebug)("RELAY_DEBUG", __FILE__, __LINE__, __VA_ARGS__);          \
   }
 
 /*!
diff --git a/include/tvm/relay/dataflow_pattern_functor.h b/include/tvm/relay/dataflow_pattern_functor.h
index 98c81c9..364daac 100644
--- a/include/tvm/relay/dataflow_pattern_functor.h
+++ b/include/tvm/relay/dataflow_pattern_functor.h
@@ -76,7 +76,7 @@ class DFPatternFunctor<R(const DFPattern& n, Args...)> {
    * \return The result of the call
    */
   virtual R VisitDFPattern(const DFPattern& n, Args... args) {
-    CHECK(n.defined());
+    ICHECK(n.defined());
     static FType vtable = InitVTable();
     return vtable(n, this, std::forward<Args>(args)...);
   }
diff --git a/include/tvm/relay/expr_functor.h b/include/tvm/relay/expr_functor.h
index c3d2f72..df0940f 100644
--- a/include/tvm/relay/expr_functor.h
+++ b/include/tvm/relay/expr_functor.h
@@ -87,7 +87,7 @@ class ExprFunctor<R(const Expr& n, Args...)> {
    * \return The result of the call
    */
   virtual R VisitExpr(const Expr& n, Args... args) {
-    CHECK(n.defined());
+    ICHECK(n.defined());
     static FType vtable = InitVTable();
     return vtable(n, this, std::forward<Args>(args)...);
   }
@@ -345,7 +345,7 @@ class ExprRewriter {
    * \return The result of the call
    */
   virtual Expr Rewrite(const Expr& pre, const Expr& post) {
-    CHECK(pre.defined());
+    ICHECK(pre.defined());
     static FType vtable = InitVTable();
     return vtable(pre, this, post);
   }
diff --git a/include/tvm/relay/pattern_functor.h b/include/tvm/relay/pattern_functor.h
index de3bafa..711d832 100644
--- a/include/tvm/relay/pattern_functor.h
+++ b/include/tvm/relay/pattern_functor.h
@@ -89,7 +89,7 @@ class PatternFunctor<R(const Pattern& n, Args...)> {
    * \return The result of the call
    */
   virtual R VisitPattern(const Pattern& n, Args... args) {
-    CHECK(n.defined());
+    ICHECK(n.defined());
     static FType vtable = InitVTable();
     return vtable(n, this, std::forward<Args>(args)...);
   }
diff --git a/include/tvm/runtime/container.h b/include/tvm/runtime/container.h
index 7778c5d..796ab7b 100644
--- a/include/tvm/runtime/container.h
+++ b/include/tvm/runtime/container.h
@@ -146,7 +146,7 @@ class InplaceArrayBase {
    */
   const ElemType& operator[](size_t idx) const {
     size_t size = Self()->GetSize();
-    CHECK_LT(idx, size) << "Index " << idx << " out of bounds " << size << "\n";
+    ICHECK_LT(idx, size) << "Index " << idx << " out of bounds " << size << "\n";
     return *(reinterpret_cast<ElemType*>(AddressOf(idx)));
   }
 
@@ -157,7 +157,7 @@ class InplaceArrayBase {
    */
   ElemType& operator[](size_t idx) {
     size_t size = Self()->GetSize();
-    CHECK_LT(idx, size) << "Index " << idx << " out of bounds " << size << "\n";
+    ICHECK_LT(idx, size) << "Index " << idx << " out of bounds " << size << "\n";
     return *(reinterpret_cast<ElemType*>(AddressOf(idx)));
   }
 
@@ -361,7 +361,7 @@ class ArrayNode : public Object, public InplaceArrayBase<ArrayNode, ObjectRef> {
    */
   static ObjectPtr<ArrayNode> CopyFrom(int64_t cap, ArrayNode* from) {
     int64_t size = from->size_;
-    CHECK_GE(cap, size) << "ValueError: not enough capacity";
+    ICHECK_GE(cap, size) << "ValueError: not enough capacity";
     ObjectPtr<ArrayNode> p = ArrayNode::Empty(cap);
     ObjectRef* write = p->MutableBegin();
     ObjectRef* read = from->MutableBegin();
@@ -380,7 +380,7 @@ class ArrayNode : public Object, public InplaceArrayBase<ArrayNode, ObjectRef> {
    */
   static ObjectPtr<ArrayNode> MoveFrom(int64_t cap, ArrayNode* from) {
     int64_t size = from->size_;
-    CHECK_GE(cap, size) << "ValueError: not enough capacity";
+    ICHECK_GE(cap, size) << "ValueError: not enough capacity";
     ObjectPtr<ArrayNode> p = ArrayNode::Empty(cap);
     ObjectRef* write = p->MutableBegin();
     ObjectRef* read = from->MutableBegin();
@@ -429,7 +429,7 @@ class ArrayNode : public Object, public InplaceArrayBase<ArrayNode, ObjectRef> {
    * \return Ref-counted ArrayNode requested
    */
   static ObjectPtr<ArrayNode> Empty(int64_t n = kInitSize) {
-    CHECK_GE(n, 0);
+    ICHECK_GE(n, 0);
     ObjectPtr<ArrayNode> p = make_inplace_array_object<ArrayNode, ObjectRef>(n);
     p->capacity_ = n;
     p->size_ = 0;
@@ -679,9 +679,9 @@ class Array : public ObjectRef {
    */
   const T operator[](int64_t i) const {
     ArrayNode* p = GetArrayNode();
-    CHECK(p != nullptr) << "ValueError: cannot index a null array";
-    CHECK(0 <= i && i < p->size_) << "IndexError: indexing " << i << " on an array of size "
-                                  << p->size_;
+    ICHECK(p != nullptr) << "ValueError: cannot index a null array";
+    ICHECK(0 <= i && i < p->size_)
+        << "IndexError: indexing " << i << " on an array of size " << p->size_;
     return DowncastNoCheck<T>(*(p->begin() + i));
   }
 
@@ -703,16 +703,16 @@ class Array : public ObjectRef {
   /*! \return The first element of the array */
   const T front() const {
     ArrayNode* p = GetArrayNode();
-    CHECK(p != nullptr) << "ValueError: cannot index a null array";
-    CHECK_GT(p->size_, 0) << "IndexError: cannot index an empty array";
+    ICHECK(p != nullptr) << "ValueError: cannot index a null array";
+    ICHECK_GT(p->size_, 0) << "IndexError: cannot index an empty array";
     return DowncastNoCheck<T>(*(p->begin()));
   }
 
   /*! \return The last element of the array */
   const T back() const {
     ArrayNode* p = GetArrayNode();
-    CHECK(p != nullptr) << "ValueError: cannot index a null array";
-    CHECK_GT(p->size_, 0) << "IndexError: cannot index an empty array";
+    ICHECK(p != nullptr) << "ValueError: cannot index a null array";
+    ICHECK_GT(p->size_, 0) << "IndexError: cannot index an empty array";
     return DowncastNoCheck<T>(*(p->end() - 1));
   }
 
@@ -734,7 +734,7 @@ class Array : public ObjectRef {
    * \param val The element to insert
    */
   void insert(iterator position, const T& val) {
-    CHECK(data_ != nullptr) << "ValueError: cannot insert a null array";
+    ICHECK(data_ != nullptr) << "ValueError: cannot insert a null array";
     int64_t idx = std::distance(begin(), position);
     int64_t size = GetArrayNode()->size_;
     auto addr = CopyOnWrite(1)                               //
@@ -755,7 +755,7 @@ class Array : public ObjectRef {
     if (first == last) {
       return;
     }
-    CHECK(data_ != nullptr) << "ValueError: cannot insert a null array";
+    ICHECK(data_ != nullptr) << "ValueError: cannot insert a null array";
     int64_t idx = std::distance(begin(), position);
     int64_t size = GetArrayNode()->size_;
     int64_t numel = std::distance(first, last);
@@ -767,9 +767,9 @@ class Array : public ObjectRef {
 
   /*! \brief Remove the last item of the list */
   void pop_back() {
-    CHECK(data_ != nullptr) << "ValueError: cannot pop_back because array is null";
+    ICHECK(data_ != nullptr) << "ValueError: cannot pop_back because array is null";
     int64_t size = GetArrayNode()->size_;
-    CHECK_GT(size, 0) << "ValueError: cannot pop_back because array is empty";
+    ICHECK_GT(size, 0) << "ValueError: cannot pop_back because array is empty";
     CopyOnWrite()->ShrinkBy(1);
   }
 
@@ -778,11 +778,11 @@ class Array : public ObjectRef {
    * \param position An iterator pointing to the element to be erased
    */
   void erase(iterator position) {
-    CHECK(data_ != nullptr) << "ValueError: cannot erase a null array";
+    ICHECK(data_ != nullptr) << "ValueError: cannot erase a null array";
     int64_t st = std::distance(begin(), position);
     int64_t size = GetArrayNode()->size_;
-    CHECK(0 <= st && st < size) << "ValueError: cannot erase at index " << st
-                                << ", because Array size is " << size;
+    ICHECK(0 <= st && st < size) << "ValueError: cannot erase at index " << st
+                                 << ", because Array size is " << size;
     CopyOnWrite()                             //
         ->MoveElementsLeft(st, st + 1, size)  //
         ->ShrinkBy(1);
@@ -797,12 +797,12 @@ class Array : public ObjectRef {
     if (first == last) {
       return;
     }
-    CHECK(data_ != nullptr) << "ValueError: cannot erase a null array";
+    ICHECK(data_ != nullptr) << "ValueError: cannot erase a null array";
     int64_t size = GetArrayNode()->size_;
     int64_t st = std::distance(begin(), first);
     int64_t ed = std::distance(begin(), last);
-    CHECK_LT(st, ed) << "ValueError: cannot erase array in range [" << st << ", " << ed << ")";
-    CHECK(0 <= st && st <= size && 0 <= ed && ed <= size)
+    ICHECK_LT(st, ed) << "ValueError: cannot erase array in range [" << st << ", " << ed << ")";
+    ICHECK(0 <= st && st <= size && 0 <= ed && ed <= size)
         << "ValueError: cannot erase array in range [" << st << ", " << ed << ")"
         << ", because array size is " << size;
     CopyOnWrite()                         //
@@ -815,7 +815,7 @@ class Array : public ObjectRef {
    * \param n The new size.
    */
   void resize(int64_t n) {
-    CHECK_GE(n, 0) << "ValueError: cannot resize an Array to negative size";
+    ICHECK_GE(n, 0) << "ValueError: cannot resize an Array to negative size";
     if (data_ == nullptr) {
       SwitchContainer(n);
       return;
@@ -856,8 +856,8 @@ class Array : public ObjectRef {
    */
   void Set(int64_t i, T value) {
     ArrayNode* p = this->CopyOnWrite();
-    CHECK(0 <= i && i < p->size_) << "IndexError: indexing " << i << " on an array of size "
-                                  << p->size_;
+    ICHECK(0 <= i && i < p->size_)
+        << "IndexError: indexing " << i << " on an array of size " << p->size_;
     *(p->MutableBegin() + i) = std::move(value);
   }
 
@@ -923,7 +923,7 @@ class Array : public ObjectRef {
   template <typename IterType>
   void Assign(IterType first, IterType last) {
     int64_t cap = std::distance(first, last);
-    CHECK_GE(cap, 0) << "ValueError: cannot construct an Array of negative size";
+    ICHECK_GE(cap, 0) << "ValueError: cannot construct an Array of negative size";
     ArrayNode* p = GetArrayNode();
     if (p != nullptr && data_.unique() && p->capacity_ >= cap) {
       // do not have to make new space
@@ -1565,8 +1565,8 @@ struct NullOptType {};
  *
  *  Optional<String> opt0 = nullptr;
  *  Optional<String> opt1 = String("xyz");
- *  CHECK(opt0 == nullptr);
- *  CHECK(opt1 == "xyz");
+ *  ICHECK(opt0 == nullptr);
+ *  ICHECK(opt1 == "xyz");
  *
  * \endcode
  */
@@ -1613,7 +1613,7 @@ class Optional : public ObjectRef {
    * \note This function performs not-null checking.
    */
   T value() const {
-    CHECK(data_ != nullptr);
+    ICHECK(data_ != nullptr);
     return T(data_);
   }
   /*!
diff --git a/include/tvm/runtime/data_type.h b/include/tvm/runtime/data_type.h
index cb817a8..25aadb5 100644
--- a/include/tvm/runtime/data_type.h
+++ b/include/tvm/runtime/data_type.h
@@ -24,8 +24,8 @@
 #ifndef TVM_RUNTIME_DATA_TYPE_H_
 #define TVM_RUNTIME_DATA_TYPE_H_
 
-#include <dmlc/logging.h>
 #include <tvm/runtime/c_runtime_api.h>
+#include <tvm/support/logging.h>
 
 #include <string>
 #include <type_traits>
@@ -74,7 +74,7 @@ class DataType {
     data_.bits = static_cast<uint8_t>(bits);
     data_.lanes = static_cast<uint16_t>(lanes);
     if (code == kBFloat) {
-      CHECK_EQ(bits, 16);
+      ICHECK_EQ(bits, 16);
     }
   }
   /*! \return The type code. */
@@ -212,7 +212,7 @@ inline int GetVectorBytes(DataType dtype) {
       dtype == DataType::Int(1)) {
     return 1;
   }
-  CHECK_EQ(data_bits % 8, 0U) << "Need to load/store by multiple of bytes";
+  ICHECK_EQ(data_bits % 8, 0U) << "Need to load/store by multiple of bytes";
   return data_bits / 8;
 }
 
@@ -373,7 +373,7 @@ inline DLDataType String2DLDataType(std::string s) {
   if (*xdelim == 'x') {
     t.lanes = static_cast<uint16_t>(strtoul(xdelim + 1, &endpt, 10));
   }
-  CHECK(endpt == s.c_str() + s.length()) << "unknown type " << s;
+  ICHECK(endpt == s.c_str() + s.length()) << "unknown type " << s;
   return t;
 }
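
A small sketch of how the helpers above compose (the helper names are from this header; the demo function is invented):

    #include <tvm/runtime/data_type.h>

    void dtype_demo() {
      // "float32x4" parses to code = kDLFloat, bits = 32, lanes = 4.
      DLDataType t = tvm::runtime::String2DLDataType("float32x4");
      ICHECK_EQ(t.bits, 32);
      ICHECK_EQ(t.lanes, 4);
      // GetVectorBytes ICHECKs that bits * lanes is byte-aligned: 128 bits -> 16 bytes.
      ICHECK_EQ(tvm::runtime::GetVectorBytes(tvm::runtime::DataType::Float(32, 4)), 16);
    }
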
 
diff --git a/include/tvm/runtime/ndarray.h b/include/tvm/runtime/ndarray.h
index 92b3857..0ff171d 100644
--- a/include/tvm/runtime/ndarray.h
+++ b/include/tvm/runtime/ndarray.h
@@ -325,29 +325,29 @@ inline bool NDArray::IsContiguous() const {
 }
 
 inline void NDArray::CopyFrom(const DLTensor* other) {
-  CHECK(data_ != nullptr);
+  ICHECK(data_ != nullptr);
   CopyFromTo(other, &(get_mutable()->dl_tensor));
 }
 
 inline void NDArray::CopyFrom(const NDArray& other) {
-  CHECK(data_ != nullptr);
-  CHECK(other.data_ != nullptr);
+  ICHECK(data_ != nullptr);
+  ICHECK(other.data_ != nullptr);
   CopyFromTo(&(other.get_mutable()->dl_tensor), &(get_mutable()->dl_tensor));
 }
 
 inline void NDArray::CopyTo(DLTensor* other) const {
-  CHECK(data_ != nullptr);
+  ICHECK(data_ != nullptr);
   CopyFromTo(&(get_mutable()->dl_tensor), other);
 }
 
 inline void NDArray::CopyTo(const NDArray& other) const {
-  CHECK(data_ != nullptr);
-  CHECK(other.data_ != nullptr);
+  ICHECK(data_ != nullptr);
+  ICHECK(other.data_ != nullptr);
   CopyFromTo(&(get_mutable()->dl_tensor), &(other.get_mutable()->dl_tensor));
 }
 
 inline NDArray NDArray::CopyTo(const DLContext& ctx) const {
-  CHECK(data_ != nullptr);
+  ICHECK(data_ != nullptr);
   const DLTensor* dptr = operator->();
   NDArray ret =
       Empty(std::vector<int64_t>(dptr->shape, dptr->shape + dptr->ndim), dptr->dtype, ctx);
@@ -422,7 +422,7 @@ inline bool SaveDLTensor(dmlc::Stream* strm, const DLTensor* tensor) {
     strm->Write(tensor->data, data_byte_size);
   } else {
     std::vector<uint8_t> bytes(data_byte_size);
-    CHECK_EQ(
+    ICHECK_EQ(
         TVMArrayCopyToBytes(const_cast<DLTensor*>(tensor), dmlc::BeginPtr(bytes), data_byte_size),
         0)
         << TVMGetLastError();
@@ -438,19 +438,19 @@ inline void NDArray::Save(dmlc::Stream* strm) const { SaveDLTensor(strm, operato
 
 inline bool NDArray::Load(dmlc::Stream* strm) {
   uint64_t header, reserved;
-  CHECK(strm->Read(&header)) << "Invalid DLTensor file format";
-  CHECK(strm->Read(&reserved)) << "Invalid DLTensor file format";
-  CHECK(header == kTVMNDArrayMagic) << "Invalid DLTensor file format";
+  ICHECK(strm->Read(&header)) << "Invalid DLTensor file format";
+  ICHECK(strm->Read(&reserved)) << "Invalid DLTensor file format";
+  ICHECK(header == kTVMNDArrayMagic) << "Invalid DLTensor file format";
   DLContext ctx;
   int ndim;
   DLDataType dtype;
-  CHECK(strm->Read(&ctx)) << "Invalid DLTensor file format";
-  CHECK(strm->Read(&ndim)) << "Invalid DLTensor file format";
-  CHECK(strm->Read(&dtype)) << "Invalid DLTensor file format";
-  CHECK_EQ(ctx.device_type, kDLCPU) << "Invalid DLTensor context: can only save as CPU tensor";
+  ICHECK(strm->Read(&ctx)) << "Invalid DLTensor file format";
+  ICHECK(strm->Read(&ndim)) << "Invalid DLTensor file format";
+  ICHECK(strm->Read(&dtype)) << "Invalid DLTensor file format";
+  ICHECK_EQ(ctx.device_type, kDLCPU) << "Invalid DLTensor context: can only save as CPU tensor";
   std::vector<int64_t> shape(ndim);
   if (ndim != 0) {
-    CHECK(strm->ReadArray(&shape[0], ndim)) << "Invalid DLTensor file format";
+    ICHECK(strm->ReadArray(&shape[0], ndim)) << "Invalid DLTensor file format";
   }
   NDArray ret = NDArray::Empty(shape, dtype, ctx);
   int64_t num_elems = 1;
@@ -459,12 +459,12 @@ inline bool NDArray::Load(dmlc::Stream* strm) {
     num_elems *= ret->shape[i];
   }
   int64_t data_byte_size;
-  CHECK(strm->Read(&data_byte_size)) << "Invalid DLTensor file format";
-  CHECK(data_byte_size == num_elems * elem_bytes) << "Invalid DLTensor file format";
+  ICHECK(strm->Read(&data_byte_size)) << "Invalid DLTensor file format";
+  ICHECK(data_byte_size == num_elems * elem_bytes) << "Invalid DLTensor file format";
   auto read_ret = strm->Read(ret->data, data_byte_size);
   // Only check non-empty data
   if (ndim > 0 && shape[0] != 0) {
-    CHECK(read_ret) << "Invalid DLTensor file format";
+    ICHECK(read_ret) << "Invalid DLTensor file format";
   }
   if (!DMLC_IO_NO_ENDIAN_SWAP) {
     dmlc::ByteSwap(ret->data, elem_bytes, num_elems);
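
Save/Load above define a simple stream format: magic header, reserved word, context, ndim, dtype, shape, byte size, then the raw data. A hedged round-trip sketch, assuming dmlc-core's in-memory stream; note the ICHECK_EQ on device_type means only CPU tensors round-trip:

    #include <dmlc/memory_io.h>
    #include <tvm/runtime/ndarray.h>
    #include <string>

    void ndarray_io_demo() {
      using tvm::runtime::NDArray;
      NDArray a = NDArray::Empty({2, 3}, {kDLFloat, 32, 1}, {kDLCPU, 0});

      std::string blob;
      dmlc::MemoryStringStream wstrm(&blob);
      a.Save(&wstrm);

      dmlc::MemoryStringStream rstrm(&blob);
      NDArray b;
      ICHECK(b.Load(&rstrm));  // every Read above is ICHECK-guarded
    }
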
diff --git a/include/tvm/runtime/packed_func.h b/include/tvm/runtime/packed_func.h
index 2305f12..4303899 100644
--- a/include/tvm/runtime/packed_func.h
+++ b/include/tvm/runtime/packed_func.h
@@ -195,7 +195,7 @@ class TypedPackedFunc<R(Args...)> {
    * // construct from packed function
    * TypedPackedFunc<int(int)> ftyped(packed);
    * // call the typed version.
-   * CHECK_EQ(ftyped(1), 2);
+   * ICHECK_EQ(ftyped(1), 2);
    * \endcode
    *
    * \param packed The packed function
@@ -225,7 +225,7 @@ class TypedPackedFunc<R(Args...)> {
    * // construct from packed function
    * TypedPackedFunc<int(int)> ftyped(typed_lambda);
    * // call the typed version.
-   * CHECK_EQ(ftyped(1), 2);
+   * ICHECK_EQ(ftyped(1), 2);
    * \endcode
    *
    * \param typed_lambda typed lambda function.
@@ -246,7 +246,7 @@ class TypedPackedFunc<R(Args...)> {
    * TypedPackedFunc<int(int)> ftyped;
    * ftyped = [](int x) { return x + 1; }
    * // call the typed version.
-   * CHECK_EQ(ftyped(1), 2);
+   * ICHECK_EQ(ftyped(1), 2);
    * \endcode
    *
    * \param typed_lambda typed lambda function.
@@ -337,7 +337,7 @@ inline const char* ArgTypeCode2Str(int type_code);
 
 // macro to check type code.
 #define TVM_CHECK_TYPE_CODE(CODE, T) \
-  CHECK_EQ(CODE, T) << " expected " << ArgTypeCode2Str(T) << " but get " << ArgTypeCode2Str(CODE)
+  ICHECK_EQ(CODE, T) << " expected " << ArgTypeCode2Str(T) << " but get " << ArgTypeCode2Str(CODE)
 
 /*!
  * \brief Type traits for runtime type check during FFI conversion.
@@ -382,8 +382,8 @@ class TVMPODValue_ {
   }
   operator int() const {
     TVM_CHECK_TYPE_CODE(type_code_, kDLInt);
-    CHECK_LE(value_.v_int64, std::numeric_limits<int>::max());
-    CHECK_GE(value_.v_int64, std::numeric_limits<int>::min());
+    ICHECK_LE(value_.v_int64, std::numeric_limits<int>::max());
+    ICHECK_GE(value_.v_int64, std::numeric_limits<int>::min());
     return static_cast<int>(value_.v_int64);
   }
   operator bool() const {
@@ -491,7 +491,7 @@ class TVMArgValue : public TVMPODValue_ {
     } else if (type_code_ == kTVMStr) {
       return std::string(value_.v_str);
     } else {
-      CHECK(IsObjectRef<tvm::runtime::String>());
+      ICHECK(IsObjectRef<tvm::runtime::String>());
       return AsObjectRef<tvm::runtime::String>().operator std::string();
     }
   }
@@ -719,7 +719,7 @@ class TVMRetValue : public TVMPODValue_ {
    */
   void MoveToCHost(TVMValue* ret_value, int* ret_type_code) {
     // cannot move str; need specially handle.
-    CHECK(type_code_ != kTVMStr && type_code_ != kTVMBytes);
+    ICHECK(type_code_ != kTVMStr && type_code_ != kTVMBytes);
     *ret_value = value_;
     *ret_type_code = type_code_;
     type_code_ = kTVMNullptr;
@@ -733,7 +733,7 @@ class TVMRetValue : public TVMPODValue_ {
    */
   static TVMRetValue MoveFromCHost(TVMValue value, int type_code) {
     // Can move POD and everything under the object system.
-    CHECK(type_code <= kTVMPackedFuncHandle || type_code == kTVMNDArrayHandle);
+    ICHECK(type_code <= kTVMPackedFuncHandle || type_code == kTVMNDArrayHandle);
     TVMRetValue ret;
     ret.value_ = value;
     ret.type_code_ = type_code;
@@ -741,8 +741,8 @@ class TVMRetValue : public TVMPODValue_ {
   }
   /*! \return The value field, if the data is POD */
   const TVMValue& value() const {
-    CHECK(type_code_ != kTVMObjectHandle && type_code_ != kTVMPackedFuncHandle &&
-          type_code_ != kTVMModuleHandle && type_code_ != kTVMStr)
+    ICHECK(type_code_ != kTVMObjectHandle && type_code_ != kTVMPackedFuncHandle &&
+           type_code_ != kTVMModuleHandle && type_code_ != kTVMStr)
         << "TVMRetValue.value can only be used for POD data";
     return value_;
   }
@@ -966,8 +966,8 @@ struct PackedFuncValueConverter {
   }
 
 inline TVMArgValue TVMArgs::operator[](int i) const {
-  CHECK_LT(i, num_args) << "not enough argument passed, " << num_args << " passed"
-                        << " but request arg[" << i << "].";
+  ICHECK_LT(i, num_args) << "not enough arguments passed, " << num_args << " passed"
+                         << " but requesting arg[" << i << "].";
   return TVMArgValue(values[i], type_codes[i]);
 }
 
@@ -1090,7 +1090,7 @@ class TVMArgsSetter {
   }
   TVM_ALWAYS_INLINE void operator()(size_t i, uint64_t value) const {
     values_[i].v_int64 = static_cast<int64_t>(value);
-    CHECK_LE(value, static_cast<uint64_t>(std::numeric_limits<int64_t>::max()));
+    ICHECK_LE(value, static_cast<uint64_t>(std::numeric_limits<int64_t>::max()));
     type_codes_[i] = kDLInt;
   }
   TVM_ALWAYS_INLINE void operator()(size_t i, double value) const {
@@ -1155,7 +1155,7 @@ class TVMArgsSetter {
       values_[i].v_str = value.ptr<std::string>()->c_str();
       type_codes_[i] = kTVMStr;
     } else {
-      CHECK_NE(value.type_code(), kTVMBytes) << "not handled.";
+      ICHECK_NE(value.type_code(), kTVMBytes) << "not handled.";
       values_[i] = value.value_;
       type_codes_[i] = value.type_code();
     }
@@ -1234,7 +1234,7 @@ struct unpack_call_dispatcher<void, 0, index, F> {
 
 template <typename R, int nargs, typename F>
 TVM_ALWAYS_INLINE void unpack_call(const F& f, const TVMArgs& args, TVMRetValue* rv) {
-  CHECK_EQ(nargs, args.size()) << "Expect " << nargs << " arguments but get " << args.size();
+  ICHECK_EQ(nargs, args.size()) << "Expect " << nargs << " arguments but get " << args.size();
   unpack_call_dispatcher<R, nargs, 0, F>::run(f, args, rv);
 }
 
@@ -1363,7 +1363,7 @@ inline TObjectRef TVMPODValue_::AsObjectRef() const {
   using ContainerType = typename TObjectRef::ContainerType;
 
   if (type_code_ == kTVMNullptr) {
-    CHECK(TObjectRef::_type_is_nullable)
+    ICHECK(TObjectRef::_type_is_nullable)
         << "Expect a not null value of " << ContainerType::_type_key;
     return TObjectRef(ObjectPtr<Object>(nullptr));
   }
@@ -1373,7 +1373,7 @@ inline TObjectRef TVMPODValue_::AsObjectRef() const {
     TVM_CHECK_TYPE_CODE(type_code_, kTVMNDArrayHandle);
     ObjectPtr<Object> data =
         NDArray::FFIDataFromHandle(static_cast<TVMArrayHandle>(value_.v_handle));
-    CHECK(data->IsInstance<ContainerType>())
+    ICHECK(data->IsInstance<ContainerType>())
         << "Expect " << ContainerType::_type_key << " but get " << data->GetTypeKey();
     return TObjectRef(data);
   }
@@ -1381,20 +1381,20 @@ inline TObjectRef TVMPODValue_::AsObjectRef() const {
     // Casting to a sub-class of Module
     TVM_CHECK_TYPE_CODE(type_code_, kTVMModuleHandle);
     ObjectPtr<Object> data = GetObjectPtr<Object>(static_cast<Object*>(value_.v_handle));
-    CHECK(data->IsInstance<ContainerType>())
+    ICHECK(data->IsInstance<ContainerType>())
         << "Expect " << ContainerType::_type_key << " but get " << data->GetTypeKey();
     return TObjectRef(data);
   }
   if (type_code_ == kTVMObjectHandle) {
     // normal object type check.
     Object* ptr = static_cast<Object*>(value_.v_handle);
-    CHECK(ObjectTypeChecker<TObjectRef>::Check(ptr))
+    ICHECK(ObjectTypeChecker<TObjectRef>::Check(ptr))
         << "Expect " << ObjectTypeChecker<TObjectRef>::TypeName() << " but get "
         << ptr->GetTypeKey();
     return TObjectRef(GetObjectPtr<Object>(ptr));
   } else if (type_code_ == kTVMObjectRValueRefArg) {
     Object* ptr = *static_cast<Object**>(value_.v_handle);
-    CHECK(ObjectTypeChecker<TObjectRef>::Check(ptr))
+    ICHECK(ObjectTypeChecker<TObjectRef>::Check(ptr))
         << "Expect " << ObjectTypeChecker<TObjectRef>::TypeName() << " but get "
         << ptr->GetTypeKey();
     return TObjectRef(GetObjectPtr<Object>(ptr));
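
The doc comments earlier in this header already show the intended round trip; collected into one compilable sketch (the lambda body is illustrative, the API is not new):

    #include <tvm/runtime/packed_func.h>
    #include <tvm/support/logging.h>

    using namespace tvm::runtime;

    void packed_func_demo() {
      PackedFunc packed([](TVMArgs args, TVMRetValue* rv) {
        int x = args[0];  // TVMArgValue -> int goes through the ICHECK_LE/ICHECK_GE range checks
        *rv = x + 1;
      });
      TypedPackedFunc<int(int)> ftyped(packed);
      ICHECK_EQ(ftyped(1), 2);  // unpack_call ICHECKs the argument count first
    }
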
diff --git a/include/tvm/runtime/vm/bytecode.h b/include/tvm/runtime/vm/bytecode.h
index edcbd88..e858c44 100644
--- a/include/tvm/runtime/vm/bytecode.h
+++ b/include/tvm/runtime/vm/bytecode.h
@@ -25,6 +25,7 @@
 #define TVM_RUNTIME_VM_BYTECODE_H_
 
 #include <tvm/runtime/data_type.h>
+#include <tvm/support/logging.h>
 
 #include <iostream>
 #include <vector>
diff --git a/include/tvm/support/logging.h b/include/tvm/support/logging.h
index 4322435..d98363e 100644
--- a/include/tvm/support/logging.h
+++ b/include/tvm/support/logging.h
@@ -125,13 +125,13 @@ constexpr const char* kTVM_INTERNAL_ERROR_MESSAGE =
 #define ICHECK_BINARY_OP(name, op, x, y)                           \
   if (dmlc::LogCheckError _check_err = dmlc::LogCheck##name(x, y)) \
   dmlc::LogMessageFatal(__FILE__, __LINE__).stream()               \
-      << kTVM_INTERNAL_ERROR_MESSAGE << std::endl                  \
+      << tvm::kTVM_INTERNAL_ERROR_MESSAGE << std::endl             \
       << ICHECK_INDENT << "Check failed: " << #x " " #op " " #y << *(_check_err.str) << ": "
 
 #define ICHECK(x)                                    \
   if (!(x))                                          \
   dmlc::LogMessageFatal(__FILE__, __LINE__).stream() \
-      << kTVM_INTERNAL_ERROR_MESSAGE << ICHECK_INDENT << "Check failed: " #x << " == false: "
+      << tvm::kTVM_INTERNAL_ERROR_MESSAGE << ICHECK_INDENT << "Check failed: " #x << " == false: "
 
 #define ICHECK_LT(x, y) ICHECK_BINARY_OP(_LT, <, x, y)
 #define ICHECK_GT(x, y) ICHECK_BINARY_OP(_GT, >, x, y)
@@ -139,10 +139,10 @@ constexpr const char* kTVM_INTERNAL_ERROR_MESSAGE =
 #define ICHECK_GE(x, y) ICHECK_BINARY_OP(_GE, >=, x, y)
 #define ICHECK_EQ(x, y) ICHECK_BINARY_OP(_EQ, ==, x, y)
 #define ICHECK_NE(x, y) ICHECK_BINARY_OP(_NE, !=, x, y)
-#define ICHECK_NOTNULL(x)                                                                   \
-  ((x) == nullptr ? dmlc::LogMessageFatal(__FILE__, __LINE__).stream()                      \
-                        << kTVM_INTERNAL_ERROR_MESSAGE << __INDENT << "Check not null: " #x \
-                        << ' ',                                                             \
+#define ICHECK_NOTNULL(x)                                                                        \
+  ((x) == nullptr ? dmlc::LogMessageFatal(__FILE__, __LINE__).stream()                           \
+                        << tvm::kTVM_INTERNAL_ERROR_MESSAGE << ICHECK_INDENT << "Check not null: " #x \
+                        << ' ',                                                                  \
    (x) : (x))  // NOLINT(*)
 
 /*! \brief The diagnostic level, controls the printing of the message. */
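
The one-line summary of the rename: ICHECK* keeps the familiar dmlc CHECK* semantics but prefixes tvm::kTVM_INTERNAL_ERROR_MESSAGE, so a tripped check is reported as an internal TVM invariant violation rather than as a user-facing error. Usage is unchanged; a sketch:

    #include <tvm/support/logging.h>

    void icheck_demo(int lanes, int* p) {
      ICHECK_GT(lanes, 0) << "lanes must be positive";  // internal invariant
      int v = *ICHECK_NOTNULL(p);  // on success the pointer passes straight through
      (void)v;
    }
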
diff --git a/include/tvm/target/target_kind.h b/include/tvm/target/target_kind.h
index dd14602..c9ef736 100644
--- a/include/tvm/target/target_kind.h
+++ b/include/tvm/target/target_kind.h
@@ -295,7 +295,7 @@ inline TargetKindAttrMap<ValueType> TargetKind::GetAttrMap(const String& attr_na
 template <typename ValueType>
 inline TargetKindRegEntry& TargetKindRegEntry::set_attr(const String& attr_name,
                                                         const ValueType& value, int plevel) {
-  CHECK_GT(plevel, 0) << "plevel in set_attr must be greater than 0";
+  ICHECK_GT(plevel, 0) << "plevel in set_attr must be greater than 0";
   runtime::TVMRetValue rv;
   rv = value;
   UpdateAttr(attr_name, rv, plevel);
@@ -321,7 +321,7 @@ inline TargetKindRegEntry& TargetKindRegEntry::set_attrs_preprocessor(FLambda f)
 
 template <typename ValueType>
 inline TargetKindRegEntry& TargetKindRegEntry::add_attr_option(const String& key) {
-  CHECK(!kind_->key2vtype_.count(key))
+  ICHECK(!kind_->key2vtype_.count(key))
       << "AttributeError: add_attr_option failed because '" << key << "' has been set once";
   kind_->key2vtype_[key] = detail::ValueTypeInfoMaker<ValueType>()();
   return *this;
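
set_attr and add_attr_option above are the registration entry points; a hedged sketch of their use (the "mydevice" kind and its options are invented for illustration):

    #include <tvm/target/target_kind.h>

    namespace tvm {

    TVM_REGISTER_TARGET_KIND("mydevice", kDLCPU)
        .add_attr_option<String>("mcpu")  // a second add_attr_option for "mcpu"
                                          // would trip the ICHECK above
        .set_attr<int>("my_attr", 42);    // default plevel is 10, so plevel > 0 holds

    }  // namespace tvm
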
diff --git a/include/tvm/tir/data_layout.h b/include/tvm/tir/data_layout.h
index ee93a06..73da05c 100644
--- a/include/tvm/tir/data_layout.h
+++ b/include/tvm/tir/data_layout.h
@@ -255,9 +255,9 @@ class Layout : public ObjectRef {
   }
 
   const LayoutAxis& operator[](int32_t i) const {
-    CHECK(defined()) << "Try to access axis from an undefined layout.";
+    ICHECK(defined()) << "Try to access axis from an undefined layout.";
     int32_t index = i < 0 ? static_cast<int32_t>(ndim() + i) : i;
-    CHECK(index >= 0 && static_cast<size_t>(index) < ndim()) << "Invalid index " << i;
+    ICHECK(index >= 0 && static_cast<size_t>(index) < ndim()) << "Invalid index " << i;
     const tir::IterVar axis = operator->()->axes[index];
     return LayoutAxis::Get(axis);
   }
diff --git a/include/tvm/tir/expr_functor.h b/include/tvm/tir/expr_functor.h
index 3f73d21..b5f1d64 100644
--- a/include/tvm/tir/expr_functor.h
+++ b/include/tvm/tir/expr_functor.h
@@ -58,7 +58,7 @@ namespace tir {
  *  };
  *  MyExprFunctor f;
  *  Var x("x");
- *  CHECK_EQ(f(x + 1, 2), 3);
+ *  ICHECK_EQ(f(x + 1, 2), 3);
  * \endcode
  *
  * \note Why do we need this more powerful Functor:
diff --git a/include/tvm/topi/broadcast.h b/include/tvm/topi/broadcast.h
index d03ddc9..f4f4f2c 100644
--- a/include/tvm/topi/broadcast.h
+++ b/include/tvm/topi/broadcast.h
@@ -49,17 +49,17 @@ inline tvm::te::Tensor broadcast_to(const tvm::te::Tensor& t,
                                     const tvm::Array<tvm::PrimExpr>& output_shape,
                                     std::string name = "T_broadcast_to",
                                     std::string tag = kBroadcast) {
-  CHECK_GE(output_shape.size(), t->shape.size())
+  ICHECK_GE(output_shape.size(), t->shape.size())
       << "Not a broadcast, output dimensionality smaller than input.\noutput: " << output_shape
       << "\nvs\ninput: " << t;
   auto bh = detail::BroadcastShape(output_shape, t->shape);
-  CHECK_EQ(output_shape.size(), bh.common_shape.size());
+  ICHECK_EQ(output_shape.size(), bh.common_shape.size());
   Array<PrimExpr> oshape;
   for (size_t i = 0; i < output_shape.size(); ++i) {
     if (output_shape[i].as<tir::IntImmNode>() == nullptr) {
       oshape.push_back(output_shape[i]);
     } else {
-      CHECK(topi::detail::EqualCheck(output_shape[i], bh.common_shape[i]));
+      ICHECK(topi::detail::EqualCheck(output_shape[i], bh.common_shape[i]));
       oshape.push_back(bh.common_shape[i]);
     }
   }
diff --git a/include/tvm/topi/cuda/dense.h b/include/tvm/topi/cuda/dense.h
index 447486d..7fd3107 100644
--- a/include/tvm/topi/cuda/dense.h
+++ b/include/tvm/topi/cuda/dense.h
@@ -53,10 +53,10 @@ namespace cuda {
 inline tvm::te::Tensor dense_cuda(const Target& target, const tvm::te::Tensor& data,
                                   const tvm::te::Tensor& weight, const tvm::te::Tensor& bias,
                                   const DataType& out_dtype) {
-  CHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data";
-  CHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight";
+  ICHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data";
+  ICHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight";
   if (bias.defined()) {
-    CHECK_EQ(bias->shape.size(), 1) << "dense requires 1-D bias";
+    ICHECK_EQ(bias->shape.size(), 1) << "dense requires 1-D bias";
   }
 
   auto batch = data->shape[0];
@@ -64,7 +64,7 @@ inline tvm::te::Tensor dense_cuda(const Target& target, const tvm::te::Tensor& d
   auto out_dim = weight->shape[0];
 
   if (target->GetLibs().count("cublas")) {
-    CHECK_EQ(data->dtype, out_dtype) << "Mixed precision not supported.";
+    ICHECK_EQ(data->dtype, out_dtype) << "Mixed precision not supported.";
     auto mm = topi::contrib::cublas_matmul(data, weight, false, true);
     if (bias.defined()) {
       mm = tvm::te::compute(
diff --git a/include/tvm/topi/cuda/reduction.h b/include/tvm/topi/cuda/reduction.h
index acfcc76..7160419 100644
--- a/include/tvm/topi/cuda/reduction.h
+++ b/include/tvm/topi/cuda/reduction.h
@@ -60,7 +60,7 @@ Schedule ScheduleReduce(const Target& target, Operation op, Schedule sch,
   }
 
   auto out_stage = sch[data_out];
-  CHECK_GT(out_stage->op.as<ComputeOpNode>()->reduce_axis.size(), 0)
+  ICHECK_GT(out_stage->op.as<ComputeOpNode>()->reduce_axis.size(), 0)
       << "reduce_axis must be greater than zero";
 
   bool all_reduce;
@@ -183,7 +183,7 @@ void TraverseAfterReduce(const Target& target, Schedule s, Operation op) {
  * \return A schedule for the given ops.
  */
 Schedule schedule_reduce(const Target& target, Array<Tensor> outs) {
-  CHECK_EQ(outs.size(), 1) << "outs must have size 1";
+  ICHECK_EQ(outs.size(), 1) << "outs must have size 1";
   Array<Operation> out_ops;
   for (auto t : outs) {
     out_ops.push_back(t->op);
diff --git a/include/tvm/topi/detail/broadcast.h b/include/tvm/topi/detail/broadcast.h
index e719348..5c70182 100644
--- a/include/tvm/topi/detail/broadcast.h
+++ b/include/tvm/topi/detail/broadcast.h
@@ -59,7 +59,7 @@ inline BroadcastHelper BroadcastShape(const tvm::Array<tvm::PrimExpr>& shape1,
       bh.vars1.push_front(bh.all_vars[0]);
       bh.vars2.push_front(bh.all_vars[0]);
     } else if (topi::detail::EqualCheck(one, shape1[s1_size - i])) {
-      CHECK(!topi::detail::EqualCheck(one, shape2[s2_size - i]));
+      ICHECK(!topi::detail::EqualCheck(one, shape2[s2_size - i]));
       bh.common_shape.push_front(shape2[s2_size - i]);
       bh.vars2.push_front(bh.all_vars[0]);
     } else if (topi::detail::EqualCheck(one, shape2[s2_size - i])) {
@@ -78,10 +78,10 @@ inline BroadcastHelper BroadcastShape(const tvm::Array<tvm::PrimExpr>& shape1,
       bh.vars1.push_front(bh.all_vars[0]);
       bh.vars2.push_front(bh.all_vars[0]);
     } else {
-      CHECK(false) << "Incompatible broadcast dims: " << shape1[s1_size - i] << " and "
-                   << shape2[s2_size - i]
-                   << " in: " << tvm::Array<tvm::PrimExpr>(shape1.begin(), shape1.end()) << " and "
-                   << tvm::Array<tvm::PrimExpr>(shape2.begin(), shape2.end());
+      ICHECK(false) << "Incompatible broadcast dims: " << shape1[s1_size - i] << " and "
+                    << shape2[s2_size - i]
+                    << " in: " << tvm::Array<tvm::PrimExpr>(shape1.begin(), shape1.end()) << " and "
+                    << tvm::Array<tvm::PrimExpr>(shape2.begin(), shape2.end());
     }
   }
   // Remaining dimensions whether on shape1 or shape2 can always be completed
@@ -100,7 +100,7 @@ inline tvm::Array<tvm::PrimExpr> InputIndexFromBroadcast(
     const tvm::Array<tvm::tir::Var>& ovars, const tvm::te::Tensor& T,
     const std::deque<tvm::tir::Var>& my_vars, const std::deque<tvm::tir::Var>& all_vars) {
   tvm::Array<tvm::PrimExpr> ivars;
-  CHECK_EQ(ovars.size(), all_vars.size());
+  ICHECK_EQ(ovars.size(), all_vars.size());
   // N^2, could use a map but NBD.
   size_t expected_dims = T->shape.size();
   for (size_t i = 0; i < ovars.size(); ++i) {
@@ -118,7 +118,7 @@ inline tvm::Array<tvm::PrimExpr> InputIndexFromBroadcast(
       ivars.push_back(tvm::tir::make_zero(ovars[i].dtype()));
     }
   }
-  CHECK(expected_dims == ivars.size());
+  ICHECK(expected_dims == ivars.size());
   return ivars;
 }
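
BroadcastShape above implements the usual NumPy-style rule, aligning shapes from the trailing dimension: equal extents pass through, an extent of 1 stretches to match the other side, and anything else is the ICHECK(false) "Incompatible broadcast dims" failure. A worked example:

    shape1 = (2, 1, 3)
    shape2 =    (4, 3)
    common = (2, 4, 3)   // 3 vs 3 -> 3; 1 vs 4 -> 4; the leading 2 is copied

    shape1 = (2, 2, 3)
    shape2 =    (4, 3)   // 2 vs 4 -> aborts with "Incompatible broadcast dims"
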
 
diff --git a/include/tvm/topi/detail/constant_utils.h b/include/tvm/topi/detail/constant_utils.h
index 201a0da..412c793 100644
--- a/include/tvm/topi/detail/constant_utils.h
+++ b/include/tvm/topi/detail/constant_utils.h
@@ -76,7 +76,7 @@ inline std::vector<int> GetConstIntValues(Array<PrimExpr> exprs, const std::stri
   std::vector<int> result;
   if (!exprs.defined()) return result;
   for (auto expr : exprs) {
-    CHECK(IsConstInt(expr)) << "All elements of " << var_name << " must be constant integers";
+    ICHECK(IsConstInt(expr)) << "All elements of " << var_name << " must be constant integers";
     result.push_back(GetConstInt(expr));
   }
   return result;
@@ -96,7 +96,7 @@ inline std::vector<int64_t> GetConstInt64Values(Array<PrimExpr> exprs,
   std::vector<int64_t> result;
   if (!exprs.defined()) return result;
   for (auto expr : exprs) {
-    CHECK(IsConstInt(expr)) << "All elements of " << var_name << " must be constant integers";
+    ICHECK(IsConstInt(expr)) << "All elements of " << var_name << " must be constant integers";
     result.push_back(GetConstInt(expr));
   }
   return result;
diff --git a/include/tvm/topi/detail/extern.h b/include/tvm/topi/detail/extern.h
index 48c3e18..caca1e8 100644
--- a/include/tvm/topi/detail/extern.h
+++ b/include/tvm/topi/detail/extern.h
@@ -79,7 +79,7 @@ inline Array<Tensor> make_extern(const Array<Array<PrimExpr> >& out_shapes,
                                  const std::vector<DataType>& out_types,
                                  const Array<Tensor>& inputs, FExtern fextern, std::string name,
                                  std::string tag, ::tvm::Map<String, ObjectRef> attrs) {
-  CHECK_EQ(out_shapes.size(), out_types.size())
+  ICHECK_EQ(out_shapes.size(), out_types.size())
       << "make_extern: out_shapes and out_types must have equal size";
 
   Array<Buffer> input_placeholders;
@@ -112,7 +112,7 @@ inline Array<Tensor> make_extern(const Array<Array<PrimExpr> >& out_shapes,
  * \return An expression representing the pack operation
  */
 inline PrimExpr pack_buffer(Buffer buf) {
-  CHECK_GT(buf->shape.size(), 0) << "buf shape must have at least one element";
+  ICHECK_GT(buf->shape.size(), 0) << "buf shape must have at least one element";
   auto shape =
       tvm::tir::Call(DataType::Handle(), tvm::tir::builtin::tvm_stack_make_shape(), buf->shape);
   PrimExpr strides;
diff --git a/include/tvm/topi/detail/ravel_unravel.h b/include/tvm/topi/detail/ravel_unravel.h
index fc77509..dd7bcac 100644
--- a/include/tvm/topi/detail/ravel_unravel.h
+++ b/include/tvm/topi/detail/ravel_unravel.h
@@ -43,8 +43,8 @@ using namespace tvm::te;
  * \return The index after flattening
  */
 inline PrimExpr RavelIndex(Array<PrimExpr> indices, Array<PrimExpr> shape) {
-  CHECK_EQ(indices.size(), shape.size()) << "indices and shape must have equal size";
-  CHECK_GT(indices.size(), 0) << "indices must not be empty";
+  ICHECK_EQ(indices.size(), shape.size()) << "indices and shape must have equal size";
+  ICHECK_GT(indices.size(), 0) << "indices must not be empty";
   PrimExpr idx;
   for (size_t i = 0; i < indices.size(); ++i) {
     if (i == 0) {
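
RavelIndex is Horner's rule for row-major flattening: for a shape (d0, d1, ..., dn-1),

    idx = ((i0 * d1 + i1) * d2 + i2) * d3 + ... + in-1

so, for example, indices (1, 2) in shape (3, 4) flatten to 1 * 4 + 2 = 6.
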
diff --git a/include/tvm/topi/elemwise.h b/include/tvm/topi/elemwise.h
index f537c9c..cad72cb 100644
--- a/include/tvm/topi/elemwise.h
+++ b/include/tvm/topi/elemwise.h
@@ -327,7 +327,7 @@ inline Tensor reinterpret(const Tensor& x, DataType type, std::string name = "te
  */
 inline Tensor elemwise_sum(const Array<Tensor>& xs, std::string name = "T_elemwise_sum",
                            std::string tag = kElementWise) {
-  CHECK_GT(xs.size(), 0) << "elemwise sum must have at least one input tensor.";
+  ICHECK_GT(xs.size(), 0) << "elemwise sum must have at least one input tensor.";
   return compute(
       xs[0]->shape,
       [&](const Array<Var>& i) {
diff --git a/include/tvm/topi/nn.h b/include/tvm/topi/nn.h
index d257d3c..ba1be34 100644
--- a/include/tvm/topi/nn.h
+++ b/include/tvm/topi/nn.h
@@ -98,8 +98,8 @@ inline tvm::te::Tensor leaky_relu(const tvm::te::Tensor& t, double alpha = 0.1,
 inline tvm::te::Tensor prelu(const tvm::te::Tensor& x, const tvm::te::Tensor& slope,
                              const int axis = 1, std::string name = "T_prelu",
                              std::string tag = kBroadcast) {
-  CHECK((size_t)axis < x->shape.size()) << "Wrong axis (" << axis << ")value. ";
-  CHECK(topi::detail::GetConstInt(slope->shape[0]) == topi::detail::GetConstInt(x->shape[axis]))
+  ICHECK((size_t)axis < x->shape.size()) << "Wrong axis (" << axis << ") value.";
+  ICHECK(topi::detail::GetConstInt(slope->shape[0]) == topi::detail::GetConstInt(x->shape[axis]))
       << "Wrong slope shape received.";
 
   return tvm::te::compute(
@@ -162,8 +162,8 @@ inline tvm::te::Tensor pad(const tvm::te::Tensor& t, const tvm::Array<tvm::PrimE
   }
 
   arith::Analyzer analyzer;
-  CHECK_GE(pad_before.size(), 1);
-  CHECK_EQ(pad_before.size(), pad_after.size());
+  ICHECK_GE(pad_before.size(), 1);
+  ICHECK_EQ(pad_before.size(), pad_after.size());
   tvm::Array<tvm::PrimExpr> pad_before_int32;
   tvm::Array<tvm::PrimExpr> pad_after_int32;
 
@@ -262,8 +262,8 @@ inline tvm::te::Tensor conv2d_nchw(const tvm::te::Tensor& I, const tvm::te::Tens
                                    int pad_h = 0, int pad_w = 0, int stride_h = 1, int stride_w = 1,
                                    std::string name = "T_conv2d_nchw",
                                    std::string tag = kConv2dNCHW) {
-  CHECK_EQ(4, I->shape.size());
-  CHECK_EQ(4, W->shape.size());
+  ICHECK_EQ(4, I->shape.size());
+  ICHECK_EQ(4, W->shape.size());
   auto pH = I->shape[2];
   auto pW = I->shape[3];
   tvm::Array<tvm::PrimExpr> output_shape{
@@ -306,8 +306,8 @@ inline tvm::te::Tensor conv2d_hwcn(const tvm::te::Tensor& I, const tvm::te::Tens
                                    int pad_h = 0, int pad_w = 0, int stride_h = 1, int stride_w = 1,
                                    std::string name = "T_conv2d_hwcn",
                                    std::string tag = kConv2dHWCN) {
-  CHECK_EQ(4, I->shape.size());
-  CHECK_EQ(4, W->shape.size());
+  ICHECK_EQ(4, I->shape.size());
+  ICHECK_EQ(4, W->shape.size());
   auto pH = I->shape[2];
   auto pW = I->shape[3];
   tvm::Array<tvm::PrimExpr> output_shape{
@@ -351,8 +351,8 @@ inline tvm::te::Tensor depthwise_conv2d_nchw(const tvm::te::Tensor& I, const tvm
                                              int stride_w = 1,
                                              std::string name = "T_depthwise_conv2d_nchw",
                                              std::string tag = kDepthwiseConv2dNCHW) {
-  CHECK_EQ(4, I->shape.size());
-  CHECK_EQ(4, W->shape.size());
+  ICHECK_EQ(4, I->shape.size());
+  ICHECK_EQ(4, W->shape.size());
   auto pH = I->shape[2];
   auto pW = I->shape[3];
   auto pCM = W->shape[1];  // channel_multiplier
@@ -380,8 +380,8 @@ inline tvm::te::Tensor depthwise_conv2d_nhwc(const tvm::te::Tensor& I, const tvm
                                              int stride_w = 1,
                                              std::string name = "T_depthwise_conv2d_nhwc",
                                              std::string tag = kDepthwiseConv2dNHWC) {
-  CHECK_EQ(4, I->shape.size());
-  CHECK_EQ(4, W->shape.size());
+  ICHECK_EQ(4, I->shape.size());
+  ICHECK_EQ(4, W->shape.size());
   auto pH = I->shape[1];
   auto pW = I->shape[2];
   auto pCM = W->shape[1];  // channel_multiplier
@@ -429,8 +429,8 @@ inline tvm::te::Tensor group_conv2d_ngchw(const tvm::te::Tensor& I, const tvm::t
                                           int stride_w = 1,
                                           std::string name = "T_group_conv2d_ngchw",
                                           std::string tag = kGroupConv2d) {
-  CHECK_EQ(5, I->shape.size());
-  CHECK_EQ(5, W->shape.size());
+  ICHECK_EQ(5, I->shape.size());
+  ICHECK_EQ(5, W->shape.size());
   auto pH = I->shape[2];
   auto pW = I->shape[3];
   tvm::Array<tvm::PrimExpr> output_shape{
diff --git a/include/tvm/topi/nn/bnn.h b/include/tvm/topi/nn/bnn.h
index f729508..815b8a2 100644
--- a/include/tvm/topi/nn/bnn.h
+++ b/include/tvm/topi/nn/bnn.h
@@ -52,7 +52,7 @@ inline tvm::te::Tensor binarize_pack(const tvm::te::Tensor& data, int axis,
                                      std::string name = "PackedInput",
                                      std::string tag = "binarize_pack") {
   auto ishape = data->shape;
-  CHECK_EQ(GetConstInt(ishape[axis]) % 32, 0)
+  ICHECK_EQ(GetConstInt(ishape[axis]) % 32, 0)
       << "binarize_pack: axis size must be a multiple of 32";
 
   arith::Analyzer analyzer;
@@ -99,10 +99,10 @@ inline tvm::te::Tensor binarize_pack(const tvm::te::Tensor& data, int axis,
  * \return Tensor with shape [batch, out_dim], dtype is float32
  */
 inline tvm::te::Tensor binary_dense(const tvm::te::Tensor& data, const tvm::te::Tensor& weight) {
-  CHECK_EQ(data->shape.size(), 2) << "binary_dense requires 2-D data";
-  CHECK_EQ(weight->shape.size(), 2) << "binary_dense requires 2-D weight";
-  CHECK_EQ(data->dtype, DataType::UInt(32)) << "binary_dense requires uint32 data";
-  CHECK_EQ(weight->dtype, DataType::UInt(32)) << "binary_dense requires uint32 weight";
+  ICHECK_EQ(data->shape.size(), 2) << "binary_dense requires 2-D data";
+  ICHECK_EQ(weight->shape.size(), 2) << "binary_dense requires 2-D weight";
+  ICHECK_EQ(data->dtype, DataType::UInt(32)) << "binary_dense requires uint32 data";
+  ICHECK_EQ(weight->dtype, DataType::UInt(32)) << "binary_dense requires uint32 weight";
 
   auto batch = data->shape[0];
   auto in_dim = data->shape[1];
diff --git a/include/tvm/topi/nn/dense.h b/include/tvm/topi/nn/dense.h
index ad18cb0..113002d 100644
--- a/include/tvm/topi/nn/dense.h
+++ b/include/tvm/topi/nn/dense.h
@@ -47,10 +47,10 @@ using namespace tvm::te;
  */
 inline tvm::te::Tensor dense(const tvm::te::Tensor& data, const tvm::te::Tensor& weight,
                              const tvm::te::Tensor& bias, const DataType& out_dtype) {
-  CHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data";
-  CHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight";
+  ICHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data";
+  ICHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight";
   if (bias.defined()) {
-    CHECK_EQ(bias->shape.size(), 1) << "dense requires 1-D bias";
+    ICHECK_EQ(bias->shape.size(), 1) << "dense requires 1-D bias";
   }
 
   auto batch = data->shape[0];
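
As elsewhere in topi, dense takes weight in [out_dim, in_dim] layout, which is why the cuBLAS call earlier passes transb = true. The computation these checks guard is

    out[b, o] = sum_k data[b, k] * weight[o, k]   (+ bias[o] when bias is defined)

mapping data of shape (batch, in_dim) to out of shape (batch, out_dim).
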
diff --git a/include/tvm/topi/nn/dilate.h b/include/tvm/topi/nn/dilate.h
index 9b5a804..3369316 100644
--- a/include/tvm/topi/nn/dilate.h
+++ b/include/tvm/topi/nn/dilate.h
@@ -45,7 +45,7 @@ using namespace tvm::te;
  * \return The logical conjunction expression
  */
 PrimExpr all(Array<PrimExpr> args) {
-  CHECK_GT(args.size(), 0) << "all requires at least one argument";
+  ICHECK_GT(args.size(), 0) << "all requires at least one argument";
 
   PrimExpr ret = args[0];
   for (size_t i = 1; i < args.size(); ++i) {
@@ -70,8 +70,8 @@ PrimExpr all(Array<PrimExpr> args) {
 inline Tensor dilate(const Tensor& x, Array<PrimExpr> strides, double dilation_value,
                      std::string name = "tensor", std::string tag = kInjective) {
   auto n = x->shape.size();
-  CHECK_EQ(n, strides.size()) << "strides size (" << strides.size()
-                              << ") must match dimension of x (" << n << ")";
+  ICHECK_EQ(n, strides.size()) << "strides size (" << strides.size()
+                               << ") must match dimension of x (" << n << ")";
 
   Array<PrimExpr> out_shape;
   arith::Analyzer analyzer;
diff --git a/include/tvm/topi/nn/local_response_norm.h b/include/tvm/topi/nn/local_response_norm.h
index 0170c50..717adb8 100644
--- a/include/tvm/topi/nn/local_response_norm.h
+++ b/include/tvm/topi/nn/local_response_norm.h
@@ -52,9 +52,9 @@ using namespace tvm::te;
 inline Tensor lrn(const Tensor& data, int size, int axis = 1, float alpha = 0.0001,
                   float beta = 0.75, float bias = 2, std::string name = "tensor",
                   std::string tag = kBroadcast) {
-  CHECK_EQ(data->shape.size(), 4) << "LRN requires 4-D input";
-  CHECK_EQ(size % 2, 1) << "size should be odd number";
-  CHECK(axis == 1 || axis == 3) << "axis should be 1 or 3 for NCHW and NHWC";
+  ICHECK_EQ(data->shape.size(), 4) << "LRN requires 4-D input";
+  ICHECK_EQ(size % 2, 1) << "size should be an odd number";
+  ICHECK(axis == 1 || axis == 3) << "axis should be 1 or 3 for NCHW and NHWC";
   auto input_shape = data->shape;
   Array<PrimExpr> pad_before{0, 0, 0, 0};
   Array<PrimExpr> pad_after{0, 0, 0, 0};
diff --git a/include/tvm/topi/nn/pooling.h b/include/tvm/topi/nn/pooling.h
index 2396fc2..8827938 100644
--- a/include/tvm/topi/nn/pooling.h
+++ b/include/tvm/topi/nn/pooling.h
@@ -65,10 +65,10 @@ inline Tensor pool_impl(const Tensor& x, const Array<PrimExpr>& kernel_size,
                         const Array<PrimExpr>& stride_size, const Array<PrimExpr>& padding_size,
                         PoolType pool_type, bool ceil_mode, const size_t height_axis,
                         const size_t width_axis, bool count_include_pad) {
-  CHECK(x->shape.size() >= 2) << "Pooling input must >= 2-D (H, W)";
-  CHECK_EQ(kernel_size.size(), 2) << "Pooling kernel_size must have 2 elements";
-  CHECK_EQ(stride_size.size(), 2) << "Pooling stride_size must have 2 elements";
-  CHECK_EQ(padding_size.size(), 4) << "Pooling padding_size must have 4 elements";
+  ICHECK(x->shape.size() >= 2) << "Pooling input must >= 2-D (H, W)";
+  ICHECK_EQ(kernel_size.size(), 2) << "Pooling kernel_size must have 2 elements";
+  ICHECK_EQ(stride_size.size(), 2) << "Pooling stride_size must have 2 elements";
+  ICHECK_EQ(padding_size.size(), 4) << "Pooling padding_size must have 4 elements";
 
   auto kernel_height = cast(DataType::DataType::Int(32), kernel_size[0]);
   auto kernel_width = cast(DataType::DataType::Int(32), kernel_size[1]);
@@ -181,11 +181,11 @@ inline Tensor pool_grad_impl(const Tensor& out_grad, const Tensor& x,
                              const Array<PrimExpr>& padding_size, PoolType pool_type,
                              bool ceil_mode, const size_t height_axis, const size_t width_axis,
                              bool count_include_pad) {
-  CHECK(out_grad->shape.size() >= 2) << "Pooling grad output must >= 2-D (H, W)";
-  CHECK(x->shape.size() >= 2) << "Pooling input must >= 2-D (H, W)";
-  CHECK_EQ(kernel_size.size(), 2) << "Pooling kernel_size must have 2 elements";
-  CHECK_EQ(stride_size.size(), 2) << "Pooling stride_size must have 2 elements";
-  CHECK_EQ(padding_size.size(), 4) << "Pooling padding_size must have 4 elements";
+  ICHECK(out_grad->shape.size() >= 2) << "Pooling grad output must >= 2-D (H, W)";
+  ICHECK(x->shape.size() >= 2) << "Pooling input must >= 2-D (H, W)";
+  ICHECK_EQ(kernel_size.size(), 2) << "Pooling kernel_size must have 2 elements";
+  ICHECK_EQ(stride_size.size(), 2) << "Pooling stride_size must have 2 elements";
+  ICHECK_EQ(padding_size.size(), 4) << "Pooling padding_size must have 4 elements";
 
   auto kernel_height = cast(DataType::DataType::Int(32), kernel_size[0]);
   auto kernel_width = cast(DataType::DataType::Int(32), kernel_size[1]);
@@ -372,7 +372,7 @@ inline bool find_depth_height_width(const std::string& layout, int* depth_axis,
 
 inline bool find_height_width(const std::string& layout, int* height_axis, int* width_axis) {
   int dummy;
-  CHECK_EQ(find_depth_height_width(layout, &dummy, height_axis, width_axis), false);
+  ICHECK_EQ(find_depth_height_width(layout, &dummy, height_axis, width_axis), false);
   if (*height_axis != -1 && *width_axis != -1) {
     return true;
   }
@@ -381,7 +381,7 @@ inline bool find_height_width(const std::string& layout, int* height_axis, int*
 
 inline bool find_width(const std::string& layout, int* width_axis) {
   int dummy;
-  CHECK_EQ(find_depth_height_width(layout, &dummy, &dummy, width_axis), false);
+  ICHECK_EQ(find_depth_height_width(layout, &dummy, &dummy, width_axis), false);
   if (*width_axis != -1) {
     return true;
   }
@@ -422,7 +422,7 @@ inline Tensor pool(const Tensor& x, const Array<PrimExpr>& kernel_size,
                    PoolType pool_type, bool ceil_mode, const std::string& layout = "NCHW",
                    bool count_include_pad = true) {
   int height_axis = -1, width_axis = -1;
-  CHECK(find_height_width(layout, &height_axis, &width_axis)) << "Unsupported layout " << layout;
+  ICHECK(find_height_width(layout, &height_axis, &width_axis)) << "Unsupported layout " << layout;
   return pool_impl(x, kernel_size, stride_size, padding_size, pool_type, ceil_mode, height_axis,
                    width_axis, count_include_pad);
 }
@@ -462,7 +462,7 @@ inline Tensor pool_grad(const Tensor& out_grad, const Tensor& x, const Array<Pri
                         PoolType pool_type, bool ceil_mode, const std::string& layout = "NCHW",
                         bool count_include_pad = true) {
   int height_axis = -1, width_axis = -1;
-  CHECK(find_height_width(layout, &height_axis, &width_axis)) << "Unsupported layout " << layout;
+  ICHECK(find_height_width(layout, &height_axis, &width_axis)) << "Unsupported layout " << layout;
   return pool_grad_impl(out_grad, x, kernel_size, stride_size, padding_size, pool_type, ceil_mode,
                         height_axis, width_axis, count_include_pad);
 }
@@ -489,7 +489,7 @@ inline PrimExpr end_index(const Var& out_index, const PrimExpr& odim, const Prim
 inline Tensor adaptive_pool_impl(const Tensor& x, const Array<PrimExpr>& output_size,
                                  PoolType pool_type, const std::vector<int>& axes) {
   const auto n_dim = output_size.size();
-  CHECK_EQ(axes.size(), n_dim) << "The number of axes not equal to the in/out dimension";
+  ICHECK_EQ(axes.size(), n_dim) << "The number of axes is not equal to the in/out dimension";
 
   Array<PrimExpr> data_shape = x->shape;
   for (size_t i = 0; i < data_shape.size(); ++i) {
@@ -591,7 +591,7 @@ inline Tensor adaptive_pool_impl(const Tensor& x, const Array<PrimExpr>& output_
 inline Tensor adaptive_pool(const Tensor& x, const Array<PrimExpr>& output_size, PoolType pool_type,
                             const std::string& layout = "NCHW") {
   int height_axis = -1, width_axis = -1;
-  CHECK(find_height_width(layout, &height_axis, &width_axis)) << "Unsupported layout " << layout;
+  ICHECK(find_height_width(layout, &height_axis, &width_axis)) << "Unsupported layout " << layout;
   return adaptive_pool_impl(x, output_size, pool_type, {height_axis, width_axis});
 }
 
@@ -606,7 +606,7 @@ inline Tensor adaptive_pool(const Tensor& x, const Array<PrimExpr>& output_size,
 inline Tensor adaptive_pool3d(const Tensor& x, const Array<PrimExpr>& output_size,
                               PoolType pool_type, const std::string& layout = "NCDHW") {
   int depth_axis = -1, height_axis = -1, width_axis = -1;
-  CHECK(find_depth_height_width(layout, &depth_axis, &height_axis, &width_axis))
+  ICHECK(find_depth_height_width(layout, &depth_axis, &height_axis, &width_axis))
       << "Unsupported layout " << layout;
   return adaptive_pool_impl(x, output_size, pool_type, {depth_axis, height_axis, width_axis});
 }
@@ -661,10 +661,10 @@ inline Tensor pool_impl_nd(const Tensor& x, const Array<PrimExpr>& kernel_size,
                            bool count_include_pad) {
   int k_size = kernel_size.size();
   int x_size = x->shape.size();
-  CHECK_EQ(stride_size.size(), k_size) << "Pooling stride_size must have same elements as kernel";
-  CHECK_EQ(padding_size.size(), k_size * 2) << "Pooling padding_size must has double elements of"
-                                               " kernel";
-  CHECK_EQ(axis.size(), k_size) << "axis must have same elements as kernel";
+  ICHECK_EQ(stride_size.size(), k_size) << "Pooling stride_size must match the kernel size";
+  ICHECK_EQ(padding_size.size(), k_size * 2) << "Pooling padding_size must have twice as many"
+                                                " elements as the kernel";
+  ICHECK_EQ(axis.size(), k_size) << "axis must have the same number of elements as the kernel";
 
   Array<IterVar> daxis;
   std::vector<PrimExpr> kernel(k_size);
@@ -812,7 +812,7 @@ inline Tensor pool1d(const Tensor& x, const Array<PrimExpr>& kernel_size,
                      PoolType pool_type, bool ceil_mode, const std::string& layout = "NCW",
                      bool count_include_pad = true) {
   int width_axis = -1;
-  CHECK(find_width(layout, &width_axis)) << "Unsupported layout " << layout;
+  ICHECK(find_width(layout, &width_axis)) << "Unsupported layout " << layout;
   std::vector<int> axis = {width_axis};
   return pool_impl_nd(x, kernel_size, stride_size, padding_size, pool_type, ceil_mode, axis,
                       count_include_pad);
@@ -853,7 +853,7 @@ inline Tensor pool3d(const Tensor& x, const Array<PrimExpr>& kernel_size,
                      PoolType pool_type, bool ceil_mode, const std::string& layout = "NCDHW",
                      bool count_include_pad = true) {
   int depth_axis = -1, height_axis = -1, width_axis = -1;
-  CHECK(find_depth_height_width(layout, &depth_axis, &height_axis, &width_axis))
+  ICHECK(find_depth_height_width(layout, &depth_axis, &height_axis, &width_axis))
       << "Unsupported layout " << layout;
   std::vector<int> axis = {depth_axis, height_axis, width_axis};
   return pool_impl_nd(x, kernel_size, stride_size, padding_size, pool_type, ceil_mode, axis,
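
The layout strings accepted above follow the topi convention documented in this header: an upper-case letter names a primitive axis, a lower-case letter with a factor names a split sub-axis, and the pooled axes (D/H/W) must not be split. For example:

    "NCHW"    -> height_axis = 2, width_axis = 3
    "NHWC"    -> height_axis = 1, width_axis = 2
    "NCHW16c" -> accepted; only the channel axis is split
    "NCHW4h"  -> rejected; find_height_width fails and the ICHECK fires
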
diff --git a/include/tvm/topi/nn/softmax.h b/include/tvm/topi/nn/softmax.h
index 2e94f91..78a9ec4 100644
--- a/include/tvm/topi/nn/softmax.h
+++ b/include/tvm/topi/nn/softmax.h
@@ -54,7 +54,7 @@ inline Tensor softmax(const Tensor& x, int axis = -1, std::string name = "tensor
   if (axis < 0) {
     axis = ndim + axis;
   }
-  CHECK_LT(axis, ndim) << "axis parameter should be less than input dim";
+  ICHECK_LT(axis, ndim) << "axis parameter should be less than input dim";
 
   auto k1 = tvm::te::reduce_axis(Range(0, input_shape[axis]), "k1");
   auto k2 = tvm::te::reduce_axis(Range(0, input_shape[axis]), "k2");
@@ -124,7 +124,7 @@ inline Tensor softmax(const Tensor& x, int axis = -1, std::string name = "tensor
  */
 inline Tensor log_softmax(const Tensor& x, std::string name = "tensor",
                           std::string tag = "log_softmax_output") {
-  CHECK_EQ(x->shape.size(), 2) << "Log softmax requires 2-D input";
+  ICHECK_EQ(x->shape.size(), 2) << "Log softmax requires 2-D input";
 
   PrimExpr m = x->shape[0];
   PrimExpr n = x->shape[1];
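
The two reduce axes k1 and k2 above implement the numerically stable form: the running maximum is subtracted before exponentiation, so along the reduced axis

    softmax(x)_i = exp(x_i - max_k x_k) / sum_j exp(x_j - max_k x_k)

and log_softmax likewise computes x_i - m - log(sum_j exp(x_j - m)) with m = max_j x_j.
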
diff --git a/include/tvm/topi/reduction.h b/include/tvm/topi/reduction.h
index 75c8265..2a2f211 100644
--- a/include/tvm/topi/reduction.h
+++ b/include/tvm/topi/reduction.h
@@ -75,8 +75,8 @@ inline std::vector<int> GetRealAxis(int ndim, const Array<Integer>& axis) {
       if (val < 0) {
         val += ndim;
       }
-      CHECK_LE(val, ndim) << " exceeds the maximum dimension " << ndim;
-      CHECK_GE(val, 0);
+      ICHECK_LE(val, ndim) << " exceeds the maximum dimension " << ndim;
+      ICHECK_GE(val, 0);
       real_axis.push_back(static_cast<int>(val));
     }
     std::sort(real_axis.begin(), real_axis.end());
@@ -181,7 +181,7 @@ inline Tensor DoCommReduce(const Tensor& data, FReduce func, const Array<PrimExp
 inline Tensor CommReduce(const Tensor& data, const Array<Integer>& axis, FReduce func,
                          bool keepdims, bool atleast1d) {
   auto ndim = data->shape.size();
-  CHECK_NE(ndim, 0) << "Cannot reduce a 0 dim Tensor";
+  ICHECK_NE(ndim, 0) << "Cannot reduce a 0 dim Tensor";
   auto real_axis = GetRealAxis(static_cast<int>(ndim), axis);
   auto target_shape = MakeReduceTargetShape(real_axis, data, keepdims, atleast1d);
   return DoCommReduce(data, func, target_shape, real_axis,
@@ -204,7 +204,7 @@ inline Tensor CommReduce(const Tensor& data, const Array<Integer>& axis, FReduce
 inline Tensor CommReduceIdx(const Tensor& data, const Array<Integer>& axis, FCommReduce func,
                             bool keepdims, bool atleast1d) {
   auto ndim = data->shape.size();
-  CHECK_NE(ndim, 0) << "Cannot reduce a 0 dim Tensor";
+  ICHECK_NE(ndim, 0) << "Cannot reduce a 0 dim Tensor";
   auto real_axis = GetRealAxis(static_cast<int>(ndim), axis);
   auto reduce_axes = MakeReduceAxes(real_axis, data);
   auto target_shape = MakeReduceTargetShape(real_axis, data, keepdims, atleast1d);
@@ -325,7 +325,7 @@ inline Tensor sum(const Tensor& data, const Array<Integer>& axis, bool keepdims
 }
 
 inline Tensor collapse_sum(const Tensor& data, Array<PrimExpr> target_shape) {
-  CHECK_GE(data->shape.size(), target_shape.size());
+  ICHECK_GE(data->shape.size(), target_shape.size());
   auto ishape = detail::GetConstIntValues(data->shape, "ishape");
   auto oshape = detail::GetConstIntValues(target_shape, "oshape");
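
GetRealAxis above normalizes negative axes Python-style and then sorts; with ndim = 3:

    axis = {-1}    -> real_axis = {2}
    axis = {0, -2} -> real_axis = {0, 1}

and anything outside the valid range aborts via the ICHECK_LE/ICHECK_GE pair.
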
 
diff --git a/include/tvm/topi/rocm/dense.h b/include/tvm/topi/rocm/dense.h
index a1e4d14..b861e6c 100644
--- a/include/tvm/topi/rocm/dense.h
+++ b/include/tvm/topi/rocm/dense.h
@@ -53,10 +53,10 @@ namespace rocm {
 inline tvm::te::Tensor dense_rocm(const Target& target, const tvm::te::Tensor& data,
                                   const tvm::te::Tensor& weight, const tvm::te::Tensor& bias,
                                   const DataType& out_dtype) {
-  CHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data";
-  CHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight";
+  ICHECK_EQ(data->shape.size(), 2) << "dense requires 2-D data";
+  ICHECK_EQ(weight->shape.size(), 2) << "dense requires 2-D weight";
   if (bias.defined()) {
-    CHECK_EQ(bias->shape.size(), 1) << "dense requires 1-D bias";
+    ICHECK_EQ(bias->shape.size(), 1) << "dense requires 1-D bias";
   }
 
   auto batch = data->shape[0];
@@ -64,7 +64,7 @@ inline tvm::te::Tensor dense_rocm(const Target& target, const tvm::te::Tensor& d
   auto out_dim = weight->shape[0];
 
   if (target->GetLibs().count("rocblas")) {
-    CHECK_EQ(data->dtype, out_dtype) << "Mixed precision not supported.";
+    ICHECK_EQ(data->dtype, out_dtype) << "Mixed precision not supported.";
     auto mm = topi::contrib::rocblas_matmul(data, weight, false, true);
     if (bias.defined()) {
       mm = tvm::te::compute(
diff --git a/include/tvm/topi/transform.h b/include/tvm/topi/transform.h
index aa5c6d2..fa27faf 100644
--- a/include/tvm/topi/transform.h
+++ b/include/tvm/topi/transform.h
@@ -60,11 +60,11 @@ using namespace topi::detail;
 inline Tensor expand_dims(const Tensor& x, int axis, int num_newaxis = 1,
                           std::string name = "T_expand_dims", std::string tag = kBroadcast) {
   int ndim = static_cast<int>(x->shape.size());
-  CHECK(-ndim - 1 <= axis && axis <= ndim)
+  ICHECK(-ndim - 1 <= axis && axis <= ndim)
       << "expand_dims only accepts `axis` in [-data.ndim - 1, data.ndim]"
       << ", but got axis = " << axis << ", and data.ndim = " << ndim;
-  CHECK(num_newaxis >= 0) << "expand_dims only accepts `num_newaxis >= 0`"
-                          << ", but got num_newaxis = " << num_newaxis;
+  ICHECK(num_newaxis >= 0) << "expand_dims only accepts `num_newaxis >= 0`"
+                           << ", but got num_newaxis = " << num_newaxis;
   if (axis < 0) {
     // Calculate offset from last dimension
     axis = ndim + axis + 1;
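
With the negative-axis rule above, axis = -1 maps to ndim, i.e. the new axes are appended at the end; e.g. for input shape (3, 4):

    axis = 1,  num_newaxis = 2 -> (3, 1, 1, 4)
    axis = -1, num_newaxis = 1 -> (3, 4, 1)
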
@@ -123,13 +123,13 @@ inline Tensor transpose(const Tensor& x, Array<Integer> axes, std::string name =
       new_axis = static_cast<int>(x->shape.size()) + axis;
       axes.Set(i, new_axis);
     }
-    CHECK((new_axis >= 0) && (new_axis < static_cast<int>(x->shape.size())))
+    ICHECK((new_axis >= 0) && (new_axis < static_cast<int>(x->shape.size())))
         << "axis=" << axis << " is invalid for the " << static_cast<int>(x->shape.size())
         << "-dimensional input tensor";
 
     for (size_t j = 0; j < axes.size(); ++j) {
       if (i != j) {
-        CHECK(new_axis != static_cast<int>(axes[j]->value)) << "repeated axis in transpose";
+        ICHECK(new_axis != static_cast<int>(axes[j]->value)) << "repeated axis in transpose";
       }
     }
     new_shape.push_back(x->shape[new_axis]);
@@ -178,14 +178,14 @@ inline Tensor reverse_sequence(const Tensor& x, const Tensor& seq_lengths, int s
       batch_axis = static_cast<int>(x->shape.size()) + batch_axis;
     }
 
-    CHECK(seq_lengths_dim == 1) << "seq_lengths should be 1D vector";
+    ICHECK(seq_lengths_dim == 1) << "seq_lengths should be a 1-D vector";
 
-    CHECK(GetConstInt(seq_lengths->shape[0]) == GetConstInt(x->shape[batch_axis]))
+    ICHECK(GetConstInt(seq_lengths->shape[0]) == GetConstInt(x->shape[batch_axis]))
         << "For reverse_sequnece seq_lengths size should match with dimension of batch axis"
         << ", but got dimension of batch_axis = " << GetConstInt(x->shape[batch_axis])
         << ", and seq_length size = " << GetConstInt(seq_lengths->shape[0]);
 
-    CHECK((0 <= batch_axis) && (batch_axis < static_cast<int>(x->shape.size())))
+    ICHECK((0 <= batch_axis) && (batch_axis < static_cast<int>(x->shape.size())))
         << "batch_axis=" << batch_axis_inp << " is invalid for the "
         << static_cast<int>(x->shape.size()) << "-dimensional input tensor";
   }
@@ -193,7 +193,7 @@ inline Tensor reverse_sequence(const Tensor& x, const Tensor& seq_lengths, int s
   if (seq_axis < 0) {
     seq_axis = static_cast<int>(x->shape.size()) + seq_axis;
   }
-  CHECK((0 <= seq_axis) && (seq_axis < static_cast<int>(x->shape.size())))
+  ICHECK((0 <= seq_axis) && (seq_axis < static_cast<int>(x->shape.size())))
       << "seq_axis=" << seq_axis_inp << " is invalid for the " << static_cast<int>(x->shape.size())
       << "-dimensional input tensor";
 
@@ -332,7 +332,7 @@ inline Tensor squeeze(const Tensor& x, Array<Integer> axis, bool atleast1d = fal
       if (val < 0) {
         val += static_cast<int>(x->shape.size());
       }
-      CHECK_EQ(GetConstInt(x->shape[val]), 1) << "Dimension " << val << " must have size 1";
+      ICHECK_EQ(GetConstInt(x->shape[val]), 1) << "Dimension " << val << " must have size 1";
       axis_val.push_back(val);
     }
   }
@@ -380,12 +380,12 @@ inline Tensor squeeze(const Tensor& x, Array<Integer> axis, bool atleast1d = fal
 inline Tensor concatenate(const Array<Tensor>& inputs, int axis = 0, std::string name = "T_concat",
                           std::string tag = kInjective) {
   int ndim = static_cast<int>(inputs[0]->shape.size());
-  CHECK(-ndim <= axis && axis < ndim) << "concatenate only accepts `axis` in [-ndim, ndim)"
-                                      << ", but got axis = " << axis << ", and ndim = " << ndim;
+  ICHECK(-ndim <= axis && axis < ndim) << "concatenate only accepts `axis` in [-ndim, ndim)"
+                                       << ", but got axis = " << axis << ", and ndim = " << ndim;
   if (axis < 0) {
     axis += ndim;
   }
-  CHECK_LT(axis, inputs[0]->shape.size()) << "axis out of bounds";
+  ICHECK_LT(axis, inputs[0]->shape.size()) << "axis out of bounds";
 
   Array<PrimExpr> axis_sizes;
   for (auto t : inputs) {
@@ -439,13 +439,13 @@ inline Tensor concatenate(const Array<Tensor>& inputs, int axis = 0, std::string
 inline Tensor stack(const Array<Tensor>& inputs, int axis = 0, std::string name = "T_stack",
                     std::string tag = kInjective) {
   int ndim = static_cast<int>(inputs[0]->shape.size());
-  CHECK(-ndim - 1 <= axis && axis <= ndim)
+  ICHECK(-ndim - 1 <= axis && axis <= ndim)
       << "stack only accepts `axis` in [-ndim, ndim)"
       << ", but got axis = " << axis << ", and ndim = " << ndim;
   if (axis < 0) {
     axis += ndim + 1;
   }
-  CHECK_LT(axis, inputs[0]->shape.size() + 1) << "axis out of bounds";
+  ICHECK_LT(axis, inputs[0]->shape.size() + 1) << "axis out of bounds";
 
   const int stack_size = static_cast<int>(inputs.size());
   Array<PrimExpr> out_shape;
@@ -487,7 +487,7 @@ inline Array<Tensor> split(const Tensor& x, Array<PrimExpr> split_indices, int a
   if (axis < 0) {
     axis += static_cast<int>(x->shape.size());
   }
-  CHECK_LT(axis, x->shape.size()) << "axis out of bounds";
+  ICHECK_LT(axis, x->shape.size()) << "axis out of bounds";
 
   auto src_axis_size = x->shape[axis];
   std::vector<PrimExpr> begin_ids;
@@ -497,7 +497,7 @@ inline Array<Tensor> split(const Tensor& x, Array<PrimExpr> split_indices, int a
     auto idx_node = idx.as<IntImmNode>();
     auto back_node = begin_ids.back().as<IntImmNode>();
     if (idx_node && back_node) {
-      CHECK_GT(idx_node->value, back_node->value) << "split_indices must be sorted";
+      ICHECK_GT(idx_node->value, back_node->value) << "split_indices must be sorted";
     }
     begin_ids.push_back(idx);
   }
@@ -569,7 +569,7 @@ inline Tensor strided_slice(const Tensor& x, const Array<Integer>& begin, const
   // Consider to refactor in the future.
   std::vector<int64_t> stride_vec(src_tensor_dim, 1);
   for (size_t i = 0; i < strides.size(); ++i) {
-    CHECK(strides[i].defined());
+    ICHECK(strides[i].defined());
     stride_vec[i] = strides[i]->value;
   }
 
@@ -630,7 +630,7 @@ inline Tensor strided_slice(const Tensor& x, const Array<Integer>& begin, const
     int interval = std::abs(end_i - begin_i);
     int slice_size =
         static_cast<int>((interval + std::abs(stride_vec[i]) - 1) / std::abs(stride_vec[i]));
-    CHECK(stride_vec[i] < 0 ? (end_i <= begin_i) : (begin_i <= end_i))
+    ICHECK(stride_vec[i] < 0 ? (end_i <= begin_i) : (begin_i <= end_i))
         << ": Input [Begin=" << begin_vec[i] << ", End=" << end_vec[i]
         << "] is invalid for axis=" << i;
 
@@ -670,14 +670,14 @@ inline Array<Tensor> split_sections(const Tensor& x, int num_sections, int axis,
   if (axis < 0) {
     axis += static_cast<int>(x->shape.size());
   }
-  CHECK_LT(axis, x->shape.size()) << "axis out of bounds";
+  ICHECK_LT(axis, x->shape.size()) << "axis out of bounds";
 
   auto src_axis_size = x->shape[axis];
 
-  CHECK_GT(num_sections, 0) << "Slice count must be > 0";
+  ICHECK_GT(num_sections, 0) << "Slice count must be > 0";
 
   if (auto node = src_axis_size.as<IntImmNode>()) {
-    CHECK_EQ(node->value % num_sections, 0)
+    ICHECK_EQ(node->value % num_sections, 0)
         << "num_sections must be an integer factor of the size of axis " << axis << " ("
         << node->value << ")";
   }
@@ -756,8 +756,8 @@ inline Tensor take(const Tensor& a, const Tensor& indices, std::string mode = "c
 inline Tensor sequence_mask(const Tensor& data, const Tensor& valid_length, double mask_value,
                             int axis, std::string name = "T_sequence_mask",
                             std::string tag = kInjective) {
-  CHECK(axis == 0 || axis == 1) << "axis must be either 0 or 1";
-  CHECK_EQ(valid_length->shape.size(), 1) << "valid_length must have ndim=1, i.e., (batch_size,).";
+  ICHECK(axis == 0 || axis == 1) << "axis must be either 0 or 1";
+  ICHECK_EQ(valid_length->shape.size(), 1) << "valid_length must have ndim=1, i.e., (batch_size,).";
   auto length_dim = data->shape[axis];
   auto batch_dim = data->shape[1 - axis];
   Array<PrimExpr> out_shape = data->shape;
@@ -795,8 +795,8 @@ inline Tensor take(const Tensor& a, const Tensor& indices, int axis, std::string
   if (axis < 0) {
     axis += static_cast<int>(a->shape.size());
   }
-  CHECK_GE(axis, 0) << "axis out of bounds";
-  CHECK_LT(axis, a->shape.size()) << "axis out of bounds";
+  ICHECK_GE(axis, 0) << "axis out of bounds";
+  ICHECK_LT(axis, a->shape.size()) << "axis out of bounds";
   auto axis_dim = a->shape[axis];
 
   int indices_len = static_cast<int>(indices->shape.size());
@@ -887,11 +887,11 @@ inline Tensor take(const Tensor& a, const Tensor& indices, int axis, std::string
  */
 inline Tensor where(const Tensor& condition, const Tensor& x, const Tensor& y,
                     std::string name = "T_where", std::string tag = kBroadcast) {
-  CHECK_EQ(x->shape.size(), y->shape.size())
+  ICHECK_EQ(x->shape.size(), y->shape.size())
       << "x and y must have the same shape.Got different number of dimension: " << x->shape.size()
       << " vs " << y->shape.size();
-  CHECK_EQ(x->dtype, y->dtype) << "x and y must have the same dtype: " << x->dtype << " vs "
-                               << y->dtype;
+  ICHECK_EQ(x->dtype, y->dtype) << "x and y must have the same dtype: " << x->dtype << " vs "
+                                << y->dtype;
 
   if (x->shape.size() == 0) {
     return compute(
@@ -908,7 +908,7 @@ inline Tensor where(const Tensor& condition, const Tensor& x, const Tensor& y,
         },
         name, tag);
   } else if (condition->shape.size() != 1) {
-    CHECK_EQ(condition->shape.size(), x->shape.size())
+    ICHECK_EQ(condition->shape.size(), x->shape.size())
         << "condition array must be either have the same shape as x or to be a "
            "1-D array.Got different number of dimension: "
         << condition->shape.size() << " vs " << x->shape.size();
@@ -922,7 +922,7 @@ inline Tensor where(const Tensor& condition, const Tensor& x, const Tensor& y,
     int64_t cond_first_dim = topi::GetConstInt(condition->shape[0]);
     int64_t x_first_dim = topi::GetConstInt(x->shape[0]);
     if (cond_first_dim > 0 && x_first_dim > 0) {
-      CHECK_EQ(cond_first_dim, x_first_dim)
+      ICHECK_EQ(cond_first_dim, x_first_dim)
           << "If condition is 1-D, the first dimension must be the same as x: " << cond_first_dim
           << " vs " << x_first_dim;
     }
@@ -951,11 +951,11 @@ inline Tensor where(const Tensor& condition, const Tensor& x, const Tensor& y,
 inline Tensor repeat(const Tensor& x, int repeats, int axis, std::string name = "T_repeat",
                      std::string tag = kBroadcast) {
   int ndim = static_cast<int>(x->shape.size());
-  CHECK(-ndim - 1 <= axis && axis <= ndim)
+  ICHECK(-ndim - 1 <= axis && axis <= ndim)
       << "repeat only accepts `axis` in [-data.ndim - 1, data.ndim]"
       << ", but got axis = " << axis << ", and data.ndim = " << ndim;
-  CHECK(repeats >= 1) << "repeat only accepts `repeats >= 1`"
-                      << ", but got repeats = " << repeats;
+  ICHECK(repeats >= 1) << "repeat only accepts `repeats >= 1`"
+                       << ", but got repeats = " << repeats;
   if (axis < 0) {
     // Calculate offset from last dimension
     axis += ndim;
@@ -1091,13 +1091,13 @@ inline Tensor gather(const Tensor& data, int axis, const Tensor& indices,
                      std::string name = "T_gather", std::string tag = kInjective) {
   size_t ndim_d = data->shape.size();
   size_t ndim_i = indices->shape.size();
-  CHECK_GE(ndim_d, 1) << "Cannot gather from a scalar.";
-  CHECK_EQ(ndim_d, ndim_i);
-  CHECK_GE(axis, 0);
-  CHECK_LT(axis, ndim_d);
+  ICHECK_GE(ndim_d, 1) << "Cannot gather from a scalar.";
+  ICHECK_EQ(ndim_d, ndim_i);
+  ICHECK_GE(axis, 0);
+  ICHECK_LT(axis, ndim_d);
   size_t indices_dim_i = static_cast<size_t>(GetConstInt(indices->shape[axis]));
-  CHECK_GE(indices_dim_i, 1);
-  CHECK(indices->dtype.is_int());
+  ICHECK_GE(indices_dim_i, 1);
+  ICHECK(indices->dtype.is_int());
 
   Array<PrimExpr> out_shape;
   for (size_t i = 0; i < ndim_i; ++i) {
@@ -1138,10 +1138,10 @@ inline Tensor gather_nd(const Tensor& data, const Tensor& indices, std::string n
                         std::string tag = kInjective) {
   size_t ndim_d = data->shape.size();
   size_t ndim_i = indices->shape.size();
-  CHECK_GE(ndim_i, 1) << "indices tensor must have at least 1 dimensions";
+  ICHECK_GE(ndim_i, 1) << "indices tensor must have at least 1 dimension";
   size_t indices_dim0 = static_cast<size_t>(GetConstInt(indices->shape[0]));
-  CHECK_LE(indices_dim0, ndim_d) << "dim 0 of indices tensor must be no more "
-                                 << "than dimensions of data tensor";
+  ICHECK_LE(indices_dim0, ndim_d) << "dim 0 of indices tensor must be no more "
+                                  << "than the number of dimensions of the data tensor";
   Array<PrimExpr> out_shape;
   for (size_t i = 1; i < ndim_i; ++i) {
     out_shape.push_back(indices->shape[i]);
@@ -1216,8 +1216,8 @@ inline tvm::te::Tensor matmul(const tvm::te::Tensor& A, const tvm::te::Tensor& B
  */
 inline Tensor tensordot(const Tensor& A, const tvm::te::Tensor& B, int axes = 2,
                         std::string name = "T_tensordot", std::string tag = kMatMul) {
-  CHECK_GE(A->shape.size(), axes);
-  CHECK_GE(B->shape.size(), axes);
+  ICHECK_GE(A->shape.size(), axes);
+  ICHECK_GE(B->shape.size(), axes);
 
   Array<PrimExpr> output_shape(A->shape.begin(), A->shape.end() + (-axes));
   for (auto it = B->shape.begin() + axes; it != B->shape.end(); ++it) output_shape.push_back(*it);
@@ -1262,7 +1262,7 @@ inline Tensor tensordot(const Tensor& A, const tvm::te::Tensor& B, int axes = 2,
 inline Tensor tensordot(const Tensor& A, const tvm::te::Tensor& B, Array<PrimExpr> A_axes,
                         Array<PrimExpr> B_axes, std::string name = "T_tensordot",
                         std::string tag = kMatMul) {
-  CHECK_EQ(A_axes.size(), B_axes.size());
+  ICHECK_EQ(A_axes.size(), B_axes.size());
 
   auto A_axes_val = GetConstIntValues(A_axes, "A_axes");
   auto B_axes_val = GetConstIntValues(B_axes, "B_axes");
@@ -1366,11 +1366,12 @@ inline Tensor layout_transform(const Tensor& src, const std::string& src_layout,
     return src;
   }
 
-  CHECK(src_layout_struct.defined() && dst_layout_struct.defined())
+  ICHECK(src_layout_struct.defined() && dst_layout_struct.defined())
       << "cannot convert from/to undefined layout";
 
   auto layout_converter = tir::BijectiveLayout(src_layout_struct, dst_layout_struct);
-  CHECK(layout_converter.defined()) << "cannot convert from " << src_layout << " to " << dst_layout;
+  ICHECK(layout_converter.defined())
+      << "cannot convert from " << src_layout << " to " << dst_layout;
 
   Array<PrimExpr> dst_shape = layout_converter.ForwardShape(src->shape);
 
@@ -1499,9 +1500,10 @@ inline Tensor sparse_to_dense(const Tensor& sparse_indices, const Array<Integer>
                               const Tensor& sparse_values, const PrimExpr& default_value,
                               const std::string name = "T_sparse_to_dense",
                               const std::string tag = kInjective) {
-  CHECK(sparse_indices->dtype.is_int()) << "sparse_indices only accepts integer values";
-  CHECK_LE(sparse_indices->shape.size(), 3) << "sparse_indices tensor should be 0D, 1D, or 2D only";
-  CHECK_LE(sparse_values->shape.size(), 2) << "sparse_values tensor should be 0D or 1D only";
+  ICHECK(sparse_indices->dtype.is_int()) << "sparse_indices only accepts integer values";
+  ICHECK_LE(sparse_indices->shape.size(), 3)
+      << "sparse_indices tensor should be 0D, 1D, or 2D only";
+  ICHECK_LE(sparse_values->shape.size(), 2) << "sparse_values tensor should be 0D or 1D only";
 
   const auto rank_sparse_indices = static_cast<int>(sparse_indices->shape.size());
   Array<PrimExpr> oshape;
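
// ---------------------------------------------------------------------------
// [editor's sketch, not part of the patch] Every hunk in this commit is the
// same mechanical rename, so one illustration suffices: ICHECK* are
// dmlc-style streaming assertion macros (fail hard, then print the streamed
// message). FatalStream and the ICHECK definition below are stand-ins written
// for this note, not TVM's real ones; NormalizeAxis mirrors the
// axis-validation pattern used by concatenate/stack/split above.
#include <cstdlib>
#include <iostream>
#include <sstream>

class FatalStream {  // accumulates a message, aborts when destroyed
 public:
  template <typename T>
  FatalStream& operator<<(const T& v) {
    msg_ << v;
    return *this;
  }
  ~FatalStream() {
    std::cerr << msg_.str() << std::endl;
    std::abort();
  }

 private:
  std::ostringstream msg_;
};

#define ICHECK(cond) \
  if (!(cond)) FatalStream() << "ICHECK failed: " #cond ": "

int NormalizeAxis(int axis, int ndim) {
  ICHECK(-ndim <= axis && axis < ndim)
      << "axis must be in [-ndim, ndim), but got axis = " << axis
      << ", and ndim = " << ndim;
  return axis < 0 ? axis + ndim : axis;
}

int main() {
  std::cout << NormalizeAxis(-1, 4) << "\n";  // prints 3
  NormalizeAxis(4, 4);                        // fails the check and aborts
}
// ---------------------------------------------------------------------------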
diff --git a/src/arith/analyzer.cc b/src/arith/analyzer.cc
index daf6144..9737b53 100644
--- a/src/arith/analyzer.cc
+++ b/src/arith/analyzer.cc
@@ -47,7 +47,7 @@ void Analyzer::Bind(const Var& var, const PrimExpr& expr, bool allow_override) {
 }
 
 void Analyzer::Bind(const Var& var, const Range& range, bool allow_override) {
-  CHECK(range.defined());
+  ICHECK(range.defined());
   if (tir::is_one(range->extent)) {
     this->Bind(var, range->min, allow_override);
   } else {
@@ -64,7 +64,7 @@ void Analyzer::Bind(const Map<Var, Range>& variables, bool allow_override) {
 }
 
 void ConstraintContext::EnterWithScope() {
-  CHECK(exit_ == nullptr);
+  ICHECK(exit_ == nullptr);
   // entering the scope.
   auto f0 = analyzer_->const_int_bound.EnterConstraint(constraint_);
   auto f1 = analyzer_->modular_set.EnterConstraint(constraint_);
@@ -78,7 +78,7 @@ void ConstraintContext::EnterWithScope() {
 }
 
 void ConstraintContext::ExitWithScope() {
-  CHECK(exit_ != nullptr);
+  ICHECK(exit_ != nullptr);
   exit_();
 }
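
// [editor's sketch, not part of the patch] ConstraintContext above follows an
// enter/exit protocol: EnterWithScope() stores a recovery callback in exit_,
// ExitWithScope() runs it, and the two ICHECKs on exit_ catch double entry or
// exit without entry. A simplified analogue, with assert() standing in:
#include <cassert>
#include <functional>
#include <iostream>

class ScopedConstraint {
 public:
  void EnterWithScope(std::function<void()> on_exit) {
    assert(exit_ == nullptr);  // mirrors ICHECK(exit_ == nullptr)
    exit_ = std::move(on_exit);
  }
  void ExitWithScope() {
    assert(exit_ != nullptr);  // mirrors ICHECK(exit_ != nullptr)
    exit_();
    exit_ = nullptr;  // simplification: the real class is single-use
  }

 private:
  std::function<void()> exit_;
};

int main() {
  ScopedConstraint ctx;
  ctx.EnterWithScope([] { std::cout << "constraint popped\n"; });
  ctx.ExitWithScope();
}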
 
diff --git a/src/arith/canonical_simplify.cc b/src/arith/canonical_simplify.cc
index a88849b..d0a0702 100644
--- a/src/arith/canonical_simplify.cc
+++ b/src/arith/canonical_simplify.cc
@@ -63,7 +63,7 @@ inline PrimExpr ModImpl(PrimExpr a, PrimExpr b, DivMode mode) {
   if (mode == kTruncDiv) {
     return truncmod(a, b);
   } else {
-    CHECK_EQ(mode, kFloorDiv);
+    ICHECK_EQ(mode, kFloorDiv);
     return floormod(a, b);
   }
 }
@@ -72,7 +72,7 @@ inline PrimExpr DivImpl(PrimExpr a, PrimExpr b, DivMode mode) {
   if (mode == kTruncDiv) {
     return truncdiv(a, b);
   } else {
-    CHECK_EQ(mode, kFloorDiv);
+    ICHECK_EQ(mode, kFloorDiv);
     return floordiv(a, b);
   }
 }
@@ -102,7 +102,7 @@ class SplitExprNode : public CanonicalExprNode {
   DivMode div_mode{kTruncDiv};
 
   /*! \brief verify that this is a valid entry. */
-  void Verify() const { CHECK(upper_factor == kPosInf || upper_factor % lower_factor == 0); }
+  void Verify() const { ICHECK(upper_factor == kPosInf || upper_factor % lower_factor == 0); }
 
   PrimExpr NormalizeWithScale(int64_t sscale) const {
     PrimExpr res = this->index;
@@ -118,7 +118,7 @@ class SplitExprNode : public CanonicalExprNode {
     }
     sscale *= this->scale;
     if (sscale != 1) {
-      CHECK(!dtype.is_uint() || sscale > 0);
+      ICHECK(!dtype.is_uint() || sscale > 0);
       res = res * make_const(dtype, sscale);
     }
     return res;
@@ -209,10 +209,10 @@ class SumExprNode : public CanonicalExprNode {
    * \param scale The scale to be applied.
    */
   void DivideBy(int64_t scale) {
-    CHECK_EQ(this->base % scale, 0);
+    ICHECK_EQ(this->base % scale, 0);
     this->base /= scale;
     for (size_t i = 0; i < this->args.size(); ++i) {
-      CHECK_EQ(args[i]->scale % scale, 0);
+      ICHECK_EQ(args[i]->scale % scale, 0);
       args[i].CopyOnWrite()->scale /= scale;
     }
   }
@@ -508,7 +508,7 @@ class CanonicalSimplifier::Impl : public RewriteSimplifier::Impl {
       return expr;
     }
     expr = ToSplitExpr(Normalize(expr));
-    CHECK(expr->DivModeCompatibleTo(div_mode));
+    ICHECK(expr->DivModeCompatibleTo(div_mode));
     expr.CopyOnWrite()->div_mode = div_mode;
     return expr;
   }
@@ -648,7 +648,7 @@ void CanonicalSimplifier::Impl::SeparateDivisibleParts(const SumExprNode* psum,
 }
 
 SplitExpr CanonicalSimplifier::Impl::SplitDivConst(SplitExpr lhs, int64_t cval, DivMode div_mode) {
-  CHECK_GT(cval, 0);
+  ICHECK_GT(cval, 0);
   lhs = ConvertDivMode(lhs, div_mode);
 
   // the following rule works for both floordiv and truncdiv
@@ -682,8 +682,8 @@ SplitExpr CanonicalSimplifier::Impl::SplitDivConst(SplitExpr lhs, int64_t cval,
   }
   // directly return the split with cval == 1
   lhs = ToSplitExpr(Normalize(lhs));
-  CHECK(lhs->DivModeCompatibleTo(div_mode));
-  CHECK_EQ(lhs->scale, 1);
+  ICHECK(lhs->DivModeCompatibleTo(div_mode));
+  ICHECK_EQ(lhs->scale, 1);
   lhs.CopyOnWrite()->lower_factor *= cval;
   lhs.CopyOnWrite()->div_mode = div_mode;
   return lhs;
@@ -803,7 +803,7 @@ PrimExpr CanonicalSimplifier::Impl::VisitExpr_(const FloorDivNode* op) {
 }
 
 SplitExpr CanonicalSimplifier::Impl::SplitModConst(SplitExpr lhs, int64_t cval, DivMode div_mode) {
-  CHECK_GT(cval, 0);
+  ICHECK_GT(cval, 0);
   lhs = ConvertDivMode(lhs, div_mode);
 
   if (lhs->scale % cval == 0) {
@@ -842,9 +842,9 @@ SplitExpr CanonicalSimplifier::Impl::SplitModConst(SplitExpr lhs, int64_t cval,
   }
   // Normalize the value.
   lhs = ToSplitExpr(Normalize(lhs));
-  CHECK(lhs->DivModeCompatibleTo(div_mode));
-  CHECK_EQ(lhs->scale, 1);
-  CHECK_EQ(lhs->lower_factor, 1);
+  ICHECK(lhs->DivModeCompatibleTo(div_mode));
+  ICHECK_EQ(lhs->scale, 1);
+  ICHECK_EQ(lhs->lower_factor, 1);
   lhs.CopyOnWrite()->div_mode = div_mode;
   lhs.CopyOnWrite()->upper_factor = cval;
   return lhs;
@@ -886,7 +886,7 @@ PrimExpr CanonicalSimplifier::Impl::VisitExpr_(const ModNode* op) {
             // continue to use logic below.
             a = extra;
             psum = a.as<SumExprNode>();
-            CHECK(psum != nullptr);
+            ICHECK(psum != nullptr);
           }
         }
       }
@@ -948,7 +948,7 @@ PrimExpr CanonicalSimplifier::Impl::VisitExpr_(const FloorModNode* op) {
           // continue to use logic below.
           a = extra;
           psum = a.as<SumExprNode>();
-          CHECK(psum != nullptr);
+          ICHECK(psum != nullptr);
         }
       }
       // Simplify the offset constant if necessary.
diff --git a/src/arith/const_fold.h b/src/arith/const_fold.h
index 876d336..7bc04a1 100644
--- a/src/arith/const_fold.h
+++ b/src/arith/const_fold.h
@@ -150,7 +150,7 @@ inline PrimExpr TryConstFold<tir::Div>(PrimExpr a, PrimExpr b) {
     if (pa && pb) {
       // because division and mod can have different modes
       // NOTE: this assumes truncated division (truncdiv).
-      CHECK_NE(pb->value, 0) << "Divide by zero";
+      ICHECK_NE(pb->value, 0) << "Divide by zero";
       return IntImm(rtype, pa->value / pb->value);
     }
     if (pa) {
@@ -158,7 +158,7 @@ inline PrimExpr TryConstFold<tir::Div>(PrimExpr a, PrimExpr b) {
     }
     if (pb) {
       if (pb->value == 1) return a;
-      CHECK_NE(pb->value, 0) << "Divide by zero";
+      ICHECK_NE(pb->value, 0) << "Divide by zero";
     }
     if (fa && fb && fb->value != 0) {
       return FloatImm(rtype, fa->value / fb->value);
@@ -166,7 +166,7 @@ inline PrimExpr TryConstFold<tir::Div>(PrimExpr a, PrimExpr b) {
     if (fa && fa->value == 0) return a;
     if (fb) {
       if (fb->value == 1) return a;
-      CHECK_NE(fb->value, 0) << "Divide by zero";
+      ICHECK_NE(fb->value, 0) << "Divide by zero";
     }
   });
   return PrimExpr();
@@ -177,7 +177,7 @@ inline PrimExpr TryConstFold<tir::Mod>(PrimExpr a, PrimExpr b) {
   TVM_INDEX_CONST_PROPAGATION({
     const DataType& rtype = a.dtype();
     if (pa && pb) {
-      CHECK_NE(pb->value, 0) << "Divide by zero";
+      ICHECK_NE(pb->value, 0) << "Divide by zero";
       return IntImm(rtype, pa->value % pb->value);
     }
     if (pa) {
@@ -185,7 +185,7 @@ inline PrimExpr TryConstFold<tir::Mod>(PrimExpr a, PrimExpr b) {
     }
     if (pb) {
       if (pb->value == 1) return tir::make_zero(rtype);
-      CHECK_NE(pb->value, 0) << "Divide by zero";
+      ICHECK_NE(pb->value, 0) << "Divide by zero";
     }
   });
   return PrimExpr();
@@ -196,7 +196,7 @@ inline PrimExpr TryConstFold<tir::FloorDiv>(PrimExpr a, PrimExpr b) {
   TVM_ARITH_CONST_PROPAGATION({
     const DataType& rtype = a.dtype();
     if (pa && pb) {
-      CHECK_NE(pb->value, 0) << "Divide by zero";
+      ICHECK_NE(pb->value, 0) << "Divide by zero";
       return IntImm(rtype, arith::floordiv(pa->value, pb->value));
     }
     if (pa) {
@@ -204,7 +204,7 @@ inline PrimExpr TryConstFold<tir::FloorDiv>(PrimExpr a, PrimExpr b) {
     }
     if (pb) {
       if (pb->value == 1) return a;
-      CHECK_NE(pb->value, 0) << "Divide by zero";
+      ICHECK_NE(pb->value, 0) << "Divide by zero";
     }
     if (fa && fb && fb->value != 0) {
       return FloatImm(rtype, std::floor(fa->value / fb->value));
@@ -212,7 +212,7 @@ inline PrimExpr TryConstFold<tir::FloorDiv>(PrimExpr a, PrimExpr b) {
     if (fa && fa->value == 0) return a;
     if (fb) {
       if (fb->value == 1) return a;
-      CHECK_NE(fb->value, 0) << "Divide by zero";
+      ICHECK_NE(fb->value, 0) << "Divide by zero";
     }
   });
   return PrimExpr();
@@ -223,7 +223,7 @@ inline PrimExpr TryConstFold<tir::FloorMod>(PrimExpr a, PrimExpr b) {
   TVM_INDEX_CONST_PROPAGATION({
     const DataType& rtype = a.dtype();
     if (pa && pb) {
-      CHECK_NE(pb->value, 0) << "Divide by zero";
+      ICHECK_NE(pb->value, 0) << "Divide by zero";
       return IntImm(rtype, floormod(pa->value, pb->value));
     }
     if (pa) {
@@ -231,7 +231,7 @@ inline PrimExpr TryConstFold<tir::FloorMod>(PrimExpr a, PrimExpr b) {
     }
     if (pb) {
       if (pb->value == 1) return tir::make_zero(rtype);
-      CHECK_NE(pb->value, 0) << "Divide by zero";
+      ICHECK_NE(pb->value, 0) << "Divide by zero";
     }
   });
   return PrimExpr();
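
// [editor's sketch, not part of the patch] The TryConstFold<Div/Mod> hunks
// above share one shape: fold only when operands are known constants, and
// turn division by a constant zero into a hard internal error instead of UB.
// A stand-alone sketch of that control flow, with std::optional<int64_t>
// standing in for IntImmNode matches and an exception standing in for
// ICHECK_NE(pb->value, 0):
#include <cstdint>
#include <iostream>
#include <optional>
#include <stdexcept>

std::optional<int64_t> TryConstFoldDiv(std::optional<int64_t> pa,
                                       std::optional<int64_t> pb) {
  if (pa && pb) {
    if (*pb == 0) throw std::logic_error("Divide by zero");
    return *pa / *pb;  // truncating division, as the NOTE above says
  }
  if (pa && *pa == 0) return 0;  // 0 / x -> 0 (divisor assumed nonzero)
  if (pb && *pb == 0) throw std::logic_error("Divide by zero");
  // (the real code also folds x / 1 -> x; omitted here because the
  //  unfolded lhs expression is not representable in this sketch)
  return std::nullopt;  // not constant: leave the expression symbolic
}

int main() {
  std::cout << *TryConstFoldDiv(7, -2) << "\n";                       // -3
  std::cout << TryConstFoldDiv(std::nullopt, 3).has_value() << "\n";  // 0
}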
diff --git a/src/arith/const_int_bound.cc b/src/arith/const_int_bound.cc
index 876b7db..f39ce4b 100644
--- a/src/arith/const_int_bound.cc
+++ b/src/arith/const_int_bound.cc
@@ -109,11 +109,11 @@ class ConstIntBoundAnalyzer::Impl
     if (!allow_override) {
       auto it = var_map_.find(var);
       if (it != var_map_.end()) {
-        CHECK(it->second == info) << "Trying to update var \'" << var << "\'"
-                                  << " with a different const bound: "
-                                  << "original="
-                                  << ConstIntBound(it->second.min_value, it->second.max_value)
-                                  << ", new=" << ConstIntBound(info.min_value, info.max_value);
+        ICHECK(it->second == info)
+            << "Trying to update var \'" << var << "\'"
+            << " with a different const bound: "
+            << "original=" << ConstIntBound(it->second.min_value, it->second.max_value)
+            << ", new=" << ConstIntBound(info.min_value, info.max_value);
       }
     }
     var_map_[var] = info;
@@ -155,7 +155,7 @@ class ConstIntBoundAnalyzer::Impl
       auto val = bound_->find(expr);
       if (val != bound_->end()) {
         auto everything = Everything(expr->dtype);
-        CHECK(
+        ICHECK(
             (val->second->min_value == res.min_value && val->second->max_value == res.max_value) ||
             (val->second->min_value == everything.min_value &&
              val->second->max_value == everything.max_value))
@@ -211,7 +211,7 @@ class ConstIntBoundAnalyzer::Impl
   Entry VisitExpr_(const DivNode* op) final {
     Entry a = VisitExpr(op->a);
     Entry b = VisitExpr(op->b);
-    CHECK(!b.is_const(0)) << "divide by zero";
+    ICHECK(!b.is_const(0)) << "divide by zero";
     return HandleDivision(a, b, op->dtype, InfAwareDiv);
   }
 
@@ -230,7 +230,7 @@ class ConstIntBoundAnalyzer::Impl
                          std::min(std::max(a.max_value, (int64_t)0), b_max_cap));
       }
     } else {
-      CHECK(!b.is_const(0)) << "mod by zero";
+      ICHECK(!b.is_const(0)) << "mod by zero";
       // mod by negative value is rare,
       // and we just use the simplest rule.
       return Everything(op->dtype);
@@ -240,7 +240,7 @@ class ConstIntBoundAnalyzer::Impl
   Entry VisitExpr_(const FloorDivNode* op) final {
     Entry a = VisitExpr(op->a);
     Entry b = VisitExpr(op->b);
-    CHECK(!b.is_const(0)) << "floordiv by zero";
+    ICHECK(!b.is_const(0)) << "floordiv by zero";
     return HandleDivision(a, b, op->dtype, InfAwareFloorDiv);
   }
 
@@ -258,7 +258,7 @@ class ConstIntBoundAnalyzer::Impl
         return MakeBound(0, b_max_cap);
       }
     } else {
-      CHECK(!b.is_const(0)) << "floormod by zero";
+      ICHECK(!b.is_const(0)) << "floormod by zero";
       // mod by negative value is rare,
       // and we just use the simplest rule.
       return Everything(op->dtype);
@@ -352,7 +352,7 @@ class ConstIntBoundAnalyzer::Impl
     additional_info_.insert(additional_info_.end(), info.begin(), info.end());
     size_t new_size = old_size + info.size();
     auto frecover = [old_size, new_size, this]() {
-      CHECK_EQ(additional_info_.size(), new_size);
+      ICHECK_EQ(additional_info_.size(), new_size);
       additional_info_.resize(old_size);
     };
     return frecover;
@@ -432,11 +432,11 @@ class ConstIntBoundAnalyzer::Impl
    */
   static int64_t InfAwareAdd(int64_t x, int64_t y) {
     if (x == kPosInf) {
-      CHECK(y != kNegInf);
+      ICHECK(y != kNegInf);
       return kPosInf;
     }
     if (x == kNegInf) {
-      CHECK(y != kPosInf);
+      ICHECK(y != kPosInf);
       return kNegInf;
     }
     if (y == kPosInf || y == kNegInf) return y;
@@ -464,7 +464,7 @@ class ConstIntBoundAnalyzer::Impl
    * \return the result.
    */
   static int64_t InfAwareDiv(int64_t x, int64_t y) {
-    CHECK_NE(y, 0);
+    ICHECK_NE(y, 0);
     if (x == kPosInf || x == kNegInf) {
       if (y > 0) return x;
       return -x;
@@ -478,7 +478,7 @@ class ConstIntBoundAnalyzer::Impl
    * \return the result.
    */
   static int64_t InfAwareFloorDiv(int64_t x, int64_t y) {
-    CHECK_NE(y, 0);
+    ICHECK_NE(y, 0);
     if (x == kPosInf || x == kNegInf) {
       if (y > 0) return x;
       return -x;
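
// [editor's sketch, not part of the patch] The InfAware* helpers above do
// saturating int64 bound arithmetic with kPosInf/kNegInf sentinels; the
// ICHECKs reject the one undefined combination, (+inf) + (-inf). A
// stand-alone version of InfAwareAdd (assert() standing in for ICHECK):
#include <cassert>
#include <cstdint>
#include <iostream>
#include <limits>

constexpr int64_t kPosInf = std::numeric_limits<int64_t>::max();
constexpr int64_t kNegInf = -kPosInf;

int64_t InfAwareAdd(int64_t x, int64_t y) {
  if (x == kPosInf) {
    assert(y != kNegInf);  // mirrors ICHECK(y != kNegInf)
    return kPosInf;
  }
  if (x == kNegInf) {
    assert(y != kPosInf);  // mirrors ICHECK(y != kPosInf)
    return kNegInf;
  }
  if (y == kPosInf || y == kNegInf) return y;
  return x + y;  // assumption: finite inputs stay in range here
}

int main() {
  std::cout << (InfAwareAdd(kPosInf, 5) == kPosInf) << "\n";   // 1: stays +inf
  std::cout << (InfAwareAdd(-3, kNegInf) == kNegInf) << "\n";  // 1: stays -inf
}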
diff --git a/src/arith/domain_touched.cc b/src/arith/domain_touched.cc
index d59486c..3c3da5f 100644
--- a/src/arith/domain_touched.cc
+++ b/src/arith/domain_touched.cc
@@ -67,7 +67,7 @@ class BufferTouchedDomain final : public StmtExprVisitor {
   void VisitStmt_(const AttrStmtNode* op) final {
     if (op->attr_key == tir::attr::thread_extent) {
       const IterVarNode* thread_axis = op->node.as<IterVarNode>();
-      CHECK(thread_axis);
+      ICHECK(thread_axis);
       const VarNode* var = thread_axis->var.get();
       dom_map_[var] = IntSet::FromRange(Range(make_zero(op->value.dtype()), op->value));
       StmtExprVisitor::VisitStmt_(op);
diff --git a/src/arith/int_constraints.cc b/src/arith/int_constraints.cc
index 56c95d0..3a668c2 100644
--- a/src/arith/int_constraints.cc
+++ b/src/arith/int_constraints.cc
@@ -43,9 +43,9 @@ Array<PrimExpr> AsConditions(const Array<Var>& variables, const Map<Var, IntGrou
   Array<PrimExpr> res;
   // use variables to keep the order of iteration
   // so as to get rid of any non-determinism.
-  CHECK_EQ(variables.size(), bounds.size());
+  ICHECK_EQ(variables.size(), bounds.size());
   for (const auto v : variables) {
-    CHECK(bounds.count(v));
+    ICHECK(bounds.count(v));
     const auto& bnds = bounds[v];
     PrimExpr lhs = bnds->coef * v;
     for (const PrimExpr& rhs : bnds->equal) {
@@ -66,7 +66,7 @@ Array<PrimExpr> AsConditions(const Array<Var>& variables, const Map<Var, IntGrou
 
 IntGroupBounds::IntGroupBounds(PrimExpr coef, Array<PrimExpr> lower, Array<PrimExpr> equal,
                                Array<PrimExpr> upper) {
-  CHECK(coef.dtype().is_int() || coef.dtype().is_uint())
+  ICHECK(coef.dtype().is_int() || coef.dtype().is_uint())
       << "Coefficient in IntGroupBounds must be integers";
   ObjectPtr<IntGroupBoundsNode> node = make_object<IntGroupBoundsNode>();
   node->coef = std::move(coef);
@@ -178,7 +178,7 @@ Range IntGroupBounds::FindBestRange(const Map<Var, Range>& vranges_addl) const {
   }
 
   if (!best_lower.defined()) {
-    CHECK(!best_diff_over.defined());
+    ICHECK(!best_diff_over.defined());
     return Range();
   }
   return Range::FromMinExtent(best_lower, analyzer.Simplify(best_diff_over + 1));
@@ -196,7 +196,7 @@ TVM_REGISTER_GLOBAL("arith.IntGroupBounds_from_range").set_body_typed(IntGroupBo
 
 TVM_REGISTER_GLOBAL("arith.IntGroupBounds_FindBestRange")
     .set_body([](TVMArgs args, TVMRetValue* ret) {
-      CHECK(args.size() == 1 || args.size() == 2);
+      ICHECK(args.size() == 1 || args.size() == 2);
       IntGroupBounds bounds = args[0];
       if (args.size() == 1) {
         *ret = bounds.FindBestRange();
@@ -221,9 +221,9 @@ IntConstraints::IntConstraints(Array<Var> variables, Map<Var, Range> ranges,
   if (!ranges.defined()) {
     ranges = Map<Var, Range>();
   }
-  CHECK(relations.defined());
+  ICHECK(relations.defined());
   for (const auto& var : variables) {
-    CHECK(var.dtype().is_int() || var.dtype().is_uint())
+    ICHECK(var.dtype().is_int() || var.dtype().is_uint())
         << "Variables in IntConstraints must be integers";
   }
   node->variables = std::move(variables);
@@ -259,7 +259,7 @@ IntConstraintsTransform::IntConstraintsTransform(IntConstraints src, IntConstrai
 
 IntConstraintsTransform IntConstraintsTransform::operator+(
     const IntConstraintsTransform& other) const {
-  CHECK(other->src.same_as(operator->()->dst));
+  ICHECK(other->src.same_as(operator->()->dst));
   Map<Var, PrimExpr> dst_to_src;
   Map<Var, PrimExpr> src_to_dst;
 
diff --git a/src/arith/int_set.cc b/src/arith/int_set.cc
index 9940d1f..6490f67 100644
--- a/src/arith/int_set.cc
+++ b/src/arith/int_set.cc
@@ -412,7 +412,7 @@ class IntervalSetEvaluator : public ExprFunctor<IntervalSet(const PrimExpr&)> {
   IntervalSet VisitExpr_(const OrNode* op) final { return VisitBinaryExpr_<Or>(op); }
 
   IntervalSet VisitExpr_(const RampNode* op) final {
-    CHECK(eval_vec_);
+    ICHECK(eval_vec_);
     IntervalSet base = Eval(op->base);
     PVar<IntImm> stride;
     if (stride.Match(op->stride)) {
@@ -431,7 +431,7 @@ class IntervalSetEvaluator : public ExprFunctor<IntervalSet(const PrimExpr&)> {
   }
 
   IntervalSet VisitExpr_(const BroadcastNode* op) final {
-    CHECK(eval_vec_);
+    ICHECK(eval_vec_);
     return VisitExpr(op->value);
   }
 
@@ -506,7 +506,7 @@ Range IntSet::CoverRange(Range max_range) const {
   IntSet temp;
   Analyzer analyzer;
   const IntervalSetNode* s_int = (*this).as<IntervalSetNode>();
-  CHECK(s_int != nullptr);
+  ICHECK(s_int != nullptr);
   if (s_int->HasUpperBound() && s_int->HasLowerBound()) {
     return Range::FromMinExtent(s_int->min_value,
                                 analyzer.Simplify(s_int->max_value + 1 - s_int->min_value));
@@ -516,13 +516,13 @@ Range IntSet::CoverRange(Range max_range) const {
 
 PrimExpr IntSet::min() const {
   const IntervalSetNode* s_int = (*this).as<IntervalSetNode>();
-  CHECK(s_int);
+  ICHECK(s_int);
   return s_int->min_value;
 }
 
 PrimExpr IntSet::max() const {
   const IntervalSetNode* s_int = (*this).as<IntervalSetNode>();
-  CHECK(s_int);
+  ICHECK(s_int);
   return s_int->max_value;
 }
 
@@ -584,7 +584,7 @@ SignType IntSet::GetSignType() const {
 }
 PrimExpr IntSet::PointValue() const {
   const IntervalSetNode* s_int = (*this).as<IntervalSetNode>();
-  CHECK(s_int && s_int->IsSinglePoint());
+  ICHECK(s_int && s_int->IsSinglePoint());
   return s_int->min_value;
 }
 
diff --git a/src/arith/ir_mutator_with_analyzer.cc b/src/arith/ir_mutator_with_analyzer.cc
index 8fb69b3..7bc0d94 100644
--- a/src/arith/ir_mutator_with_analyzer.cc
+++ b/src/arith/ir_mutator_with_analyzer.cc
@@ -96,7 +96,7 @@ Stmt IRMutatorWithAnalyzer::VisitStmt_(const IfThenElseNode* op) {
 Stmt IRMutatorWithAnalyzer::VisitStmt_(const AttrStmtNode* op) {
   if (op->attr_key == tir::attr::thread_extent || op->attr_key == tir::attr::virtual_thread) {
     IterVar iv = Downcast<IterVar>(op->node);
-    CHECK_NE(iv->thread_tag.length(), 0U);
+    ICHECK_NE(iv->thread_tag.length(), 0U);
     analyzer_->Bind(iv->var, Range::FromMinExtent(0, op->value));
     Stmt stmt = StmtExprMutator::VisitStmt_(op);
     return stmt;
diff --git a/src/arith/ir_visitor_with_analyzer.h b/src/arith/ir_visitor_with_analyzer.h
index 388720a..058abc8 100644
--- a/src/arith/ir_visitor_with_analyzer.h
+++ b/src/arith/ir_visitor_with_analyzer.h
@@ -44,7 +44,7 @@ class IRVisitorWithAnalyzer final : public StmtExprVisitor {
   void VisitStmt_(const AttrStmtNode* op) {
     if (op->attr_key == attr::thread_extent || op->attr_key == attr::virtual_thread) {
       IterVar iv = Downcast<IterVar>(op->node);
-      CHECK_NE(iv->thread_tag.length(), 0U);
+      ICHECK_NE(iv->thread_tag.length(), 0U);
       analyzer_.Bind(iv->var, Range::FromMinExtent(0, op->value));
       StmtExprVisitor::VisitStmt_(op);
     } else {
diff --git a/src/arith/iter_affine_map.cc b/src/arith/iter_affine_map.cc
index e56ef2a..283ffa6 100644
--- a/src/arith/iter_affine_map.cc
+++ b/src/arith/iter_affine_map.cc
@@ -336,7 +336,7 @@ class IterMapRewriter : public ExprMutator {
     } else if (const auto* op = expr.as<IterSplitExprNode>()) {
       return IterSumExpr({GetRef<IterSplitExpr>(op)}, make_zero(expr->dtype));
     } else {
-      CHECK(!expr->IsInstance<IterMapExprNode>());
+      ICHECK(!expr->IsInstance<IterMapExprNode>());
       return IterSumExpr({}, expr);
     }
   }
@@ -566,7 +566,7 @@ PrimExpr IterMapRewriter::VisitExpr_(const MulNode* op) {
     MulToLhs(ret.CopyOnWrite(), b);
     return std::move(ret);
   } else {
-    CHECK(a->IsInstance<IterSplitExprNode>());
+    ICHECK(a->IsInstance<IterSplitExprNode>());
     IterSplitExpr ret = Downcast<IterSplitExpr>(std::move(a));
     ret.CopyOnWrite()->scale *= b;
     return std::move(ret);
@@ -639,7 +639,7 @@ PrimExpr IterMapRewriter::VisitExpr_(const FloorDivNode* op) {
       return FloorDiv(a, b);
     }
   } else {
-    CHECK(a->IsInstance<IterSplitExprNode>());
+    ICHECK(a->IsInstance<IterSplitExprNode>());
     IterSplitExpr ret = Downcast<IterSplitExpr>(std::move(a));
     return SplitFloorDivConst(ret, b);
   }
@@ -707,7 +707,7 @@ PrimExpr IterMapRewriter::VisitExpr_(const FloorModNode* op) {
       return FloorMod(a, b);
     }
   } else {
-    CHECK(a->IsInstance<IterSplitExprNode>());
+    ICHECK(a->IsInstance<IterSplitExprNode>());
     IterSplitExpr ret = Downcast<IterSplitExpr>(std::move(a));
     return SplitFloorModConst(ret, b);
   }
diff --git a/src/arith/modular_set.cc b/src/arith/modular_set.cc
index 9826769..ac176b2 100644
--- a/src/arith/modular_set.cc
+++ b/src/arith/modular_set.cc
@@ -67,7 +67,7 @@ struct ModularSetAnalyzer::Entry {
   Entry() = default;
 
   Entry(int64_t coeff, int64_t base) {
-    CHECK_GE(coeff, 0);
+    ICHECK_GE(coeff, 0);
     this->coeff = coeff;
     if (coeff != 0) {
       base = base % coeff;
@@ -93,10 +93,10 @@ class ModularSetAnalyzer::Impl : public ExprFunctor<ModularSetAnalyzer::Entry(co
     if (!allow_override) {
       auto it = var_map_.find(var);
       if (it != var_map_.end()) {
-        CHECK(it->second == info) << "Trying to update var \'" << var << "\'"
-                                  << " with a different const bound: "
-                                  << "original=" << ModularSet(it->second.coeff, it->second.base)
-                                  << ", new=" << info;
+        ICHECK(it->second == info)
+            << "Trying to update var \'" << var << "\'"
+            << " with a different const bound: "
+            << "original=" << ModularSet(it->second.coeff, it->second.base) << ", new=" << info;
       }
     }
     var_map_[var] = Entry(info->coeff, info->base);
@@ -165,7 +165,7 @@ class ModularSetAnalyzer::Impl : public ExprFunctor<ModularSetAnalyzer::Entry(co
 
   Entry DivByConst(const PrimExpr& lhs, int64_t val, bool round_down) {
     Entry a = VisitExpr(lhs);
-    CHECK_NE(val, 0);
+    ICHECK_NE(val, 0);
     if (a.coeff % val == 0) {
       if (a.base == 0) {
         // a c x  / c -> a x
diff --git a/src/arith/pattern_match.h b/src/arith/pattern_match.h
index 78ae446..01baaa8 100644
--- a/src/arith/pattern_match.h
+++ b/src/arith/pattern_match.h
@@ -49,10 +49,10 @@
  *  arith::PVar<Var> v;
  *  // We can match integer and Var, both of which are
  *  // special case container of Expr
- *  CHECK((v * c).Match(tx * 3));
- *  CHECK_EQ(c.Eval()->value, 3);
+ *  ICHECK((v * c).Match(tx * 3));
+ *  ICHECK_EQ(c.Eval()->value, 3);
  *  // cannot match c to ty
- *  CHECK(!(v * c).Match(tx * ty));
+ *  ICHECK(!(v * c).Match(tx * ty));
  *
  * \endcode
  *
@@ -199,7 +199,7 @@ class PVar : public Pattern<PVar<T>> {
   }
 
   T Eval() const {
-    CHECK(filled_);
+    ICHECK(filled_);
     return value_;
   }
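
// [editor's sketch, not part of the patch] PVar::Eval() above may only be
// called after a successful Match() has set filled_; the ICHECK enforces
// that protocol. A stripped-down analogue of a pattern variable:
#include <cassert>
#include <iostream>

template <typename T>
class PVarLike {
 public:
  bool Match(const T& value) {
    // A real matcher would also check consistency with an earlier binding.
    value_ = value;
    filled_ = true;
    return true;
  }
  T Eval() const {
    assert(filled_);  // mirrors ICHECK(filled_)
    return value_;
  }

 private:
  T value_{};
  bool filled_{false};
};

int main() {
  PVarLike<int> c;
  // Calling c.Eval() here would trip the check; bind first:
  c.Match(3);
  std::cout << c.Eval() << "\n";  // 3
}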
 
diff --git a/src/arith/rewrite_simplify.cc b/src/arith/rewrite_simplify.cc
index cb8ef01..a58e443 100644
--- a/src/arith/rewrite_simplify.cc
+++ b/src/arith/rewrite_simplify.cc
@@ -109,9 +109,9 @@ void RewriteSimplifier::Impl::Update(const Var& var, const PrimExpr& info, bool
   if (!can_override) {
     auto it = var_map_.find(var);
     if (it != var_map_.end()) {
-      CHECK(ExprDeepEqual()(it->second, info)) << "Trying to update var \'" << var << "\'"
-                                               << " with a different value: "
-                                               << "original=" << it->second << ", new=" << info;
+      ICHECK(ExprDeepEqual()(it->second, info)) << "Trying to update var \'" << var << "\'"
+                                                << " with a different value: "
+                                                << "original=" << it->second << ", new=" << info;
     }
   }
   var_map_[var] = info;
@@ -222,7 +222,7 @@ std::function<void()> RewriteSimplifier::Impl::EnterConstraint(const PrimExpr& c
   literal_constraints_.push_back(operator()(constraint));
   size_t new_literal_size = literal_constraints_.size();
   auto frecover = [old_literal_size, new_literal_size, this]() {
-    CHECK_EQ(literal_constraints_.size(), new_literal_size);
+    ICHECK_EQ(literal_constraints_.size(), new_literal_size);
     literal_constraints_.resize(old_literal_size);
   };
   return frecover;
@@ -461,8 +461,8 @@ PrimExpr RewriteSimplifier::Impl::VisitExpr_(const DivNode* op) {
 
   // x / 2.0 = x * 0.5
   if (const FloatImmNode* ptr = op->b.as<FloatImmNode>()) {
-    CHECK(op->dtype.is_float() ||
-          datatype::Registry::Global()->GetTypeRegistered(op->dtype.code()));
+    ICHECK(op->dtype.is_float() ||
+           datatype::Registry::Global()->GetTypeRegistered(op->dtype.code()));
     return op->a * make_const(op->b.dtype(), 1.0 / ptr->value);
   }
 
diff --git a/src/arith/solve_linear_equation.cc b/src/arith/solve_linear_equation.cc
index cda1ec2..22bf736 100644
--- a/src/arith/solve_linear_equation.cc
+++ b/src/arith/solve_linear_equation.cc
@@ -42,8 +42,8 @@ void SmithNormalFormDiag(std::vector<std::vector<int64_t>>* S, std::vector<std::
   if (S->empty() || V->empty()) return;
   size_t m = S->size();
   size_t n = (*S)[0].size();  // n is # of variables
-  CHECK_EQ(V->size(), n);
-  CHECK_EQ((*V)[0].size(), n);
+  ICHECK_EQ(V->size(), n);
+  ICHECK_EQ((*V)[0].size(), n);
 
   for (size_t index = 0; index < std::min(m, n); ++index) {
     // Here A is partially diagonalized, that is A[i, j] is zero for all i, j
diff --git a/src/arith/solve_linear_inequality.cc b/src/arith/solve_linear_inequality.cc
index eec916a..f4de9ff 100644
--- a/src/arith/solve_linear_inequality.cc
+++ b/src/arith/solve_linear_inequality.cc
@@ -268,7 +268,7 @@ PartialSolvedInequalities SolveLinearInequalities(const IntConstraints& system_t
 
   Map<Var, IntGroupBounds> res_bounds;
   for (const Var& v : system_to_solve->variables) {
-    CHECK(!res_bounds.count(v))
+    ICHECK(!res_bounds.count(v))
         << "Variable " << v
         << " appears more than one time in the `variables` which might be a bug";
 
@@ -436,7 +436,7 @@ IntConstraints SolveInequalitiesToRange(const IntConstraints& inequalities) {
     analyzer.Bind(vranges);
 
     const Var& var = *it;
-    CHECK(solved_bounds.count(var));
+    ICHECK(solved_bounds.count(var));
     auto bnd = solved_bounds[var];
     if (is_one(bnd->coef) && !bnd->equal.empty()) {
       // There is an equation of the form `v == expr`, so this variable can be completely removed.
diff --git a/src/auto_scheduler/compute_dag.cc b/src/auto_scheduler/compute_dag.cc
index 75fd27e..c6cf094 100755
--- a/src/auto_scheduler/compute_dag.cc
+++ b/src/auto_scheduler/compute_dag.cc
@@ -553,7 +553,7 @@ class FlopEstimator : public ExprFunctor<double(const PrimExpr& n)> {
         if (pop->attrs.count("FLOP")) {
           // Use user-provided FLOP
           auto pint = pop->attrs["FLOP"].as<IntImmNode>();
-          CHECK(pint != nullptr);
+          ICHECK(pint != nullptr);
           ret += pint->value;
         } else {
           // Estimate by parsing the compute body
@@ -719,11 +719,11 @@ class IndexRewriter : public StmtExprMutator {
       for (const auto& arg : op->indices) {
         std::string axis_name;
         if (const auto* int_imm = arg.as<IntImmNode>()) {
-          CHECK_EQ(int_imm->value, 0);
+          ICHECK_EQ(int_imm->value, 0);
           axis_name = "IntImm";
         } else {
           axis_name = AxisBaseName(CleanName(Downcast<Var>(arg)->name_hint));
-          CHECK_EQ(name_to_arg.count(axis_name), 0);
+          ICHECK_EQ(name_to_arg.count(axis_name), 0);
           name_to_arg[axis_name] = arg;
         }
       }
@@ -733,7 +733,7 @@ class IndexRewriter : public StmtExprMutator {
       for (int i = new_names_.size() - 1; i >= 0; --i) {
         auto ori_iter_name = new_names_[i];
         auto name_it = name_to_arg.find(ori_iter_name);
-        CHECK(name_it != name_to_arg.end());
+        ICHECK(name_it != name_to_arg.end());
         PrimExpr ori_arg = name_it->second;
 
         PrimExpr mod_factor = new_shape_[i];
@@ -772,12 +772,12 @@ std::string GetOrigLayout(std::set<std::string>* placeholder_axis_names, const t
   std::ostringstream os;
   uint32_t i = 0;
   const auto& placeholder_op = placeholder->op;
-  CHECK_GT(extractor.read_access.count(placeholder_op), 0);
+  ICHECK_GT(extractor.read_access.count(placeholder_op), 0);
   for (const auto& ev : extractor.read_access[placeholder_op]) {
     for (const auto& e : ev) {
       std::string axis_name;
       if (const auto* int_imm = e.as<IntImmNode>()) {
-        CHECK_EQ(int_imm->value, 0);
+        ICHECK_EQ(int_imm->value, 0);
         axis_name = "IntImm";
       } else {
         axis_name = AxisBaseName(CleanName(Downcast<Var>(e)->name_hint));
@@ -788,7 +788,7 @@ std::string GetOrigLayout(std::set<std::string>* placeholder_axis_names, const t
     }
   }
 
-  CHECK_EQ(placeholder_axis_names->size(), placeholder->shape.size());
+  ICHECK_EQ(placeholder_axis_names->size(), placeholder->shape.size());
   std::string orig_layout = os.str();
   os.str("");
   // TODO(minmin): uncomment this line for relay integration
@@ -837,7 +837,7 @@ std::string GetNewLayout(Array<PrimExpr>* new_shape, const State& state, const i
     ExtractOriginalIterators(iter->name, &ori_iter_names);
     // fused iters have been replaced with iter->orig_iters.
     // So there should be only one ori iter name extracted from iter->name.
-    CHECK_EQ(ori_iter_names.size(), 1);
+    ICHECK_EQ(ori_iter_names.size(), 1);
     auto ori_iter_name = AxisBaseName(*ori_iter_names.begin());
     new_axis_names.push_back(ori_iter_name);
   }
@@ -937,7 +937,7 @@ void ComputeDAG::RewriteLayout(const Array<Step>& transform_steps) {
               new_body.push_back(index_rewriter.Rewrite(body));
             }
             old_compute_op = op;
-            CHECK(!new_compute_op.defined());
+            ICHECK(!new_compute_op.defined());
             new_compute_op = te::ComputeOp(pop->name, pop->tag, pop->attrs, pop->axis, new_body);
           }
         }
@@ -1109,7 +1109,7 @@ String ComputeDAG::PrintStepsAsPython(const Array<Step>& transform_steps) const
 }
 
 State ComputeDAG::InferBound(const State& state) const {
-  CHECK(state->concrete) << "Only concrete state can be processed to get bound info.";
+  ICHECK(state->concrete) << "Only concrete state can be processed to get bound info.";
 
   State ret_state;
   StateNode* pstate;
@@ -1267,7 +1267,7 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
               ss << ".v" << k;
             }
             if (auto preduce = pop->body[k].as<ReduceNode>()) {
-              CHECK_LT(k, preduce->combiner->result.size());
+              ICHECK_LT(k, preduce->combiner->result.size());
               PrimExpr combiner = preduce->combiner->result[k];
               if (combiner->IsInstance<AddNode>()) {
                 ss << " += " << preduce->source[0] << "\n";
@@ -1300,7 +1300,7 @@ TVM_REGISTER_GLOBAL("auto_scheduler.ComputeDAG")
       if (tensors) {
         return ComputeDAG(tensors.value());
       }
-      CHECK(sch) << "Both tensors and schedule are null";
+      ICHECK(sch) << "Both tensors and schedule are null";
       return ComputeDAG(sch.value());
     });
 
diff --git a/src/auto_scheduler/cost_model.cc b/src/auto_scheduler/cost_model.cc
index 3d540c7..4ed5ca2 100755
--- a/src/auto_scheduler/cost_model.cc
+++ b/src/auto_scheduler/cost_model.cc
@@ -34,7 +34,7 @@ TVM_REGISTER_OBJECT_TYPE(PythonBasedModelNode);
 RandomModel::RandomModel() {
   ObjectPtr<RandomModelNode> node = make_object<RandomModelNode>();
   const auto* f = runtime::Registry::Get("auto_scheduler.cost_model.random_fill_float");
-  CHECK(f != nullptr);
+  ICHECK(f != nullptr);
   node->random_number_func = reinterpret_cast<const TypedPackedFunc<void(size_t, void*)>*>(f);
   data_ = std::move(node);
 }
@@ -109,7 +109,7 @@ void PythonBasedModelNode::PredictStages(const SearchTask& task, const Array<Sta
   // Score of each stage in each states.
   size_t idx = n_states;
   for (size_t i = 0; i < n_states; ++i) {
-    CHECK_LE(idx, flatten_scores.size());
+    ICHECK_LE(idx, flatten_scores.size());
 
     // Number of scored stages of this state.
     int s_length = static_cast<int>(flatten_scores[idx++]);
@@ -134,7 +134,7 @@ void PythonBasedModelNode::PredictStages(const SearchTask& task, const Array<Sta
           scores.push_back(flatten_scores[idx + offset]);
           offset++;
         }
-        CHECK_EQ(offset, s_length);
+        ICHECK_EQ(offset, s_length);
         stage_scores->push_back(std::move(scores));
       }
       idx += s_length;
diff --git a/src/auto_scheduler/feature.cc b/src/auto_scheduler/feature.cc
index 15066a9..8d17c4b 100755
--- a/src/auto_scheduler/feature.cc
+++ b/src/auto_scheduler/feature.cc
@@ -298,7 +298,7 @@ class MathOpCounter : public StmtExprVisitor {
 
   void VisitExpr_(const CallNode* op) final {
     auto* pop = op->op.as<OpNode>();
-    CHECK(pop != nullptr);
+    ICHECK(pop != nullptr);
     auto effect_kind = op_call_effect_[GetRef<Op>(pop)];
     bool is_pure =
         effect_kind == CallEffectKind::kPure || effect_kind == CallEffectKind::kExprAnnotation;
@@ -937,7 +937,7 @@ class PerStoreFeatureExtractor : public StmtExprVisitor {
         while (compute_ops_list[pt] < cur_compute_ops - 1e-4) {
           pt++;
         }
-        CHECK_LT(pt, compute_ops_list.size());
+        ICHECK_LT(pt, compute_ops_list.size());
 
         float value;
         if (pt == 0) {
@@ -1323,7 +1323,7 @@ void GetPerStoreFeaturesWorkerFunc(const SearchTask& task, const State& state, i
         tir::transform::Sequential(Array<tvm::transform::Pass>{tir::transform::Simplify()});
     mod = optimize(std::move(mod));
     const auto& it = mod->functions.find(global_var);
-    CHECK(it != mod->functions.end());
+    ICHECK(it != mod->functions.end());
     const auto& prim_func = (*it).second.as<PrimFuncNode>();
     GetPerStoreFeature(prim_func->body, task->hardware_params->cache_line_bytes, max_n_bufs,
                        feature);
@@ -1389,7 +1389,7 @@ void GetPerStoreFeaturesFromFile(const std::string& filename, int max_lines, int
 
   const auto* workload_key_to_tensors =
       tvm::runtime::Registry::Get("auto_scheduler.workload_key_to_tensors");
-  CHECK(workload_key_to_tensors != nullptr);
+  ICHECK(workload_key_to_tensors != nullptr);
 
   // read from file
   RecordReader reader(filename);
@@ -1454,7 +1454,7 @@ void GetPerStoreFeaturesFromMeasurePairs(const Array<MeasureInput>& inputs,
 
   const auto* workload_key_to_tensors =
       tvm::runtime::Registry::Get("auto_scheduler.workload_key_to_tensors");
-  CHECK(workload_key_to_tensors != nullptr);
+  ICHECK(workload_key_to_tensors != nullptr);
 
   tasks.reserve(inputs.size());
   normalized_throughputs->reserve(inputs.size());
@@ -1548,7 +1548,7 @@ TVMByteArray SerializeFeatures(std::vector<std::vector<float>>&& features,
   size_vector.push_back(static_cast<int>(task_ids.size()));
   total_bytes += sizeof(int) * task_ids.size();
 
-  CHECK_EQ(size_vector.size(), size_vector_size);
+  ICHECK_EQ(size_vector.size(), size_vector_size);
 
   // allocate memory
   out_data->reserve(total_bytes);
@@ -1574,7 +1574,7 @@ TVMByteArray SerializeFeatures(std::vector<std::vector<float>>&& features,
   memmove(ptr, reinterpret_cast<char*>(task_ids.data()), task_ids.size() * sizeof(int));
   ptr += task_ids.size() * sizeof(int);
 
-  CHECK_EQ(ptr - out_data->data(), total_bytes);
+  ICHECK_EQ(ptr - out_data->data(), total_bytes);
 
   return TVMByteArray{out_data->data(), total_bytes};
 }
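
// [editor's sketch, not part of the patch] SerializeFeatures above packs
// several arrays into one flat byte buffer and uses ICHECK_EQ as bookkeeping:
// the size header must have the expected entry count, and the write cursor
// must land exactly on total_bytes. The same pattern in miniature:
#include <cassert>
#include <cstddef>
#include <cstring>
#include <iostream>
#include <vector>

std::vector<char> Pack(const std::vector<int>& a, const std::vector<float>& b) {
  size_t total_bytes = a.size() * sizeof(int) + b.size() * sizeof(float);
  std::vector<char> out(total_bytes);
  char* ptr = out.data();
  std::memcpy(ptr, a.data(), a.size() * sizeof(int));
  ptr += a.size() * sizeof(int);
  std::memcpy(ptr, b.data(), b.size() * sizeof(float));
  ptr += b.size() * sizeof(float);
  // mirrors ICHECK_EQ(ptr - out_data->data(), total_bytes)
  assert(static_cast<size_t>(ptr - out.data()) == total_bytes);
  return out;
}

int main() {
  auto buf = Pack({1, 2, 3}, {0.5f});
  std::cout << buf.size() << " bytes\n";  // 16 where int/float are 4 bytes
}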
diff --git a/src/auto_scheduler/loop_state.cc b/src/auto_scheduler/loop_state.cc
index c3c764f..23d6eb6 100755
--- a/src/auto_scheduler/loop_state.cc
+++ b/src/auto_scheduler/loop_state.cc
@@ -114,7 +114,7 @@ void AttachMap::DeleteStage(int stage_id) {
 
 void AttachMap::UpdateIters(const std::vector<IterKey>& original_iters,
                             const std::vector<IterKey>& new_iters) {
-  CHECK_EQ(original_iters.size(), new_iters.size());
+  ICHECK_EQ(original_iters.size(), new_iters.size());
   AttachMapNode* pnode = CopyOnWrite();
   std::unordered_map<IterKey, std::vector<StageKey>> new_iter_to_attached_stages;
   for (size_t i = 0; i < original_iters.size(); ++i) {
@@ -265,8 +265,8 @@ void State::pragma(int stage_id, const Iterator& it, const String& pragma_type)
 
 void State::reorder(int stage_id, const Array<Iterator>& order) {
   const Stage& stage = operator->()->stages[stage_id];
-  CHECK_EQ(order.size(), stage->iters.size()) << "The order of all iterators "
-                                              << "should be specified";
+  ICHECK_EQ(order.size(), stage->iters.size()) << "The order of all iterators "
+                                               << "should be specified";
   Array<Integer> after_ids;
   GetIndices(stage->iters, order, &after_ids);
   ReorderStep step = ReorderStep(stage_id, after_ids);
diff --git a/src/auto_scheduler/measure.cc b/src/auto_scheduler/measure.cc
index c3ee6a1..6c5c10e 100755
--- a/src/auto_scheduler/measure.cc
+++ b/src/auto_scheduler/measure.cc
@@ -303,7 +303,7 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
         auto old_config = p->stream.precision(4);
         for (size_t i = 0; i < node->costs.size(); ++i) {
           auto pf = node->costs[i].as<FloatImmNode>();
-          CHECK(pf != nullptr);
+          ICHECK(pf != nullptr);
           p->stream << pf->value;
           if (i != node->costs.size() - 1) {
             p->stream << ",";
diff --git a/src/auto_scheduler/measure_record.cc b/src/auto_scheduler/measure_record.cc
index 99c01b1..66f521e 100755
--- a/src/auto_scheduler/measure_record.cc
+++ b/src/auto_scheduler/measure_record.cc
@@ -53,7 +53,7 @@ struct Handler<::tvm::Array<::tvm::auto_scheduler::Stage>> {
     bool s;
     reader->BeginArray();
     s = reader->NextArrayItem();
-    CHECK(!s);
+    ICHECK(!s);
   }
 };
 
@@ -80,7 +80,7 @@ struct Handler<::tvm::Array<::tvm::auto_scheduler::Step>> {
       reader->BeginArray();
       data->push_back(::tvm::auto_scheduler::StepReadFromRecord(reader));
       s = reader->NextArrayItem();
-      CHECK(!s);
+      ICHECK(!s);
     }
   }
 };
@@ -97,13 +97,13 @@ struct Handler<::tvm::auto_scheduler::StateNode> {
     bool s;
     reader->BeginArray();
     s = reader->NextArrayItem();
-    CHECK(s);
+    ICHECK(s);
     reader->Read(&data->stages);
     s = reader->NextArrayItem();
-    CHECK(s);
+    ICHECK(s);
     reader->Read(&data->transform_steps);
     s = reader->NextArrayItem();
-    CHECK(!s);
+    ICHECK(!s);
   }
 };
 
@@ -121,15 +121,15 @@ struct Handler<::tvm::auto_scheduler::SearchTaskNode> {
     std::string str_value;
     reader->BeginArray();
     s = reader->NextArrayItem();
-    CHECK(s);
+    ICHECK(s);
     reader->Read(&str_value);
     data->workload_key = std::move(str_value);
     s = reader->NextArrayItem();
-    CHECK(s);
+    ICHECK(s);
     reader->Read(&str_value);
     data->target = ::tvm::Target(str_value);
     s = reader->NextArrayItem();
-    CHECK(!s);
+    ICHECK(!s);
   }
 };
 
@@ -150,13 +150,13 @@ struct Handler<::tvm::auto_scheduler::MeasureInputNode> {
     bool s;
     reader->BeginArray();
     s = reader->NextArrayItem();
-    CHECK(s);
+    ICHECK(s);
     reader->Read(task_node.get());
     s = reader->NextArrayItem();
-    CHECK(s);
+    ICHECK(s);
     reader->Read(state_node.get());
     s = reader->NextArrayItem();
-    CHECK(!s);
+    ICHECK(!s);
 
     data->task = ::tvm::auto_scheduler::SearchTask(task_node);
     data->state = ::tvm::auto_scheduler::State(state_node);
@@ -172,7 +172,7 @@ struct Handler<::tvm::auto_scheduler::MeasureResultNode> {
     writer->BeginArray(false);
     for (const auto& x : data.costs) {
       auto pf = x.as<::tvm::tir::FloatImmNode>();
-      CHECK(pf != nullptr) << "Cost can only contain float values";
+      ICHECK(pf != nullptr) << "Cost can only contain float values";
       writer->WriteArrayItem(pf->value);
     }
     writer->EndArray();
@@ -187,23 +187,23 @@ struct Handler<::tvm::auto_scheduler::MeasureResultNode> {
     bool s;
     reader->BeginArray();
     s = reader->NextArrayItem();
-    CHECK(s);
+    ICHECK(s);
     reader->Read(&double_list);
     data->costs.clear();
     for (const auto& i : double_list) {
       data->costs.push_back(::tvm::FloatImm(::tvm::DataType::Float(64), i));
     }
     s = reader->NextArrayItem();
-    CHECK(s);
+    ICHECK(s);
     reader->Read(&data->error_no);
     s = reader->NextArrayItem();
-    CHECK(s);
+    ICHECK(s);
     reader->Read(&data->all_cost);
     s = reader->NextArrayItem();
-    CHECK(s);
+    ICHECK(s);
     reader->Read(&data->timestamp);
     s = reader->NextArrayItem();
-    CHECK(!s);
+    ICHECK(!s);
   }
 };
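
// [editor's sketch, not part of the patch] The Handler::Read hunks above all
// decode fixed-arity JSON arrays: after each expected element ICHECK(s)
// asserts another item exists, and the final ICHECK(!s) asserts the array is
// exhausted. An analogue of that protocol over a plain vector (ArrayReader
// and its NextArrayItem are invented for this note, not dmlc-core's API):
#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

class ArrayReader {
 public:
  explicit ArrayReader(std::vector<double> items) : items_(std::move(items)) {}
  bool NextArrayItem(double* out) {  // true while another element is available
    if (pos_ >= items_.size()) return false;
    *out = items_[pos_++];
    return true;
  }

 private:
  std::vector<double> items_;
  std::size_t pos_{0};
};

int main() {
  ArrayReader reader({1.5, 2.5});
  double cost = 0, all_cost = 0;
  bool s = reader.NextArrayItem(&cost);
  assert(s);  // mirrors ICHECK(s): the element must be present
  s = reader.NextArrayItem(&all_cost);
  assert(s);
  s = reader.NextArrayItem(&all_cost);
  assert(!s);  // mirrors ICHECK(!s): no trailing elements allowed
  std::cout << cost << " " << all_cost << "\n";  // 1.5 2.5
}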
 
diff --git a/src/auto_scheduler/search_policy/empty_policy.cc b/src/auto_scheduler/search_policy/empty_policy.cc
index fba1ac2..79f9879 100644
--- a/src/auto_scheduler/search_policy/empty_policy.cc
+++ b/src/auto_scheduler/search_policy/empty_policy.cc
@@ -57,7 +57,7 @@ State EmptyPolicyNode::Search(int num_measure_trials, int early_stopping,
   // Measure is disabled if num_measure_trials <= 1
   if (num_measure_trials <= 1) {
     const auto& res = SearchOneRound();
-    CHECK_GT(res.size(), 0);
+    ICHECK_GT(res.size(), 0);
 
     return res[0];
   } else {
diff --git a/src/auto_scheduler/search_policy/search_policy.cc b/src/auto_scheduler/search_policy/search_policy.cc
index 8b6d22b..702eec0 100644
--- a/src/auto_scheduler/search_policy/search_policy.cc
+++ b/src/auto_scheduler/search_policy/search_policy.cc
@@ -39,7 +39,7 @@ void SearchPolicyNode::PreloadMeasuredStates(const String& log_file) {
   RecordReader reader = RecordReader(log_file);
   const auto& res = reader->ReadLines(-1);
   size_t log_size = res.first.size();
-  CHECK_EQ(log_size, res.second.size());
+  ICHECK_EQ(log_size, res.second.size());
   if (log_size) {
     Array<State> measured_states;
     std::vector<float> measured_throughputs;
diff --git a/src/auto_scheduler/search_policy/sketch_policy.cc b/src/auto_scheduler/search_policy/sketch_policy.cc
index 60178b3..5d6d1d2 100644
--- a/src/auto_scheduler/search_policy/sketch_policy.cc
+++ b/src/auto_scheduler/search_policy/sketch_policy.cc
@@ -147,7 +147,7 @@ State SketchPolicyNode::Search(int n_trials, int early_stopping, int num_measure
   if (n_trials <= 1) {
     // No measurement is allowed
     const Array<State>& best_states = SearchOneRound(0);
-    CHECK_GT(best_states.size(), 0);
+    ICHECK_GT(best_states.size(), 0);
     return best_states[0];
   } else {
     int num_random =
@@ -348,10 +348,10 @@ Array<State> SketchPolicyNode::GenerateSketches() {
     auto pstate = state.CopyOnWrite();
     for (size_t step_id = 0; step_id < pstate->transform_steps.size(); ++step_id) {
       if (pstate->transform_steps[step_id]->IsInstance<RfactorStepNode>()) {
-        CHECK_GE(step_id, 1);
+        ICHECK_GE(step_id, 1);
         int split_step_id = static_cast<int>(step_id - 1);
         auto step = pstate->transform_steps[split_step_id].as<SplitStepNode>();
-        CHECK(step != nullptr);
+        ICHECK(step != nullptr);
         pstate->transform_steps.Set(
             split_step_id, SplitStep(step->stage_id, step->iter_id, step->extent, {NullOpt},
                                      step->inner_to_outer));
diff --git a/src/auto_scheduler/search_policy/sketch_policy_rules.cc b/src/auto_scheduler/search_policy/sketch_policy_rules.cc
index 1b965c9..1b6cc06 100644
--- a/src/auto_scheduler/search_policy/sketch_policy_rules.cc
+++ b/src/auto_scheduler/search_policy/sketch_policy_rules.cc
@@ -115,7 +115,8 @@ SketchGenerationRule::ConditionKind RuleMultiLevelTilingWithFusion::MeetConditio
 std::vector<std::pair<State, int>> RuleMultiLevelTilingWithFusion::Apply(
     const SketchPolicyNode& policy, const State& state, int stage_id) const {
   int target_stage_id;
-  CHECK(HasSingleElementwiseMatchedConsumer(policy.search_task, state, stage_id, &target_stage_id));
+  ICHECK(
+      HasSingleElementwiseMatchedConsumer(policy.search_task, state, stage_id, &target_stage_id));
   const std::string& multi_level_tiling_structure =
       IsGPUTask(policy.search_task)
           ? GetStringParam(policy.params, SketchParamKey::MultiLevelTiling::gpu_structure)
@@ -296,7 +297,7 @@ std::vector<std::pair<State, int>> RuleSimplifyComputeWithConstTensor::Apply(
       unrolled_inner_iters.push_back(tmp_s.unroll(stage_id, iter));
     } else {
       // tile other space indices
-      CHECK(iter->iter_kind == IteratorKind::kSpatial);
+      ICHECK(iter->iter_kind == IteratorKind::kSpatial);
       tiled_outer_iters.push_back(
           tmp_s.split(stage_id, iter, Array<Optional<Integer>>(tile_level - 1, NullOpt)));
     }
@@ -319,7 +320,7 @@ std::vector<std::pair<State, int>> RuleSimplifyComputeWithConstTensor::Apply(
 
 SketchGenerationRule::ConditionKind RuleCrossThreadReduction::MeetCondition(
     const SketchPolicyNode& policy, const State& state, int stage_id) const {
-  CHECK(IsGPUTask(policy.search_task));
+  ICHECK(IsGPUTask(policy.search_task));
 
   // If it is an intermediate state created by RuleAddCacheWrite,
   // we just skip it.
@@ -386,14 +387,14 @@ std::vector<std::pair<State, int>> RuleCrossThreadReduction::Apply(const SketchP
       // If the target stage does not have a split step,
       // it must be a simple stage without reduce iters.
       // We should then do a split for it.
-      CHECK(!HasReduceIter(target_stage));
+      ICHECK(!HasReduceIter(target_stage));
       const auto& split_res = tmp_s.split(target_stage_id, target_stage->iters.back(),
                                           {Integer(task->hardware_params->warp_size)});
       tmp_s.bind(target_stage_id, split_res[1], IteratorAnnotation::kThreadX);
       split_step_ids.push_back(tmp_s->transform_steps.size() - 2);
     }
 
-    CHECK_EQ(split_step_ids.size(), 1);
+    ICHECK_EQ(split_step_ids.size(), 1);
 
     const Iterator& target_iter = tmp_s->stages[target_stage_id]->iters[num_common_outer - 1];
     const auto& split_res = tmp_s.follow_split(stage_id, fused_reduce_iter, split_step_ids[0], 1);
@@ -429,13 +430,13 @@ std::vector<std::pair<State, int>> RuleSpecialComputeLocationGPU::Apply(
     const SketchPolicyNode& policy, const State& state, int stage_id) const {
   State tmp_s = state;
   const std::set<int>& consumers = GetConsumers(policy.search_task, state, stage_id);
-  CHECK_EQ(consumers.size(), 1);
+  ICHECK_EQ(consumers.size(), 1);
 
   // Get the last outer space iterator that is not unrolled.
   const Stage& target_stage = state->stages[*consumers.begin()];
   for (size_t i = 0; i < target_stage->iters.size(); ++i) {
     if (target_stage->iters[i]->annotation == IteratorAnnotation::kUnroll) {
-      CHECK_GT(i, 0);
+      ICHECK_GT(i, 0);
 
       tmp_s.compute_at(stage_id, *consumers.begin(), target_stage->iters[i - 1]);
       break;
@@ -467,7 +468,7 @@ PopulationGenerationRule::ResultKind InitFillTileSize::Apply(SketchPolicyNode* p
         continue;
       }
 
-      CHECK(ps->extent);
+      ICHECK(ps->extent);
       int extent = GetIntImm(ps->extent.value());
       const auto& candidate_lens = policy->split_memo.GetFactorizationSchemes(
           extent, ps->lengths.size(), max_innermost_split_factor);
@@ -720,10 +721,10 @@ PopulationGenerationRule::ResultKind InitThreadBind::Apply(SketchPolicyNode* pol
       } else if (stage->compute_at != ComputeAtKind::kIter) {
         // This stage is not multi-level tiled,
         // so it must be produced by RuleCrossThreadReduction.
-        CHECK(HasCrossThreadReduction(*state, stage_id));
+        ICHECK(HasCrossThreadReduction(*state, stage_id));
       } else {
         const auto res = (*state)->attach_map->stage_to_attach_iter.find(stage_id);
-        CHECK(res != (*state)->attach_map->stage_to_attach_iter.end());
+        ICHECK(res != (*state)->attach_map->stage_to_attach_iter.end());
         multi_level_tiling_root_set.insert(res->second.first);
       }
     }
@@ -782,9 +783,9 @@ PopulationGenerationRule::ResultKind InitThreadBind::Apply(SketchPolicyNode* pol
       std::vector<Iterator> to_fuse;
       int total_space_extent = 1;
       for (const auto& i : pop->root_iter_vars()) {
-        CHECK(i->dom.defined());
+        ICHECK(i->dom.defined());
         const auto& pint = i->dom->extent.as<IntImmNode>();
-        CHECK(pint);
+        ICHECK(pint);
         total_space_extent *= pint->value;
       }
 
@@ -847,7 +848,7 @@ PopulationGenerationRule::ResultKind InitThreadBind::Apply(SketchPolicyNode* pol
       // Do cooperative fetching for the cache read stage.
       // Get spatial_split_step_ids from the root stage
       const auto& it = (*state)->attach_map->stage_to_attach_iter.find(stage_id);
-      CHECK(it != (*state)->attach_map->stage_to_attach_iter.end());
+      ICHECK(it != (*state)->attach_map->stage_to_attach_iter.end());
       Array<Integer> spatial_split_step_ids = GetSpatialSplitStepIds(*state, it->second.first);
 
       // Fuse all iterators to do cooperative fetching
@@ -897,7 +898,7 @@ PopulationGenerationRule::ResultKind MutateTileSize::Apply(SketchPolicyNode* pol
   do {
     step_id = split_step_ids[(*rand_gen)() % split_step_ids.size()];
     ps = (*state)->transform_steps[step_id].as<SplitStepNode>();
-    CHECK(ps != nullptr);
+    ICHECK(ps != nullptr);
     extent = GetIntImm(ps->extent.value());
     retry_ct += 1;
   } while (retry_ct < static_cast<int>(split_step_ids.size()) << 2 && (extent == 1 || extent == 0));
@@ -929,7 +930,7 @@ PopulationGenerationRule::ResultKind MutateTileSize::Apply(SketchPolicyNode* pol
     // Divide one factor from lengths[src_idx] and multiply it into lengths[dst_idx]
     size_t dst_idx = random_perm[(i + 1) % random_perm.size()];
     const std::vector<int>& factors = policy->split_memo.GetFactors(length);
-    CHECK_GE(factors.size(), 1);
+    ICHECK_GE(factors.size(), 1);
 
     int divide_factor;
     if (dst_idx == lengths.size() - 1) {
@@ -961,7 +962,7 @@ PopulationGenerationRule::ResultKind MutateTileSize::Apply(SketchPolicyNode* pol
       }
     }
 
-    CHECK_LE(GetIntImm(new_lengths.back()), max_innermost_split_factor);
+    ICHECK_LE(GetIntImm(new_lengths.back()), max_innermost_split_factor);
 
     StateNode* pstate = state->CopyOnWrite();
     pstate->transform_steps.Set(
@@ -994,7 +995,7 @@ PopulationGenerationRule::ResultKind MutateAutoUnroll::Apply(SketchPolicyNode* p
   // Randomly pick an auto unroll pragma step
   auto step_id = pragma_steps[(*rand_gen)() % pragma_steps.size()];
   auto ps = (*state)->transform_steps[step_id].as<PragmaStepNode>();
-  CHECK(ps);
+  ICHECK(ps);
 
   // Mutate its value to a random candidates
   auto val = std::to_string(auto_unroll_configs[(*rand_gen)() % auto_unroll_configs.size()]);
@@ -1035,7 +1036,7 @@ PopulationGenerationRule::ResultKind MutateComputeLocation::Apply(SketchPolicyNo
   size_t step_id = compute_at_steps[(*rand_gen)() % compute_at_steps.size()];
   auto ps = (*state)->transform_steps[step_id].as<ComputeAtStepNode>();
   int stage_inc = GetTargetStageIDInState(*state, step_id) - ps->stage_id;
-  CHECK(ps != nullptr);
+  ICHECK(ps != nullptr);
 
   // Randomly pick a new computation location
   std::vector<std::pair<int, int>> candidates =
@@ -1156,14 +1157,14 @@ PopulationGenerationRule::ResultKind MutateParallel::Apply(SketchPolicyNode* pol
         if (ps->iter_id == 0) {
           step = AnnotationStep(ps->stage_id, 0, ps->annotation);
         } else {
-          CHECK_LE(ps->iter_id + iter_offset, tmp_s->stages[stage_id]->iters.size());
+          ICHECK_LE(ps->iter_id + iter_offset, tmp_s->stages[stage_id]->iters.size());
           step = AnnotationStep(ps->stage_id, ps->iter_id + iter_offset, ps->annotation);
         }
       } else if (auto ps = step.as<PragmaStepNode>()) {
         if (ps->iter_id == 0) {
           step = PragmaStep(ps->stage_id, 0, ps->pragma_type);
         } else {
-          CHECK_LE(ps->iter_id + iter_offset, tmp_s->stages[stage_id]->iters.size());
+          ICHECK_LE(ps->iter_id + iter_offset, tmp_s->stages[stage_id]->iters.size());
           step = PragmaStep(ps->stage_id, ps->iter_id + iter_offset, ps->pragma_type);
         }
       } else {
diff --git a/src/auto_scheduler/search_policy/utils.cc b/src/auto_scheduler/search_policy/utils.cc
index 9e72eeb..3e2f7aa 100644
--- a/src/auto_scheduler/search_policy/utils.cc
+++ b/src/auto_scheduler/search_policy/utils.cc
@@ -32,7 +32,7 @@ namespace auto_scheduler {
 Array<Integer> GetSpatialSplitStepIds(const State& s, int stage_id) {
   const auto& stage = s->stages[stage_id];
   const auto& pop = s->stages[stage_id]->op.as<te::ComputeOpNode>();
-  CHECK(pop != nullptr);
+  ICHECK(pop != nullptr);
   const std::set<std::string>& no_split_at_inner_name_set =
       stage->op->attrs.count(SearchPolicyKey::no_split_at_inner)
           ? GetIterNameSetParam(stage->op->attrs, SearchPolicyKey::no_split_at_inner)
@@ -182,7 +182,7 @@ State DoMultiLevelTiling(const State& state, int stage_id, const std::string& fo
   for (const auto& iter : state->stages[stage_id]->iters) {
     if (!no_split_at_inner_name_set.count(iter->name)) {
       if (iter->iter_kind == IteratorKind::kSpatial) {
-        CHECK_GE(n_space, 1);
+        ICHECK_GE(n_space, 1);
 
         if (n_space == 1) {
           space_levels[0].push_back(iter);
@@ -194,7 +194,7 @@ State DoMultiLevelTiling(const State& state, int stage_id, const std::string& fo
           spatial_split_step_ids->push_back(tmp_s->transform_steps.size() - 1);
         }
       } else if (iter->iter_kind == IteratorKind::kReduction) {
-        CHECK_GE(n_reduce, 1);
+        ICHECK_GE(n_reduce, 1);
 
         if (n_reduce == 1) {
           reduce_levels[0].push_back(iter);
@@ -219,26 +219,26 @@ State DoMultiLevelTiling(const State& state, int stage_id, const std::string& fo
   }
 
   if (!space_outer.empty()) {
-    CHECK(!space_levels.empty());
+    ICHECK(!space_levels.empty());
     space_levels.front().insert(space_levels.front().begin(),
                                 std::make_move_iterator(space_outer.begin()),
                                 std::make_move_iterator(space_outer.end()));
   }
   if (!space_inner.empty()) {
-    CHECK(!space_levels.empty());
+    ICHECK(!space_levels.empty());
     space_levels.back().insert(space_levels.back().begin(),
                                std::make_move_iterator(space_inner.begin()),
                                std::make_move_iterator(space_inner.end()));
   }
 
   if (!reduce_outer.empty()) {
-    CHECK(!reduce_levels.empty());
+    ICHECK(!reduce_levels.empty());
     reduce_levels.front().insert(reduce_levels.front().begin(),
                                  std::make_move_iterator(reduce_outer.begin()),
                                  std::make_move_iterator(reduce_outer.end()));
   }
   if (!reduce_inner.empty()) {
-    CHECK(!reduce_levels.empty());
+    ICHECK(!reduce_levels.empty());
     reduce_levels.back().insert(reduce_levels.back().begin(),
                                 std::make_move_iterator(reduce_inner.begin()),
                                 std::make_move_iterator(reduce_inner.end()));
@@ -274,7 +274,7 @@ State FollowTiling(const State& state, int stage_id, const std::vector<int>& spl
   Array<Iterator> split_res;
 
   auto pop = state->stages[stage_id]->op.as<te::ComputeOpNode>();
-  CHECK(pop != nullptr);
+  ICHECK(pop != nullptr);
   const Stage& stage = state->stages[stage_id];
   const std::set<std::string>& no_split_at_inner_name_set =
       stage->op->attrs.count(SearchPolicyKey::no_split_at_inner)
@@ -285,8 +285,8 @@ State FollowTiling(const State& state, int stage_id, const std::vector<int>& spl
     no_split_at_inner_name_in_stage_cnt += no_split_at_inner_name_set.count(iter->name);
   }
 
-  CHECK_EQ(state->stages[stage_id]->iters.size() - no_split_at_inner_name_in_stage_cnt,
-           split_step_ids.size());
+  ICHECK_EQ(state->stages[stage_id]->iters.size() - no_split_at_inner_name_in_stage_cnt,
+            split_step_ids.size());
 
   State tmp_s = state;
   int ct = 0;
@@ -328,7 +328,7 @@ State FollowTiling(const State& state, int stage_id, const std::vector<int>& spl
           } else if (n_split == 2) {
             space_2.push_back(iter);
           } else {
-            CHECK_EQ(n_split, 3);
+            ICHECK_EQ(n_split, 3);
             space_3.push_back(iter);
           }
         }
diff --git a/src/auto_scheduler/search_policy/utils.h b/src/auto_scheduler/search_policy/utils.h
index 5c015ca..f0c4cbc 100644
--- a/src/auto_scheduler/search_policy/utils.h
+++ b/src/auto_scheduler/search_policy/utils.h
@@ -99,29 +99,29 @@ inline int OperationToStage(const te::Operation& op, const State& state) {
 
 /*! \brief Get an integer from a tvm str Map. */
 inline int GetIntParam(const Map<String, ObjectRef>& attr_dict, const std::string& key) {
-  CHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
+  ICHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
   auto pint = attr_dict[key].as<IntImmNode>();
-  CHECK(pint != nullptr);
+  ICHECK(pint != nullptr);
   return pint->value;
 }
 
 /*! \brief Get a double from a tvm str Map. */
 inline double GetDoubleParam(const Map<String, ObjectRef>& attr_dict, const std::string& key) {
-  CHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
+  ICHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
   auto pdouble = attr_dict[key].as<FloatImmNode>();
-  CHECK(pdouble != nullptr);
+  ICHECK(pdouble != nullptr);
   return pdouble->value;
 }
 
 /*! \brief Get a string from a tvm str Map. */
 inline std::string GetStringParam(const Map<String, ObjectRef>& attr_dict, const std::string& key) {
-  CHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
+  ICHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
   const auto& target = attr_dict[key];
   if (auto pstr = target.as<StringImmNode>()) {
     return pstr->value;
   }
   auto pstr = target.as<StringObj>();
-  CHECK(pstr != nullptr);
+  ICHECK(pstr != nullptr);
   return pstr->data;
 }
 
@@ -129,9 +129,9 @@ inline std::string GetStringParam(const Map<String, ObjectRef>& attr_dict, const
 inline std::set<std::string> GetIterNameSetParam(const Map<String, ObjectRef>& attr_dict,
                                                  const std::string& key) {
   std::set<std::string> ret;
-  CHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
+  ICHECK_GT(attr_dict.count(key), 0) << "Cannot find key: \"" << key << "\" in " << attr_dict;
   auto names = attr_dict[key].as<ArrayNode>();
-  CHECK(names != nullptr);
+  ICHECK(names != nullptr);
   for (const auto& name : *names) {
     ret.insert(name.as<StringObj>()->data);
   }
@@ -477,7 +477,7 @@ inline bool HasCrossThreadReduction(const State& state, int stage_id) {
 /*! \brief Return whether the stage has been tiled already. */
 inline bool IsTiled(const Stage& stage) {
   auto op = stage->op.as<te::ComputeOpNode>();
-  CHECK(op != nullptr);
+  ICHECK(op != nullptr);
   return stage->iters.size() != op->axis.size() + op->reduce_axis.size();
 }
 
@@ -502,7 +502,7 @@ inline void ExtractOriginalIterators(const std::string& name, std::set<std::stri
 /*! \brief Get the last reduce iterator in the outermost reduce tile. */
 inline Iterator GetLastReduceIteratorInOutermostReduceTile(const Stage& stage) {
   auto pop = stage->op.as<te::ComputeOpNode>();
-  CHECK(pop != nullptr);
+  ICHECK(pop != nullptr);
   std::set<std::string> original_names;
 
   const std::set<std::string>& no_split_at_inner_name_set =
@@ -583,7 +583,7 @@ inline State FuseAllReductionIterators(const State& state, int stage_id, Iterato
     }
   }
 
-  CHECK(!reduce_iters->empty());
+  ICHECK(!reduce_iters->empty());
   State tmp_s = state;
   if (reduce_iters->size() > 1) {
     *fused_iter = tmp_s.fuse(stage_id, *reduce_iters);
@@ -609,7 +609,7 @@ inline State FuseAllOuterSpaceIterators(const State& state, int stage_id, Iterat
     to_fuse.push_back(it);
   }
 
-  CHECK(!to_fuse.empty());
+  ICHECK(!to_fuse.empty());
   State tmp_s = state;
   if (to_fuse.size() > 1) {
     *fused_iter = tmp_s.fuse(stage_id, to_fuse);
@@ -649,7 +649,7 @@ inline int RandomChoose(const std::vector<double>& prefix_sum_probs, std::mt1993
   std::uniform_real_distribution<> dis(0.0, 1.0);
   double x = dis(*random_gen);
 
-  CHECK(!prefix_sum_probs.empty());
+  ICHECK(!prefix_sum_probs.empty());
 
   return std::lower_bound(prefix_sum_probs.begin(), prefix_sum_probs.end(), x) -
          prefix_sum_probs.begin();
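
RandomChoose above is weighted sampling over a prefix-sum array: draw a uniform value, then binary-search with std::lower_bound for the first prefix sum at or above it. A self-contained sketch of the same technique that also builds the prefix sums from raw weights (names invented for illustration):

    #include <algorithm>
    #include <cassert>
    #include <random>
    #include <vector>

    // Standalone sketch of the prefix-sum sampling used by RandomChoose
    // above. Index i is returned with probability weights[i] / sum(weights).
    int WeightedChoice(const std::vector<double>& weights, std::mt19937* gen) {
      std::vector<double> prefix;
      prefix.reserve(weights.size());
      double sum = 0.0;
      for (double w : weights) prefix.push_back(sum += w);
      assert(!prefix.empty() && sum > 0.0);  // mirrors ICHECK(!prefix_sum_probs.empty())
      std::uniform_real_distribution<> dis(0.0, sum);  // sample [0, sum) instead of normalizing
      double x = dis(*gen);
      return std::lower_bound(prefix.begin(), prefix.end(), x) - prefix.begin();
    }

    int main() {
      std::mt19937 gen(42);
      return WeightedChoice({0.1, 0.6, 0.3}, &gen);  // returns 1 most often
    }
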
diff --git a/src/auto_scheduler/search_task.cc b/src/auto_scheduler/search_task.cc
index e3f35e9..0b85a03 100755
--- a/src/auto_scheduler/search_task.cc
+++ b/src/auto_scheduler/search_task.cc
@@ -53,7 +53,7 @@ HardwareParams HardwareParamsNode::GetDefaultHardwareParams(const Target& target
 
     auto ctx = TVMContext{kDLGPU, 0};
     auto func = tvm::runtime::Registry::Get("device_api.gpu");
-    CHECK(func != nullptr) << "Cannot find GPU device_api in registry";
+    ICHECK(func != nullptr) << "Cannot find GPU device_api in registry";
     auto device_api = static_cast<tvm::runtime::DeviceAPI*>(((*func)()).operator void*());
 
     tvm::runtime::TVMRetValue ret;
diff --git a/src/auto_scheduler/transform_step.cc b/src/auto_scheduler/transform_step.cc
index 73f6734..852f1e1 100755
--- a/src/auto_scheduler/transform_step.cc
+++ b/src/auto_scheduler/transform_step.cc
@@ -27,6 +27,7 @@
 #include <tvm/auto_scheduler/loop_state.h>
 #include <tvm/auto_scheduler/transform_step.h>
 #include <tvm/runtime/registry.h>
+#include <tvm/support/logging.h>
 #include <tvm/te/operation.h>
 
 #include <string>
@@ -43,7 +44,7 @@ struct Handler<::tvm::Array<::tvm::Integer>> {
   inline static void Write(dmlc::JSONWriter* writer, const ::tvm::Array<::tvm::Integer>& array) {
     writer->BeginArray(false);
     for (const auto& i : array) {
-      CHECK(i.defined());
+      ICHECK(i.defined());
       writer->WriteArrayItem(i->value);
     }
     writer->EndArray();
@@ -65,7 +66,7 @@ struct Handler<::tvm::Array<::tvm::Optional<::tvm::Integer>>> {
                            const ::tvm::Array<::tvm::Optional<::tvm::Integer>>& array) {
     writer->BeginArray(false);
     for (const auto& i : array) {
-      CHECK(i);
+      ICHECK(i);
       writer->WriteArrayItem(i.value()->value);
     }
     writer->EndArray();
@@ -125,7 +126,7 @@ Step StepReadFromRecord(dmlc::JSONReader* reader) {
   std::string name;
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&name);
   if (name == AnnotationStepNode::record_prefix_str) {
     return AnnotationStep(reader);
@@ -283,13 +284,13 @@ AnnotationStep::AnnotationStep(dmlc::JSONReader* reader) {
   auto node = make_object<AnnotationStepNode>();
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->stage_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->iter_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   int int_val;
   reader->Read(&int_val);
   node->annotation = IteratorAnnotation(int_val);
@@ -308,7 +309,7 @@ Iterator AnnotationStepNode::ApplyToState(State* state) const {
   const Stage& stage = (*state)->stages[stage_id];
   Iterator it = stage->iters[iter_id];
 
-  CHECK(it->annotation == IteratorAnnotation::kNone);
+  ICHECK(it->annotation == IteratorAnnotation::kNone);
   Iterator new_it = Iterator(it->name, it->range, it->iter_kind, annotation, &it->orig_iters);
   Stage new_stage = stage;
   new_stage.CopyOnWrite()->iters.Set(iter_id, new_it);
@@ -410,7 +411,7 @@ FuseStep::FuseStep(int stage_id, const Array<Integer>& fused_ids) {
   auto node = make_object<FuseStepNode>();
   node->stage_id = stage_id;
   for (const auto& x : fused_ids) {
-    CHECK(x->IsInstance<IntImmNode>());
+    ICHECK(x->IsInstance<IntImmNode>());
   }
   node->fused_ids = fused_ids;
   data_ = std::move(node);
@@ -420,10 +421,10 @@ FuseStep::FuseStep(dmlc::JSONReader* reader) {
   auto node = make_object<FuseStepNode>();
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->stage_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->fused_ids);
   data_ = std::move(node);
 }
@@ -446,7 +447,7 @@ Iterator FuseStepNode::ApplyToState(State* state) const {
 
   for (size_t i = 0; i < fused_ids.size(); ++i) {
     if (i > 0) {
-      CHECK_EQ(fused_ids[i]->value, fused_ids[i - 1]->value + 1);
+      ICHECK_EQ(fused_ids[i]->value, fused_ids[i - 1]->value + 1);
     }
 
     if (i != fused_ids.size() - 1) {
@@ -574,13 +575,13 @@ PragmaStep::PragmaStep(dmlc::JSONReader* reader) {
   auto node = make_object<PragmaStepNode>();
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->stage_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->iter_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   std::string string_value;
   reader->Read(&string_value);
   node->pragma_type = std::move(string_value);
@@ -609,7 +610,7 @@ void PragmaStepNode::ApplyToState(State* state) const {
         break;
       }
     }
-    CHECK_LT(pos, pragma_type.size()) << "max step value not found.";
+    ICHECK_LT(pos, pragma_type.size()) << "max step value not found.";
     stage.CopyOnWrite()->attrs.auto_unroll_max_step = atoi(pragma_type.c_str() + pos + 1);
     pstate->stages.Set(stage_id, std::move(stage));
   } else {
@@ -628,7 +629,7 @@ void PragmaStepNode::ApplyToSchedule(Array<te::Stage>* stages,
         break;
       }
     }
-    CHECK_LT(pos, pragma_type.size()) << "max step value not found.";
+    ICHECK_LT(pos, pragma_type.size()) << "max step value not found.";
     int value = atoi(pragma_type.c_str() + pos + 1);
     stage.pragma(axes[iter_id], "auto_unroll_max_step", value);
     stage.pragma(axes[iter_id], "unroll_explicit", true);
@@ -651,7 +652,7 @@ String PragmaStepNode::PrintAsPythonAPI(Array<te::Stage>* stages,
         break;
       }
     }
-    CHECK_LT(pos, pragma_type.size()) << "max step value not found.";
+    ICHECK_LT(pos, pragma_type.size()) << "max step value not found.";
     int value = atoi(pragma_type.c_str() + pos + 1);
     ss << "s[" << op_name << "].pragma("
        << CleanName((*stage_to_axes)[stage][iter_id]->var->name_hint, op_name)
@@ -674,7 +675,7 @@ ReorderStep::ReorderStep(int stage_id, const Array<Integer>& after_ids) {
   auto node = make_object<ReorderStepNode>();
   node->stage_id = stage_id;
   for (const auto& x : after_ids) {
-    CHECK(x->IsInstance<IntImmNode>());
+    ICHECK(x->IsInstance<IntImmNode>());
   }
   node->after_ids = after_ids;
   data_ = std::move(node);
@@ -684,10 +685,10 @@ ReorderStep::ReorderStep(dmlc::JSONReader* reader) {
   auto node = make_object<ReorderStepNode>();
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->stage_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->after_ids);
   data_ = std::move(node);
 }
@@ -713,7 +714,7 @@ void ReorderStepNode::ApplyToSchedule(Array<te::Stage>* stages,
                                       StageToAxesMap* stage_to_axes) const {
   auto stage = (*stages)[stage_id];
   const Array<IterVar>& axes = stage_to_axes->at(stage);
-  CHECK_EQ(after_ids.size(), axes.size());
+  ICHECK_EQ(after_ids.size(), axes.size());
 
   Array<IterVar> new_axes;
   new_axes.reserve(axes.size());
@@ -879,7 +880,7 @@ String PrintSplitAsPythonAPI(Array<te::Stage>* stages, StageToAxesMap* stage_to_
   const auto& func_name = CleanName(stage->op->name);
   const auto& outs =
       ApplySplitToSchedule(stages, stage_to_axes, stage_id, iter_id, lengths, inner_to_outer);
-  CHECK_EQ(outs.size(), lengths.size() + 1);
+  ICHECK_EQ(outs.size(), lengths.size() + 1);
 
   std::stringstream ss;
   int size = static_cast<int>(lengths.size());
@@ -921,23 +922,23 @@ SplitStep::SplitStep(dmlc::JSONReader* reader) {
   auto node = make_object<SplitStepNode>();
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->stage_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->iter_id);
   int int_val;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&int_val);
   if (int_val) {
     node->extent = Integer(int_val);
   }
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->lengths);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->inner_to_outer);
   data_ = std::move(node);
 }
@@ -988,14 +989,14 @@ void FollowSplitStepNode::WriteToRecord(dmlc::JSONWriter* writer) const {
 Array<Optional<Integer>> FollowSplitStepNode::ExtractSplitLengths(
     const Array<Step>& transform_steps) const {
   // Make sure src_step_id is within the range of transform_steps.
-  CHECK_LT(src_step_id, transform_steps.size());
+  ICHECK_LT(src_step_id, transform_steps.size());
   auto ps = transform_steps[src_step_id].as<SplitStepNode>();
-  CHECK(ps != nullptr);
+  ICHECK(ps != nullptr);
 
   // Make sure the size of ps->lengths is not smaller than n_split-1.
   // Note that the number of actual splitting factors of src_step is ps->lengths.size()+1.
-  CHECK_LE(n_split, ps->lengths.size() + 1);
-  CHECK(ps != nullptr);
+  ICHECK_LE(n_split, ps->lengths.size() + 1);
+  ICHECK(ps != nullptr);
 
   Array<Optional<Integer>> lengths;
   lengths.reserve(n_split);
@@ -1029,16 +1030,16 @@ FollowSplitStep::FollowSplitStep(dmlc::JSONReader* reader) {
   auto node = make_object<FollowSplitStepNode>();
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->stage_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->iter_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->src_step_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->n_split);
   data_ = std::move(node);
 }
@@ -1079,19 +1080,19 @@ FollowFusedSplitStep::FollowFusedSplitStep(dmlc::JSONReader* reader) {
   auto node = make_object<FollowFusedSplitStepNode>();
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->stage_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->iter_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->src_step_ids);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->level);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->factor_or_nparts);
   data_ = std::move(node);
 }
@@ -1112,9 +1113,9 @@ Optional<Integer> FollowFusedSplitStepNode::ExtractSplitLength(
 
   for (int src_step_id : src_step_ids) {
     // Make sure the src_step_id is within the range of transform_steps.
-    CHECK_LT(src_step_id, transform_steps.size());
+    ICHECK_LT(src_step_id, transform_steps.size());
     auto ps = transform_steps[src_step_id].as<SplitStepNode>();
-    CHECK(ps != nullptr);
+    ICHECK(ps != nullptr);
     // Multiply the splitting factor at the corresponding splitting level of each src_step.
     if (ps->lengths[level] && ret.defined()) {
       ret *= ps->lengths[level].value();
@@ -1158,16 +1159,16 @@ StorageAlignStep::StorageAlignStep(dmlc::JSONReader* reader) {
   auto node = make_object<StorageAlignStepNode>();
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->stage_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->iter_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->factor);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->offset);
   data_ = std::move(node);
 }
@@ -1224,13 +1225,13 @@ ComputeAtStep::ComputeAtStep(dmlc::JSONReader* reader) {
   auto node = make_object<ComputeAtStepNode>();
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->stage_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->target_stage_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->target_iter_id);
   data_ = std::move(node);
 }
@@ -1295,7 +1296,7 @@ ComputeInlineStep::ComputeInlineStep(dmlc::JSONReader* reader) {
   auto node = make_object<ComputeInlineStepNode>();
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->stage_id);
   data_ = std::move(node);
 }
@@ -1311,7 +1312,7 @@ void ComputeInlineStepNode::ApplyToState(State* state) const {
 
   // Check the validity of compute_inline
   for (size_t i = 0; i < stage->iters.size(); ++i) {
-    CHECK_EQ((*state)->attach_map->iter_to_attached_stages.count(std::make_pair(stage_id, i)), 0)
+    ICHECK_EQ((*state)->attach_map->iter_to_attached_stages.count(std::make_pair(stage_id, i)), 0)
         << "Invalid compute_inline: There are some other stages that are attached to the "
         << "target stage";
   }
@@ -1351,7 +1352,7 @@ ComputeRootStep::ComputeRootStep(dmlc::JSONReader* reader) {
   auto node = make_object<ComputeRootStepNode>();
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->stage_id);
   data_ = std::move(node);
 }
@@ -1418,10 +1419,10 @@ Array<Step> GetFormerStageModifiableSteps(Step current_step, const Array<Step>&
         }
       }
       // add SplitStepNode required by rfactor
-      CHECK_GE(i, 1);
-      CHECK(transform_steps[i - 1]->IsInstance<SplitStepNode>());
+      ICHECK_GE(i, 1);
+      ICHECK(transform_steps[i - 1]->IsInstance<SplitStepNode>());
       const Step& split_step = transform_steps[i - 1];
-      CHECK_EQ(split_step->stage_id, step->stage_id);
+      ICHECK_EQ(split_step->stage_id, step->stage_id);
       ret_steps.push_back(split_step);
       // add RfactorStepNode
       ret_steps.push_back(step);
@@ -1449,15 +1450,15 @@ CacheReadStep::CacheReadStep(dmlc::JSONReader* reader) {
   auto node = make_object<CacheReadStepNode>();
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->stage_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   std::string string_value;
   reader->Read(&string_value);
   node->scope_name = std::move(string_value);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->reader_stage_ids);
   data_ = std::move(node);
 }
@@ -1560,10 +1561,10 @@ CacheWriteStep::CacheWriteStep(dmlc::JSONReader* reader) {
   auto node = make_object<CacheWriteStepNode>();
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->stage_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   std::string string_value;
   reader->Read(&string_value);
   node->scope_name = std::move(string_value);
@@ -1587,7 +1588,7 @@ int CacheWriteStepNode::ApplyToState(State* state, const ComputeDAG& dag) const
       GetFormerStageModifiableSteps(GetRef<Step>(this), (*state)->transform_steps));
   int added_ops = current_compute_dag->ops.size() - last_dag_op_size;
   // TODO(jcf94): Update this check to equal after fixing the cache write bug in TVM
-  CHECK_GE(added_ops, 1);
+  ICHECK_GE(added_ops, 1);
 
   // target_stage -> cache_write_stage + target_stage
   // Assume no step has been applied to the target stage before cache write.
@@ -1691,13 +1692,13 @@ RfactorStep::RfactorStep(dmlc::JSONReader* reader) {
   auto node = make_object<RfactorStepNode>();
   bool s;
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->stage_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->iter_id);
   s = reader->NextArrayItem();
-  CHECK(s);
+  ICHECK(s);
   reader->Read(&node->factor_iter_id);
   data_ = std::move(node);
 }
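
Every step constructor in this file repeats the same three lines: advance with NextArrayItem, ICHECK the returned flag, then Read the next field. A hypothetical helper that folds the sequence into one checked read is sketched below; it is not part of this patch and only assumes the dmlc::JSONReader API and the ICHECK macro that transform_step.cc already uses.

    #include <dmlc/json.h>
    #include <tvm/support/logging.h>

    // Hypothetical helper, not part of this patch: one checked read of the
    // next JSON array item, replacing the repeated
    //   s = reader->NextArrayItem(); ICHECK(s); reader->Read(&field);
    template <typename T>
    void ReadRequiredArrayItem(dmlc::JSONReader* reader, T* out) {
      bool s = reader->NextArrayItem();
      ICHECK(s) << "JSON record ended before all step fields were read";
      reader->Read(out);
    }

    // A constructor body such as FuseStep(dmlc::JSONReader* reader) could
    // then shrink to:
    //   ReadRequiredArrayItem(reader, &node->stage_id);
    //   ReadRequiredArrayItem(reader, &node->fused_ids);
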
diff --git a/src/auto_scheduler/utils.h b/src/auto_scheduler/utils.h
index 610fec9..88c649c 100755
--- a/src/auto_scheduler/utils.h
+++ b/src/auto_scheduler/utils.h
@@ -150,8 +150,8 @@ inline bool IntArrayEqual(const Array<PrimExpr>& arr1, const Array<PrimExpr>& ar
   for (size_t i = 0; i < arr1.size(); ++i) {
     auto int1 = arr1[i].as<IntImmNode>();
     auto int2 = arr2[i].as<IntImmNode>();
-    CHECK(int1 != nullptr);
-    CHECK(int2 != nullptr);
+    ICHECK(int1 != nullptr);
+    ICHECK(int2 != nullptr);
     if (int1->value != int2->value) {
       return false;
     }
@@ -169,7 +169,7 @@ inline double FloatArrayMean(const Array<PrimExpr>& float_array) {
 
   for (const auto& x : float_array) {
     auto floatimm = x.as<tir::FloatImmNode>();
-    CHECK(floatimm != nullptr);
+    ICHECK(floatimm != nullptr);
     sum += floatimm->value;
   }
   return sum / float_array.size();
@@ -191,7 +191,7 @@ inline bool StrEndsWith(const String& a, const String& b) {
 /*! \brief Get an int value from an Expr */
 inline int64_t GetIntImm(const PrimExpr& expr) {
   auto pint = expr.as<IntImmNode>();
-  CHECK(pint != nullptr);
+  ICHECK(pint != nullptr);
   return pint->value;
 }
 
diff --git a/src/autotvm/feature_visitor.cc b/src/autotvm/feature_visitor.cc
index 54fc252..15e0975 100644
--- a/src/autotvm/feature_visitor.cc
+++ b/src/autotvm/feature_visitor.cc
@@ -60,7 +60,7 @@ void FeatureVisitor::VisitStmt_(const AttrStmtNode* op) {
   if (op->attr_key == attr::thread_extent || op->attr_key == attr::virtual_thread) {
     Var var = op->node.as<tir::IterVarNode>()->var;
     const auto* extent = op->value.as<IntImmNode>();
-    CHECK(extent);
+    ICHECK(extent);
 
     std::string name = var.get()->name_hint;
     AnnotationType ann = kParallel;
diff --git a/src/autotvm/touch_extractor.cc b/src/autotvm/touch_extractor.cc
index 91e2ee1..10ead71 100644
--- a/src/autotvm/touch_extractor.cc
+++ b/src/autotvm/touch_extractor.cc
@@ -120,13 +120,13 @@ void TouchExtractor::ExitItervar_() {
     if (kv.second.stride != 0) {  // multiply count
       for (auto stack_var : itervar_stack_) {
         auto touch_pattern = itervar_map[stack_var].touch_feature.find(kv.first);
-        CHECK(touch_pattern != itervar_map[stack_var].touch_feature.end());
+        ICHECK(touch_pattern != itervar_map[stack_var].touch_feature.end());
         touch_pattern->second.count *= itervar_map[var].length;
       }
     } else {  // multiply reuse ratio
       for (auto stack_var : itervar_stack_) {
         auto touch_pattern = itervar_map[stack_var].touch_feature.find(kv.first);
-        CHECK(touch_pattern != itervar_map[stack_var].touch_feature.end());
+        ICHECK(touch_pattern != itervar_map[stack_var].touch_feature.end());
         touch_pattern->second.reuse *= itervar_map[var].length;
       }
     }
@@ -151,7 +151,7 @@ void TouchExtractor::ExitItervar_() {
       for (auto stack_var : itervar_stack_) {
         if (ParallelLevel(itervar_map[stack_var].ann) == para_level + 1) {
           auto touch_pattern = itervar_map[stack_var].touch_feature.find(kv.first);
-          CHECK(touch_pattern != itervar_map[stack_var].touch_feature.end());
+          ICHECK(touch_pattern != itervar_map[stack_var].touch_feature.end());
           touch_pattern->second.thread_reuse = -kv.second.reuse;
           touch_pattern->second.thread_count = -kv.second.count;
           // NOTE: use minus as a flag to denote it is a base,
diff --git a/src/contrib/hybrid/codegen_hybrid.cc b/src/contrib/hybrid/codegen_hybrid.cc
index 67765f0..7522f20 100644
--- a/src/contrib/hybrid/codegen_hybrid.cc
+++ b/src/contrib/hybrid/codegen_hybrid.cc
@@ -65,14 +65,14 @@ std::string CodeGenHybrid::Finish() { return stream.str(); }
 void CodeGenHybrid::PrintType(DataType t, std::ostream& os) {
   if (t.is_float()) {
     os << "float";
-    CHECK(t.bits() == 16 || t.bits() == 32 || t.bits() == 64);
+    ICHECK(t.bits() == 16 || t.bits() == 32 || t.bits() == 64);
   } else if (t.is_int()) {
     os << "int";
-    CHECK(t.bits() == 8 || t.bits() == 16 || t.bits() == 32 || t.bits() == 64);
+    ICHECK(t.bits() == 8 || t.bits() == 16 || t.bits() == 32 || t.bits() == 64);
   } else {
-    CHECK(t.is_uint()) << "Unsupported type " << t;
+    ICHECK(t.is_uint()) << "Unsupported type " << t;
     os << "uint";
-    CHECK(t.bits() == 8 || t.bits() == 16 || t.bits() == 32 || t.bits() == 64);
+    ICHECK(t.bits() == 8 || t.bits() == 16 || t.bits() == 32 || t.bits() == 64);
   }
   os << t.bits();
 }
@@ -93,7 +93,7 @@ template <typename T>
 inline void PrintBinaryExpr(const T* op, const char* opstr,
                             std::ostream& os,  // NOLINT(*)
                             CodeGenHybrid* p) {
-  CHECK(op->dtype.lanes() == 1) << "vec bin op not implemented";
+  ICHECK(op->dtype.lanes() == 1) << "vec bin op not implemented";
   if (isalpha(opstr[0])) {
     os << opstr << '(';
     p->PrintExpr(op->a, os);
@@ -114,8 +114,8 @@ inline void PrintBinaryExpr(const T* op, const char* opstr,
 inline void PrintBinaryIntrinsitc(const CallNode* op, const char* opstr,
                                   std::ostream& os,  // NOLINT(*)
                                   CodeGenHybrid* p) {
-  CHECK(op->dtype.lanes() == 1) << "vec bin intrin not implemented";
-  CHECK_EQ(op->args.size(), 2U);
+  ICHECK(op->dtype.lanes() == 1) << "vec bin intrin not implemented";
+  ICHECK_EQ(op->args.size(), 2U);
   os << '(';
   p->PrintExpr(op->args[0], os);
   os << opstr;
@@ -228,7 +228,7 @@ void CodeGenHybrid::VisitExpr_(const CallNode* op, std::ostream& os) {  // NOLIN
   } else if (op->op.same_as(builtin::shift_right())) {
     PrintBinaryIntrinsitc(op, ">>", os, this);
   } else if (op->op.same_as(builtin::bitwise_not())) {
-    CHECK_EQ(op->args.size(), 1U);
+    ICHECK_EQ(op->args.size(), 1U);
     os << "(~";
     PrintExpr(op->args[0], os);
     os << ')';
@@ -251,9 +251,9 @@ void CodeGenHybrid::VisitExpr_(const CallNode* op, std::ostream& os) {  // NOLIN
     os << ")";
   } else {
     auto* ptr_op = op->op.as<OpNode>();
-    CHECK(ptr_op != nullptr);
+    ICHECK(ptr_op != nullptr);
     std::string name = ptr_op->name;
-    CHECK_EQ(name.compare(0, 4, "tir."), 0);
+    ICHECK_EQ(name.compare(0, 4, "tir."), 0);
     os << name.substr(4) << "(";
     for (size_t i = 0; i < op->args.size(); i++) {
       PrintExpr(op->args[i], os);
@@ -305,7 +305,7 @@ void CodeGenHybrid::VisitStmt_(const LetStmtNode* op) {
 void CodeGenHybrid::VisitStmt_(const AttrStmtNode* op) {
   if (op->attr_key == tir::attr::thread_extent) {
     auto iter_var = op->node.as<IterVarNode>();
-    CHECK(iter_var);
+    ICHECK(iter_var);
     binds_[iter_var->var.get()] = dot_to_underscore(iter_var->var->name_hint);
     PrintIndent();
     stream << "for " << binds_[iter_var->var.get()] << " in bind('" << iter_var->var->name_hint
@@ -327,7 +327,7 @@ void CodeGenHybrid::VisitStmt_(const AttrStmtNode* op) {
 
 void CodeGenHybrid::VisitStmt_(const ProducerRealizeNode* op) {
   auto tensor = Downcast<Tensor>(op->producer);
-  CHECK(alloc_storage_scope_.count(tensor->op));
+  ICHECK(alloc_storage_scope_.count(tensor->op));
   if (!alloc_storage_scope_[tensor->op].empty()) {
     PrintIndent();
     stream << GetTensorID(tensor) << " = allocate((";
@@ -493,7 +493,7 @@ void CodeGenHybrid::DumpStmt(const Stmt& stmt, const Array<ObjectRef>& inputs,
       stream << GetTensorID(GetRef<Tensor>(tensor));
     } else {
       auto var = inputs[i].as<VarNode>();
-      CHECK(var) << "Input should either be a tensor or a variable!";
+      ICHECK(var) << "Input should either be a tensor or a variable!";
       stream << GetVarID(var);
     }
   }
diff --git a/src/contrib/tf_op/tvm_dso_op_kernels.cc b/src/contrib/tf_op/tvm_dso_op_kernels.cc
index 705a334..5c119b6 100644
--- a/src/contrib/tf_op/tvm_dso_op_kernels.cc
+++ b/src/contrib/tf_op/tvm_dso_op_kernels.cc
@@ -241,7 +241,7 @@ class TVMDSOOp : public OpKernel {
     // Load TVM function from dynamic library
     tvm::runtime::Module mod_dylib = tvm::runtime::Module::LoadFromFile(lib_path);
     tvm_func = mod_dylib.GetFunction(func_name);
-    CHECK(tvm_func != nullptr);
+    ICHECK(tvm_func != nullptr);
   }
 
   void Compute(tensorflow::OpKernelContext* context) override {
diff --git a/src/driver/driver_api.cc b/src/driver/driver_api.cc
index 2e41f0b..f88b621 100644
--- a/src/driver/driver_api.cc
+++ b/src/driver/driver_api.cc
@@ -215,7 +215,7 @@ std::pair<IRModule, IRModule> SplitDevHostFuncs(IRModule mod_mixed, const Target
       tir::transform::CombineContextCall(),
   };
   auto opt_host = transform::Sequential(host_pass_list);
-  CHECK(mod_mixed.defined()) << "This module must be defined";
+  ICHECK(mod_mixed.defined()) << "This module must be defined";
   auto mhost = opt_host(mod_mixed);
 
   // device pipeline
@@ -243,9 +243,9 @@ std::pair<IRModule, IRModule> SplitDevHostFuncs(IRModule mod_mixed, const Target
   }
 
   if (target->kind->device_type == kDLCPU && target_host == target) {
-    CHECK(mdevice->functions.empty()) << "No device code should be generated when target "
-                                      << "and host_target are both llvm target."
-                                      << "\n";
+    ICHECK(mdevice->functions.empty()) << "No device code should be generated when target "
+                                       << "and host_target are both llvm target."
+                                       << "\n";
   }
 
   return {mhost, mdevice};
@@ -272,7 +272,7 @@ runtime::Module build(const Map<Target, IRModule>& inputs, const Target& target_
 
   IRModule mhost_all = IRModule(Map<GlobalVar, BaseFunc>());
 
-  CHECK(mhost_all.defined()) << "The host module must be defined";
+  ICHECK(mhost_all.defined()) << "The host module must be defined";
 
   for (const auto& it : inputs) {
     if (it.second.defined()) {
@@ -280,9 +280,9 @@ runtime::Module build(const Map<Target, IRModule>& inputs, const Target& target_
       auto& mhost = pair.first;
       auto& mdevice = pair.second;
 
-      CHECK(mhost.defined()) << "The split host module must be defined";
+      ICHECK(mhost.defined()) << "The split host module must be defined";
 
-      CHECK(mhost_all.defined()) << "The host module must be defined";
+      ICHECK(mhost_all.defined()) << "The host module must be defined";
 
       mhost_all->Update(mhost);
 
diff --git a/src/ir/diagnostic.cc b/src/ir/diagnostic.cc
index 148831d..f9299e3 100644
--- a/src/ir/diagnostic.cc
+++ b/src/ir/diagnostic.cc
@@ -225,7 +225,7 @@ void ReportAt(const DiagnosticContext& context, std::ostream& out, const Span& s
     return;
   }
 
-  CHECK(context->module->source_map.defined());
+  ICHECK(context->module->source_map.defined());
   auto it = context->module->source_map->source_map.find(span->source_name);
 
   // If the source name is not in the current source map, sources were not annotated.
diff --git a/src/ir/env_func.cc b/src/ir/env_func.cc
index 7b0d6e6..6e1f847 100644
--- a/src/ir/env_func.cc
+++ b/src/ir/env_func.cc
@@ -38,7 +38,7 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
 
 ObjectPtr<Object> CreateEnvNode(const std::string& name) {
   auto* f = runtime::Registry::Get(name);
-  CHECK(f != nullptr) << "Cannot find global function \'" << name << '\'';
+  ICHECK(f != nullptr) << "Cannot find global function \'" << name << '\'';
   ObjectPtr<EnvFuncNode> n = make_object<EnvFuncNode>();
   n->func = *f;
   n->name = name;
@@ -51,7 +51,7 @@ TVM_REGISTER_GLOBAL("ir.EnvFuncGet").set_body_typed(EnvFunc::Get);
 
 TVM_REGISTER_GLOBAL("ir.EnvFuncCall").set_body([](TVMArgs args, TVMRetValue* rv) {
   EnvFunc env = args[0];
-  CHECK_GE(args.size(), 1);
+  ICHECK_GE(args.size(), 1);
   env->func.CallPacked(TVMArgs(args.values + 1, args.type_codes + 1, args.size() - 1), rv);
 });
 
diff --git a/src/ir/error.cc b/src/ir/error.cc
index 5cd7a24..5d3978d 100644
--- a/src/ir/error.cc
+++ b/src/ir/error.cc
@@ -46,7 +46,7 @@ void ErrorReporter::RenderErrors(const IRModule& module, bool use_color) {
   // First we pick an error reporting strategy for each error.
   // TODO(@jroesch): Spanned errors are currently not supported.
   for (auto err : this->errors_) {
-    CHECK(!err.span.defined()) << "attempting to use spanned errors, currently not supported";
+    ICHECK(!err.span.defined()) << "attempting to use spanned errors, currently not supported";
   }
 
   NodeMap<GlobalVar, NodeMap<ObjectRef, std::string>> error_maps;
@@ -62,7 +62,7 @@ void ErrorReporter::RenderErrors(const IRModule& module, bool use_color) {
 
     auto has_errs = this->node_to_error_.find(node);
 
-    CHECK(has_errs != this->node_to_error_.end());
+    ICHECK(has_errs != this->node_to_error_.end());
 
     const auto& error_indicies = has_errs->second;
 
@@ -113,7 +113,7 @@ void ErrorReporter::RenderErrors(const IRModule& module, bool use_color) {
     annotated_prog << AsText(func, false, [&err_map](const ObjectRef& expr) {
       auto it = err_map.find(expr);
       if (it != err_map.end()) {
-        CHECK_NE(it->second.size(), 0);
+        ICHECK_NE(it->second.size(), 0);
         return it->second;
       } else {
         return std::string("");
diff --git a/src/ir/expr.cc b/src/ir/expr.cc
index 05d41cf..67e5cea 100644
--- a/src/ir/expr.cc
+++ b/src/ir/expr.cc
@@ -49,17 +49,17 @@ PrimExpr PrimExpr::FromObject_(ObjectRef ref) {
   if (auto* ptr = ref.as<runtime::StringObj>()) {
     return tir::StringImm(GetRef<runtime::String>(ptr));
   }
-  CHECK(ObjectTypeChecker<PrimExpr>::Check(ref.get()))
+  ICHECK(ObjectTypeChecker<PrimExpr>::Check(ref.get()))
       << "Expect type " << ObjectTypeChecker<PrimExpr>::TypeName() << " but get "
       << ref->GetTypeKey();
   return Downcast<PrimExpr>(ref);
 }
 
 IntImm::IntImm(DataType dtype, int64_t value) {
-  CHECK(dtype.is_scalar()) << "ValueError: IntImm can only take scalar.";
-  CHECK(dtype.is_int() || dtype.is_uint()) << "ValueError: IntImm supports only int or uint type.";
+  ICHECK(dtype.is_scalar()) << "ValueError: IntImm can only take scalar.";
+  ICHECK(dtype.is_int() || dtype.is_uint()) << "ValueError: IntImm supports only int or uint type.";
   if (dtype.is_uint()) {
-    CHECK_GE(value, 0U);
+    ICHECK_GE(value, 0U);
   }
   ObjectPtr<IntImmNode> node = make_object<IntImmNode>();
   node->dtype = dtype;
@@ -84,7 +84,7 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
     });
 
 FloatImm::FloatImm(DataType dtype, double value) {
-  CHECK_EQ(dtype.lanes(), 1) << "ValueError: FloatImm can only take scalar.";
+  ICHECK_EQ(dtype.lanes(), 1) << "ValueError: FloatImm can only take scalar.";
   ObjectPtr<FloatImmNode> node = make_object<FloatImmNode>();
   node->dtype = dtype;
   node->value = value;
diff --git a/src/ir/module.cc b/src/ir/module.cc
index 231ae68..b011f2d 100644
--- a/src/ir/module.cc
+++ b/src/ir/module.cc
@@ -55,14 +55,14 @@ IRModule::IRModule(tvm::Map<GlobalVar, BaseFunc> functions,
 
   for (const auto& kv : n->functions) {
     // set global var map
-    CHECK(n->global_var_map_.count(kv.first->name_hint) == 0)
+    ICHECK(n->global_var_map_.count(kv.first->name_hint) == 0)
         << "Duplicate global function name " << kv.first->name_hint;
     n->global_var_map_.Set(kv.first->name_hint, kv.first);
   }
 
   for (const auto& kv : n->type_definitions) {
     // set global typevar map
-    CHECK(n->global_type_var_map_.count(kv.first->name_hint) == 0)
+    ICHECK(n->global_type_var_map_.count(kv.first->name_hint) == 0)
         << "Duplicate global type definition name " << kv.first->name_hint;
     n->global_type_var_map_.Set(kv.first->name_hint, kv.first);
     n->RegisterConstructors(kv.first, kv.second);
@@ -150,9 +150,9 @@ tvm::Array<GlobalVar> IRModuleNode::GetGlobalVars() const {
 }
 
 GlobalTypeVar IRModuleNode::GetGlobalTypeVar(const String& name) const {
-  CHECK(global_type_var_map_.defined());
+  ICHECK(global_type_var_map_.defined());
   auto it = global_type_var_map_.find(name);
-  CHECK(it != global_type_var_map_.end())
+  ICHECK(it != global_type_var_map_.end())
       << "Cannot find global type var " << name << " in the Module";
   return (*it).second;
 }
@@ -183,9 +183,9 @@ void WarnIfMalformed(const IRModule& mod, relay::Function func) {
   auto fv = relay::FreeVars(func);
   auto ftv = relay::FreeTypeVars(func, mod);
   // TODO(@jroesch): refactor to use diagnostic context
-  CHECK_EQ(fv.size(), 0) << "There are free variables: " << fv << std::endl;
-  CHECK_EQ(ftv.size(), 0) << "There are free type variables: " << fv
-                          << " in function: " << AsText(func, false);
+  ICHECK_EQ(fv.size(), 0) << "There are free variables: " << fv << std::endl;
+  ICHECK_EQ(ftv.size(), 0) << "There are free type variables: " << fv
+                           << " in function: " << AsText(func, false);
 }
 
 void IRModuleNode::Add(const GlobalVar& var, const BaseFunc& f, bool update) {
@@ -202,9 +202,9 @@ void IRModuleNode::AddUnchecked(const GlobalVar& var, const BaseFunc& func) {
 
   auto it = global_var_map_.find(var->name_hint);
   if (it != global_var_map_.end()) {
-    CHECK_EQ((*it).second, var);
+    ICHECK_EQ((*it).second, var);
   } else {
-    CHECK(global_var_map_.count(var->name_hint) == 0)
+    ICHECK(global_var_map_.count(var->name_hint) == 0)
         << "Duplicate global function name " << var->name_hint;
   }
 
@@ -234,7 +234,7 @@ void IRModuleNode::AddTypeDefUnchecked(const GlobalTypeVar& var, const TypeData&
   this->type_definitions.Set(var, type);
   if (!update) {
     // set global type var map
-    CHECK(global_type_var_map_.count(var->name_hint) == 0)
+    ICHECK(global_type_var_map_.count(var->name_hint) == 0)
         << "Duplicate global type definition name " << var->name_hint;
   }
   global_type_var_map_.Set(var->name_hint, var);
@@ -258,7 +258,7 @@ void IRModuleNode::Remove(const GlobalVar& var) {
 
 BaseFunc IRModuleNode::Lookup(const GlobalVar& var) const {
   auto it = functions.find(var);
-  CHECK(it != functions.end()) << "There is no definition of " << var->name_hint;
+  ICHECK(it != functions.end()) << "There is no definition of " << var->name_hint;
   return (*it).second;
 }
 
@@ -269,7 +269,7 @@ BaseFunc IRModuleNode::Lookup(const String& name) const {
 
 TypeData IRModuleNode::LookupTypeDef(const GlobalTypeVar& var) const {
   auto it = type_definitions.find(var);
-  CHECK(it != type_definitions.end()) << "There is no definition of " << var->name_hint;
+  ICHECK(it != type_definitions.end()) << "There is no definition of " << var->name_hint;
   return (*it).second;
 }
 
@@ -280,7 +280,7 @@ TypeData IRModuleNode::LookupTypeDef(const String& name) const {
 
 Constructor IRModuleNode::LookupTag(const int32_t tag) {
   auto it = constructor_tag_map_.find(tag);
-  CHECK(it != constructor_tag_map_.end()) << "There is no constructor with the tag " << tag;
+  ICHECK(it != constructor_tag_map_.end()) << "There is no constructor with the tag " << tag;
   return (*it).second;
 }
 
@@ -382,7 +382,7 @@ void IRModuleNode::Import(const String& path) {
 
 void IRModuleNode::ImportFromStd(const String& path) {
   auto* f = tvm::runtime::Registry::Get("tvm.relay.std_path");
-  CHECK(f != nullptr) << "The Relay std_path is not set, please register tvm.relay.std_path.";
+  ICHECK(f != nullptr) << "The Relay std_path is not set, please register tvm.relay.std_path.";
   std::string std_path = (*f)();
   this->Import(std_path + "/" + path);
 }
@@ -406,7 +406,7 @@ TVM_REGISTER_GLOBAL("ir.Module_Add").set_body([](TVMArgs args, TVMRetValue* ret)
   GlobalVar var = args[1];
   ObjectRef val = args[2];
   bool update = args[3];
-  CHECK(val->IsInstance<RelayExprNode>());
+  ICHECK(val->IsInstance<RelayExprNode>());
 
   if (val->IsInstance<BaseFuncNode>()) {
     mod->Add(var, Downcast<BaseFunc>(val), update);
diff --git a/src/ir/op.cc b/src/ir/op.cc
index 45c3196..5d2dc70 100644
--- a/src/ir/op.cc
+++ b/src/ir/op.cc
@@ -42,7 +42,7 @@ using OpRegistry = AttrRegistry<OpRegEntry, Op>;
 // find operator by name
 const Op& Op::Get(const String& name) {
   const OpRegEntry* reg = OpRegistry::Global()->Get(name);
-  CHECK(reg != nullptr) << "AttributeError: Operator " << name << " is not registered";
+  ICHECK(reg != nullptr) << "AttributeError: Operator " << name << " is not registered";
   return reg->op();
 }
 
@@ -130,7 +130,7 @@ struct Op2ObjectPtr : public ObjectRef {
 ObjectPtr<Object> CreateOp(const std::string& name) {
   // Hack use TVMRetValue as exchange
   auto op = Op::Get(name);
-  CHECK(op.defined()) << "Cannot find op \'" << name << '\'';
+  ICHECK(op.defined()) << "Cannot find op \'" << name << '\'';
   return Op2ObjectPtr::Get(op);
 }
 
diff --git a/src/ir/span.cc b/src/ir/span.cc
index 667c14e..4a26f3a 100644
--- a/src/ir/span.cc
+++ b/src/ir/span.cc
@@ -74,9 +74,9 @@ Span::Span(SourceName source_name, int line, int end_line, int column, int end_c
 }
 
 Span Span::Merge(const Span& other) const {
-  CHECK(this->defined() && other.defined()) << "Span::Merge: both spans must be defined";
+  ICHECK(this->defined() && other.defined()) << "Span::Merge: both spans must be defined";
 
-  CHECK((*this)->source_name == other->source_name);
+  ICHECK((*this)->source_name == other->source_name);
   return Span((*this)->source_name, std::min((*this)->line, other->line),
               std::max((*this)->end_line, other->end_line),
               std::min((*this)->column, other->column),
diff --git a/src/ir/transform.cc b/src/ir/transform.cc
index ec88482..3b77446 100644
--- a/src/ir/transform.cc
+++ b/src/ir/transform.cc
@@ -60,8 +60,8 @@ void PassContext::EnterWithScope() {
 
 void PassContext::ExitWithScope() {
   PassContextThreadLocalEntry* entry = RelayPassContextThreadLocalStore::Get();
-  CHECK(!entry->context_stack.empty());
-  CHECK(entry->context_stack.top().same_as(*this));
+  ICHECK(!entry->context_stack.empty());
+  ICHECK(entry->context_stack.top().same_as(*this));
   entry->context_stack.pop();
 }
 
@@ -77,7 +77,7 @@ PassContext PassContext::Current() {
 class PassConfigManager {
  public:
   void Register(std::string key, uint32_t value_type_index) {
-    CHECK_EQ(key2vtype_.count(key), 0U);
+    ICHECK_EQ(key2vtype_.count(key), 0U);
     ValueTypeInfo info;
     info.type_index = value_type_index;
     info.type_key = runtime::Object::TypeIndex2Key(value_type_index);
@@ -103,7 +103,7 @@ class PassConfigManager {
         LOG(FATAL) << os.str();
       }
       const auto& info = it->second;
-      CHECK(kv.second.defined()) << "AttributeError: " << kv.first << " is None";
+      ICHECK(kv.second.defined()) << "AttributeError: " << kv.first << " is None";
       if (kv.second->IsInstance<Map<String, ObjectRef>::ContainerType>()) {
         ObjectRef converted =
             reflection->CreateObject(info.type_key, Downcast<Map<String, ObjectRef>>(kv.second));
@@ -376,7 +376,7 @@ Pass GetPass(const String& pass_name) {
     // pass
   } else if ((f = Registry::Get("relay._transform." + pass_name))) {
   }
-  CHECK(f != nullptr) << "Cannot use " << pass_name << "to create the pass";
+  ICHECK(f != nullptr) << "Cannot use " << pass_name << "to create the pass";
   return (*f)();
 }
 
@@ -385,7 +385,7 @@ Pass GetPass(const String& pass_name) {
 // ordering problem needs to be handled in the future.
 IRModule SequentialNode::operator()(IRModule mod, const PassContext& pass_ctx) const {
   for (const Pass& pass : passes) {
-    CHECK(pass.defined()) << "Found undefined pass for optimization.";
+    ICHECK(pass.defined()) << "Found undefined pass for optimization.";
     const PassInfo& pass_info = pass->Info();
     if (!PassEnabled(pass_info)) continue;
     // resolve dependencies
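
The ExitWithScope hunk above encodes a strict LIFO discipline: the context being exited must be the one currently on top of the thread-local stack. A standalone sketch of that discipline, with invented names and a plain std::stack in place of TVM's thread-local store:

    #include <cassert>
    #include <stack>

    // Standalone sketch of the enter/exit discipline asserted by
    // PassContext::ExitWithScope above. Invented names; the real code uses
    // ICHECK and a thread-local context stack.
    template <typename Ctx>
    class ScopeStack {
     public:
      void Enter(const Ctx& ctx) { stack_.push(ctx); }
      void Exit(const Ctx& ctx) {
        assert(!stack_.empty());      // mirrors ICHECK(!entry->context_stack.empty())
        assert(stack_.top() == ctx);  // mirrors ICHECK(...top().same_as(*this))
        stack_.pop();                 // scopes must close in reverse order of opening
      }

     private:
      std::stack<Ctx> stack_;
    };

    int main() {
      ScopeStack<int> scopes;
      scopes.Enter(1);
      scopes.Enter(2);
      scopes.Exit(2);  // OK: innermost scope exits first
      scopes.Exit(1);
      return 0;
    }
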
diff --git a/src/node/attr_registry.h b/src/node/attr_registry.h
index 01d2b68..f84be14 100644
--- a/src/node/attr_registry.h
+++ b/src/node/attr_registry.h
@@ -109,10 +109,10 @@ class AttrRegistry {
       op_map->data_.resize(index + 1, std::make_pair(TVMRetValue(), 0));
     }
     std::pair<TVMRetValue, int>& p = op_map->data_[index];
-    CHECK(p.second != plevel) << "Attribute " << attr_name << " of " << key->AttrRegistryName()
-                              << " is already registered with same plevel=" << plevel;
-    CHECK(value.type_code() != kTVMNullptr) << "Registered packed_func is Null for " << attr_name
-                                            << " of operator " << key->AttrRegistryName();
+    ICHECK(p.second != plevel) << "Attribute " << attr_name << " of " << key->AttrRegistryName()
+                               << " is already registered with same plevel=" << plevel;
+    ICHECK(value.type_code() != kTVMNullptr) << "Registered packed_func is Null for " << attr_name
+                                             << " of operator " << key->AttrRegistryName();
     if (p.second < plevel && value.type_code() != kTVMNullptr) {
       op_map->data_[index] = std::make_pair(value, plevel);
     }
diff --git a/src/node/container.cc b/src/node/container.cc
index 60b5f40..b72d5a4 100644
--- a/src/node/container.cc
+++ b/src/node/container.cc
@@ -96,8 +96,8 @@ struct NDArrayContainerTrait {
   static constexpr const std::nullptr_t VisitAttrs = nullptr;
 
   static void SHashReduce(const runtime::NDArray::Container* key, SHashReducer hash_reduce) {
-    CHECK_EQ(key->dl_tensor.ctx.device_type, kDLCPU) << "can only compare CPU tensor";
-    CHECK(runtime::IsContiguous(key->dl_tensor)) << "Can only hash contiguous tensor";
+    ICHECK_EQ(key->dl_tensor.ctx.device_type, kDLCPU) << "can only compare CPU tensor";
+    ICHECK(runtime::IsContiguous(key->dl_tensor)) << "Can only hash contiguous tensor";
     hash_reduce(runtime::DataType(key->dl_tensor.dtype));
     hash_reduce(key->dl_tensor.ndim);
     for (int i = 0; i < key->dl_tensor.ndim; ++i) {
@@ -113,10 +113,10 @@ struct NDArrayContainerTrait {
 
     auto ldt = lhs->dl_tensor.dtype;
     auto rdt = rhs->dl_tensor.dtype;
-    CHECK_EQ(lhs->dl_tensor.ctx.device_type, kDLCPU) << "can only compare CPU tensor";
-    CHECK_EQ(rhs->dl_tensor.ctx.device_type, kDLCPU) << "can only compare CPU tensor";
-    CHECK(runtime::IsContiguous(lhs->dl_tensor)) << "Can only compare contiguous tensor";
-    CHECK(runtime::IsContiguous(rhs->dl_tensor)) << "Can only compare contiguous tensor";
+    ICHECK_EQ(lhs->dl_tensor.ctx.device_type, kDLCPU) << "can only compare CPU tensor";
+    ICHECK_EQ(rhs->dl_tensor.ctx.device_type, kDLCPU) << "can only compare CPU tensor";
+    ICHECK(runtime::IsContiguous(lhs->dl_tensor)) << "Can only compare contiguous tensor";
+    ICHECK(runtime::IsContiguous(rhs->dl_tensor)) << "Can only compare contiguous tensor";
 
     if (lhs->dl_tensor.ndim != rhs->dl_tensor.ndim) return false;
     for (int i = 0; i < lhs->dl_tensor.ndim; ++i) {
@@ -172,18 +172,18 @@ TVM_REGISTER_GLOBAL("node.Array").set_body([](TVMArgs args, TVMRetValue* ret) {
 
 TVM_REGISTER_GLOBAL("node.ArrayGetItem").set_body([](TVMArgs args, TVMRetValue* ret) {
   int64_t i = args[1];
-  CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
+  ICHECK_EQ(args[0].type_code(), kTVMObjectHandle);
   Object* ptr = static_cast<Object*>(args[0].value().v_handle);
-  CHECK(ptr->IsInstance<ArrayNode>());
+  ICHECK(ptr->IsInstance<ArrayNode>());
   auto* n = static_cast<const ArrayNode*>(ptr);
-  CHECK_LT(static_cast<size_t>(i), n->size()) << "out of bound of array";
+  ICHECK_LT(static_cast<size_t>(i), n->size()) << "out of bound of array";
   *ret = n->at(i);
 });
 
 TVM_REGISTER_GLOBAL("node.ArraySize").set_body([](TVMArgs args, TVMRetValue* ret) {
-  CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
+  ICHECK_EQ(args[0].type_code(), kTVMObjectHandle);
   Object* ptr = static_cast<Object*>(args[0].value().v_handle);
-  CHECK(ptr->IsInstance<ArrayNode>());
+  ICHECK(ptr->IsInstance<ArrayNode>());
   *ret = static_cast<int64_t>(static_cast<const ArrayNode*>(ptr)->size());
 });
 
@@ -300,7 +300,7 @@ TVM_REGISTER_REFLECTION_VTABLE(MapNode, MapNodeTrait)
     .set_creator([](const std::string&) -> ObjectPtr<Object> { return MapNode::Empty(); });
 
 TVM_REGISTER_GLOBAL("node.Map").set_body([](TVMArgs args, TVMRetValue* ret) {
-  CHECK_EQ(args.size() % 2, 0);
+  ICHECK_EQ(args.size() % 2, 0);
   std::unordered_map<ObjectRef, ObjectRef, ObjectPtrHash, ObjectPtrEqual> data;
   for (int i = 0; i < args.num_args; i += 2) {
     ObjectRef k =
@@ -312,29 +312,29 @@ TVM_REGISTER_GLOBAL("node.Map").set_body([](TVMArgs args, TVMRetValue* ret) {
 });
 
 TVM_REGISTER_GLOBAL("node.MapSize").set_body([](TVMArgs args, TVMRetValue* ret) {
-  CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
+  ICHECK_EQ(args[0].type_code(), kTVMObjectHandle);
   Object* ptr = static_cast<Object*>(args[0].value().v_handle);
-  CHECK(ptr->IsInstance<MapNode>());
+  ICHECK(ptr->IsInstance<MapNode>());
   auto* n = static_cast<const MapNode*>(ptr);
   *ret = static_cast<int64_t>(n->size());
 });
 
 TVM_REGISTER_GLOBAL("node.MapGetItem").set_body([](TVMArgs args, TVMRetValue* ret) {
-  CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
+  ICHECK_EQ(args[0].type_code(), kTVMObjectHandle);
   Object* ptr = static_cast<Object*>(args[0].value().v_handle);
-  CHECK(ptr->IsInstance<MapNode>());
+  ICHECK(ptr->IsInstance<MapNode>());
 
   auto* n = static_cast<const MapNode*>(ptr);
   auto it = n->find(String::CanConvertFrom(args[1]) ? args[1].operator String()
                                                     : args[1].operator ObjectRef());
-  CHECK(it != n->end()) << "cannot find the corresponding key in the Map";
+  ICHECK(it != n->end()) << "cannot find the corresponding key in the Map";
   *ret = (*it).second;
 });
 
 TVM_REGISTER_GLOBAL("node.MapCount").set_body([](TVMArgs args, TVMRetValue* ret) {
-  CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
+  ICHECK_EQ(args[0].type_code(), kTVMObjectHandle);
   Object* ptr = static_cast<Object*>(args[0].value().v_handle);
-  CHECK(ptr->IsInstance<MapNode>());
+  ICHECK(ptr->IsInstance<MapNode>());
   const MapNode* n = static_cast<const MapNode*>(ptr);
   int64_t cnt = n->count(String::CanConvertFrom(args[1]) ? args[1].operator String()
                                                          : args[1].operator ObjectRef());
@@ -342,7 +342,7 @@ TVM_REGISTER_GLOBAL("node.MapCount").set_body([](TVMArgs args, TVMRetValue* ret)
 });
 
 TVM_REGISTER_GLOBAL("node.MapItems").set_body([](TVMArgs args, TVMRetValue* ret) {
-  CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
+  ICHECK_EQ(args[0].type_code(), kTVMObjectHandle);
   Object* ptr = static_cast<Object*>(args[0].value().v_handle);
   auto* n = static_cast<const MapNode*>(ptr);
   Array<ObjectRef> rkvs;
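
The node.* globals touched above are registered via TVM_REGISTER_GLOBAL and
reached through the packed-function FFI. A rough usage sketch from the C++
side; QueryMapSize is a hypothetical helper and the includes assume the
0.7-era header layout:

    #include <tvm/node/container.h>      // Map, String
    #include <tvm/runtime/registry.h>    // Registry, PackedFunc
    #include <tvm/support/logging.h>     // ICHECK

    using namespace tvm;
    using namespace tvm::runtime;

    int64_t QueryMapSize(const Map<String, ObjectRef>& m) {
      // Registry::Get returns nullptr for unknown names, so guard it.
      const PackedFunc* f = Registry::Get("node.MapSize");
      ICHECK(f != nullptr) << "node.MapSize is not registered";
      // Arguments are marshalled into TVMArgs; the body registered above
      // reads them back out and answers through TVMRetValue.
      return (*f)(m);
    }
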
diff --git a/src/node/reflection.cc b/src/node/reflection.cc
index ec82c91..9dc9d33 100644
--- a/src/node/reflection.cc
+++ b/src/node/reflection.cc
@@ -50,7 +50,7 @@ class AttrGetter : public AttrVisitor {
     if (skey == key) *ret = value[0];
   }
   void Visit(const char* key, uint64_t* value) final {
-    CHECK_LE(value[0], static_cast<uint64_t>(std::numeric_limits<int64_t>::max()))
+    ICHECK_LE(value[0], static_cast<uint64_t>(std::numeric_limits<int64_t>::max()))
         << "cannot return too big constant";
     if (skey == key) *ret = static_cast<int64_t>(value[0]);
   }
@@ -198,7 +198,7 @@ class NodeAttrSetter : public AttrVisitor {
 void InitNodeByPackedArgs(ReflectionVTable* reflection, Object* n, const TVMArgs& args) {
   NodeAttrSetter setter;
   setter.type_key = n->GetTypeKey();
-  CHECK_EQ(args.size() % 2, 0);
+  ICHECK_EQ(args.size() % 2, 0);
   for (int i = 0; i < args.size(); i += 2) {
     setter.attrs.emplace(args[i].operator std::string(), args[i + 1]);
   }
@@ -245,13 +245,13 @@ ObjectRef ReflectionVTable::CreateObject(const std::string& type_key,
 
 // Expose to FFI APIs.
 void NodeGetAttr(TVMArgs args, TVMRetValue* ret) {
-  CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
+  ICHECK_EQ(args[0].type_code(), kTVMObjectHandle);
   Object* self = static_cast<Object*>(args[0].value().v_handle);
   *ret = ReflectionVTable::Global()->GetAttr(self, args[1]);
 }
 
 void NodeListAttrNames(TVMArgs args, TVMRetValue* ret) {
-  CHECK_EQ(args[0].type_code(), kTVMObjectHandle);
+  ICHECK_EQ(args[0].type_code(), kTVMObjectHandle);
   Object* self = static_cast<Object*>(args[0].value().v_handle);
 
   auto names =
diff --git a/src/node/serialization.cc b/src/node/serialization.cc
index 1f0e8c0..c7e4d27 100644
--- a/src/node/serialization.cc
+++ b/src/node/serialization.cc
@@ -85,7 +85,7 @@ class NodeIndexer : public AttrVisitor {
   void Visit(const char* key, runtime::NDArray* value) final {
     DLTensor* ptr = const_cast<DLTensor*>((*value).operator->());
     if (tensor_index_.count(ptr)) return;
-    CHECK_EQ(tensor_index_.size(), tensor_list_.size());
+    ICHECK_EQ(tensor_index_.size(), tensor_list_.size());
     tensor_index_[ptr] = tensor_list_.size();
     tensor_list_.push_back(ptr);
   }
@@ -97,10 +97,10 @@ class NodeIndexer : public AttrVisitor {
   // make index of all the children of node
   void MakeIndex(Object* node) {
     if (node == nullptr) return;
-    CHECK(node->IsInstance<Object>());
+    ICHECK(node->IsInstance<Object>());
 
     if (node_index_.count(node)) return;
-    CHECK_EQ(node_index_.size(), node_list_.size());
+    ICHECK_EQ(node_index_.size(), node_list_.size());
     node_index_[node] = node_list_.size();
     node_list_.push_back(node);
 
@@ -195,7 +195,7 @@ struct JSONNode {
     helper.ReadAllFields(reader);
 
     if (repr_str.size() != 0) {
-      CHECK_EQ(repr_b64.size(), 0U);
+      ICHECK_EQ(repr_b64.size(), 0U);
       repr_bytes = std::move(repr_str);
     } else if (repr_b64.size() != 0) {
       repr_bytes = Base64Decode(repr_b64);
@@ -388,13 +388,13 @@ class JSONAttrSetter : public AttrVisitor {
   void Visit(const char* key, runtime::NDArray* value) final {
     size_t index;
     ParseValue(key, &index);
-    CHECK_LE(index, tensor_list_->size());
+    ICHECK_LE(index, tensor_list_->size());
     *value = tensor_list_->at(index);
   }
   void Visit(const char* key, ObjectRef* value) final {
     size_t index;
     ParseValue(key, &index);
-    CHECK_LE(index, node_list_->size());
+    ICHECK_LE(index, node_list_->size());
     *value = ObjectRef(node_list_->at(index));
   }
   // set node to be current JSONNode
@@ -421,13 +421,13 @@ class JSONAttrSetter : public AttrVisitor {
     if (jnode->type_key == MapNode::_type_key) {
       std::unordered_map<ObjectRef, ObjectRef, ObjectHash, ObjectEqual> container;
       if (jnode->keys.empty()) {
-        CHECK_EQ(jnode->data.size() % 2, 0U);
+        ICHECK_EQ(jnode->data.size() % 2, 0U);
         for (size_t i = 0; i < jnode->data.size(); i += 2) {
           container[ObjectRef(node_list_->at(jnode->data[i]))] =
               ObjectRef(node_list_->at(jnode->data[i + 1]));
         }
       } else {
-        CHECK_EQ(jnode->data.size(), jnode->keys.size());
+        ICHECK_EQ(jnode->data.size(), jnode->keys.size());
         for (size_t i = 0; i < jnode->data.size(); ++i) {
           container[String(jnode->keys[i])] = ObjectRef(node_list_->at(jnode->data[i]));
         }
@@ -530,7 +530,7 @@ struct JSONGraph {
         }
       }
     }
-    CHECK_EQ(topo_order.size(), n_nodes) << "Cyclic reference detected in JSON file";
+    ICHECK_EQ(topo_order.size(), n_nodes) << "Cyclic reference detected in JSON file";
     std::reverse(std::begin(topo_order), std::end(topo_order));
     return topo_order;
   }
@@ -562,7 +562,7 @@ ObjectRef LoadJSON(std::string json_str) {
       support::Base64InStream b64strm(&mstrm);
       b64strm.InitPosition();
       runtime::NDArray temp;
-      CHECK(temp.Load(&b64strm));
+      ICHECK(temp.Load(&b64strm));
       tensors.emplace_back(std::move(temp));
     }
   }
diff --git a/src/node/structural_equal.cc b/src/node/structural_equal.cc
index e05cbbb..1fa72c9 100644
--- a/src/node/structural_equal.cc
+++ b/src/node/structural_equal.cc
@@ -90,7 +90,7 @@ class RemapVarSEqualHandler : public SEqualReducer::Handler {
 
   void MarkGraphNode() final {
     // need to push to pending tasks in this case
-    CHECK(!allow_push_to_stack_ && !task_stack_.empty());
+    ICHECK(!allow_push_to_stack_ && !task_stack_.empty());
     task_stack_.back().graph_equal = true;
   }
 
@@ -108,8 +108,8 @@ class RemapVarSEqualHandler : public SEqualReducer::Handler {
     equal_map_lhs_.clear();
     equal_map_rhs_.clear();
     if (!SEqualReduce(lhs, rhs, map_free_vars)) return false;
-    CHECK_EQ(pending_tasks_.size(), 1U);
-    CHECK(allow_push_to_stack_);
+    ICHECK_EQ(pending_tasks_.size(), 1U);
+    ICHECK(allow_push_to_stack_);
     task_stack_.emplace_back(std::move(pending_tasks_.back()));
     pending_tasks_.clear();
     return RunTasks();
@@ -141,7 +141,7 @@ class RemapVarSEqualHandler : public SEqualReducer::Handler {
         // We can safely mark lhs and rhs as equal to each other.
         auto it = equal_map_lhs_.find(entry.lhs);
         if (it != equal_map_lhs_.end()) {
-          CHECK(it->second.same_as(entry.rhs));
+          ICHECK(it->second.same_as(entry.rhs));
         }
        // create the map if the equality is graph equal.
         if (entry.graph_equal) {
@@ -156,7 +156,7 @@ class RemapVarSEqualHandler : public SEqualReducer::Handler {
         // Expand the objects
         // The SEqual of the object can call into this->SEqualReduce
         // which populates the pending tasks.
-        CHECK_EQ(pending_tasks_.size(), 0U);
+        ICHECK_EQ(pending_tasks_.size(), 0U);
         allow_push_to_stack_ = false;
         if (!DispatchSEqualReduce(entry.lhs, entry.rhs, entry.map_free_vars)) return false;
         allow_push_to_stack_ = true;
@@ -174,7 +174,7 @@ class RemapVarSEqualHandler : public SEqualReducer::Handler {
   // The default equal as registered in the structural equal vtable.
   bool DispatchSEqualReduce(const ObjectRef& lhs, const ObjectRef& rhs, bool map_free_vars) {
     auto compute = [=]() {
-      CHECK(lhs.defined() && rhs.defined() && lhs->type_index() == rhs->type_index());
+      ICHECK(lhs.defined() && rhs.defined() && lhs->type_index() == rhs->type_index());
       // skip entries that already have equality maps.
       auto it = equal_map_lhs_.find(lhs);
       if (it != equal_map_lhs_.end()) {
diff --git a/src/node/structural_hash.cc b/src/node/structural_hash.cc
index cb576fa..e0b729d 100644
--- a/src/node/structural_hash.cc
+++ b/src/node/structural_hash.cc
@@ -79,7 +79,7 @@ class VarCountingSHashHandler : public SHashReducer::Handler {
 
   void MarkGraphNode() final {
     // need to push to pending tasks in this case
-    CHECK(!allow_push_to_stack_ && !task_stack_.empty());
+    ICHECK(!allow_push_to_stack_ && !task_stack_.empty());
     task_stack_.back().graph_node_hash = true;
   }
 
@@ -97,7 +97,7 @@ class VarCountingSHashHandler : public SHashReducer::Handler {
   }
 
   void SHashReduceFreeVar(const runtime::Object* var, bool map_free_vars) final {
-    CHECK(!hash_memo_.count(GetRef<ObjectRef>(var)));
+    ICHECK(!hash_memo_.count(GetRef<ObjectRef>(var)));
     if (map_free_vars) {
       // use counter value.
       size_t value = std::hash<size_t>()(free_var_counter_++);
@@ -127,19 +127,19 @@ class VarCountingSHashHandler : public SHashReducer::Handler {
   }
 
   size_t Hash(const ObjectRef& object, bool map_free_vars) {
-    CHECK_EQ(task_stack_.size(), 0U);
-    CHECK_EQ(pending_tasks_.size(), 0U);
-    CHECK_EQ(result_stack_.size(), 0U);
+    ICHECK_EQ(task_stack_.size(), 0U);
+    ICHECK_EQ(pending_tasks_.size(), 0U);
+    ICHECK_EQ(result_stack_.size(), 0U);
 
     this->SHashReduce(object, map_free_vars);
-    CHECK_EQ(pending_tasks_.size(), 1U);
-    CHECK(allow_push_to_stack_);
+    ICHECK_EQ(pending_tasks_.size(), 1U);
+    ICHECK(allow_push_to_stack_);
     task_stack_.emplace_back(std::move(pending_tasks_.back()));
     pending_tasks_.clear();
 
     this->RunTasks();
 
-    CHECK_EQ(result_stack_.size(), 1U);
+    ICHECK_EQ(result_stack_.size(), 1U);
     size_t ret = result_stack_.back();
     result_stack_.pop_back();
     return ret;
@@ -160,7 +160,7 @@ class VarCountingSHashHandler : public SHashReducer::Handler {
    */
   size_t ReduceHash(const Task& task) {
     size_t stack_begin = task.result_stack_index;
-    CHECK_LE(stack_begin, result_stack_.size());
+    ICHECK_LE(stack_begin, result_stack_.size());
 
     // combine in the reverse order of the stack.
     size_t reduced_hash = task.reduced_hash;
@@ -210,7 +210,7 @@ class VarCountingSHashHandler : public SHashReducer::Handler {
           entry.children_expanded = true;
           entry.result_stack_index = result_stack_.size();
 
-          CHECK_EQ(pending_tasks_.size(), 0U);
+          ICHECK_EQ(pending_tasks_.size(), 0U);
           allow_push_to_stack_ = false;
           // dispatch hash, reduce to the current slot.
           this->DispatchSHash(entry.object, entry.map_free_vars);
@@ -227,7 +227,7 @@ class VarCountingSHashHandler : public SHashReducer::Handler {
 
   // The default equal as registered in the structural equal vtable.
   void DispatchSHash(const ObjectRef& object, bool map_free_vars) {
-    CHECK(object.defined());
+    ICHECK(object.defined());
     vtable_->SHashReduce(object.get(), SHashReducer(this, map_free_vars));
   }
 
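The ICHECKs in VarCountingSHashHandler guard an explicit task-stack
traversal: instead of recursing, pending children are queued and their
hashes folded back through a result stack, which is why the handler can
assert result_stack_.size() == 1 once the root task finishes. A simplified,
self-contained sketch of that pattern (no memoization or free-var handling,
unlike the real handler):

    #include <cstddef>
    #include <functional>
    #include <vector>

    struct Node {
      int value;
      std::vector<const Node*> children;
    };

    size_t HashCombine(size_t seed, size_t v) {
      return seed ^ (v + 0x9e3779b9 + (seed << 6) + (seed >> 2));
    }

    size_t HashTree(const Node* root) {
      struct Task {
        const Node* node;
        bool expanded = false;
        size_t result_index = 0;
      };
      std::vector<Task> task_stack;
      std::vector<size_t> result_stack;
      task_stack.push_back({root});
      while (!task_stack.empty()) {
        size_t cur = task_stack.size() - 1;
        if (!task_stack[cur].expanded) {
          // First visit: record where this node's child hashes will begin,
          // then push each child as a pending task. Indexing rather than a
          // reference is used because push_back may reallocate the stack.
          task_stack[cur].expanded = true;
          task_stack[cur].result_index = result_stack.size();
          for (const Node* c : task_stack[cur].node->children) {
            task_stack.push_back({c});
          }
        } else {
          // Second visit: every child has reduced its hash onto
          // result_stack; fold them into this node's hash and publish it.
          Task done = task_stack[cur];
          task_stack.pop_back();
          size_t h = std::hash<int>()(done.node->value);
          while (result_stack.size() > done.result_index) {
            h = HashCombine(h, result_stack.back());
            result_stack.pop_back();
          }
          result_stack.push_back(h);
        }
      }
      // Mirrors the ICHECK_EQ(result_stack_.size(), 1U) invariant above.
      return result_stack.back();
    }
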
diff --git a/src/parser/meta_ref.cc b/src/parser/meta_ref.cc
index d238927..c74b396 100644
--- a/src/parser/meta_ref.cc
+++ b/src/parser/meta_ref.cc
@@ -72,9 +72,9 @@ struct MetaRefExpander : public ExprMutator {
     if (auto op_node = call->op.as<OpNode>()) {
       if (op_node->name == "parser.MetaRef") {
         auto meta_attrs = call->attrs.as<MetaRefAttrs>();
-        CHECK(meta_attrs) << "an internal error has occurred";
+        ICHECK(meta_attrs) << "an internal error has occurred";
         auto nodes = table.at(meta_attrs->node_type_key);
-        CHECK_LT(meta_attrs->node_index, nodes.size());
+        ICHECK_LT(meta_attrs->node_index, nodes.size());
         return Downcast<Expr>(nodes[meta_attrs->node_index]);
       }
     }
diff --git a/src/parser/parser.cc b/src/parser/parser.cc
index 9c9965c..987a6e2 100644
--- a/src/parser/parser.cc
+++ b/src/parser/parser.cc
@@ -371,7 +371,7 @@ class Parser {
    * \return The Nth token.
    */
   Token Lookahead(int n) {
-    CHECK_GE(n, 1) << "lookahead is only valid when n >= 1";
+    ICHECK_GE(n, 1) << "lookahead is only valid when n >= 1";
 
     // We intend to skip n - 1 tokens, then return the nth.
     auto old_pos = pos;
@@ -822,7 +822,7 @@ class Parser {
               ctor = tvm::Constructor(ctor_name, arg_types, type_global);
             }
 
-            CHECK(ctor.defined());
+            ICHECK(ctor.defined());
 
             try {
               this->ctors.Add(ctor_name, ctor);
@@ -944,7 +944,7 @@ class Parser {
         }
       }
 
-      CHECK_GE(exprs.size(), 1);
+      ICHECK_GE(exprs.size(), 1);
 
       if (exprs.size() == 1) {
         // ICHECK(exprs[0].defined() && exprs[0]->span.defined())
@@ -1258,7 +1258,7 @@ class Parser {
         auto op = opt_op[0];
 
         Expr right = WithSpan<Expr>([this] { return ParseCallExpr(); });
-        CHECK(right->span.defined());
+        ICHECK(right->span.defined());
 
         // If the operator stack is empty
         // we parse an operator and expression
@@ -1285,7 +1285,7 @@ class Parser {
           exprs.pop_back();
           Expr left = exprs.back();
           exprs.pop_back();
-          CHECK(new_op.op.defined()) << "a call op must be set " << new_op.op;
+          ICHECK(new_op.op.defined()) << "a call op must be set " << new_op.op;
           exprs.push_back(
               relay::Call(new_op.op, {left, right}, Attrs(), {}, left->span.Merge(right->span)));
         }
@@ -1301,7 +1301,7 @@ class Parser {
         exprs.pop_back();
         Expr left = exprs.back();
         exprs.pop_back();
-        CHECK(new_op.op.defined()) << "a call op must be set " << new_op.op;
+        ICHECK(new_op.op.defined()) << "a call op must be set " << new_op.op;
         exprs.push_back(
             relay::Call(new_op.op, {left, right}, Attrs(), {}, left->span.Merge(right->span)));
       }
@@ -1369,7 +1369,7 @@ class Parser {
   }
 
   Expr ParseCallArgs(Expr op) {
-    CHECK(op.defined()) << "the operator must be defined";
+    ICHECK(op.defined()) << "the operator must be defined";
 
     DLOG(INFO) << "Parser::ParseCallArgs";
     Map<String, ObjectRef> raw_attrs;
@@ -1401,7 +1401,7 @@ class Parser {
 
       if (is_op && op_key.size()) {
         auto attr_obj = tvm::ReflectionVTable::Global()->CreateObject(op_key, raw_attrs);
-        CHECK(attr_obj.defined());
+        ICHECK(attr_obj.defined());
         attrs = Downcast<Attrs>(attr_obj);
       }
 
@@ -1500,7 +1500,7 @@ class Parser {
             auto spanned_idents = ParseHierarchicalName();
             auto idents = spanned_idents.data;
             auto span = spanned_idents.span;
-            CHECK_NE(idents.size(), 0);
+            ICHECK_NE(idents.size(), 0);
             std::stringstream op_name;
             int i = 0;
             int periods = idents.size() - 1;
diff --git a/src/parser/source_map.cc b/src/parser/source_map.cc
index 40998b0..7ac978c 100644
--- a/src/parser/source_map.cc
+++ b/src/parser/source_map.cc
@@ -62,7 +62,7 @@ Source::Source(SourceName src_name, std::string source) {
 
 tvm::String Source::GetLine(int line) {
   DLOG(INFO) << "Source::GetLine: line=" << line;
-  CHECK(line - 1 < static_cast<int64_t>((*this)->line_map.size()))
+  ICHECK(line - 1 < static_cast<int64_t>((*this)->line_map.size()))
       << "requested line: " << line << "at index: " << (line - 1)
       << "line_map size: " << (*this)->line_map.size() << "source: " << (*this)->source;
 
diff --git a/src/parser/tokenizer.h b/src/parser/tokenizer.h
index 20ad173..a9ae64b 100644
--- a/src/parser/tokenizer.h
+++ b/src/parser/tokenizer.h
@@ -100,7 +100,7 @@ struct Tokenizer {
   bool More() { return this->pos < this->source.size(); }
 
   char Peek() {
-    CHECK(pos < this->source.size());
+    ICHECK(pos < this->source.size());
     return this->source.at(this->pos);
   }
 
@@ -170,7 +170,7 @@ struct Tokenizer {
   }
 
   Token ParseNumber(bool is_pos, bool is_float, std::string number) {
-    CHECK(number.size() > 0) << "an empty string is an invalid number";
+    ICHECK(number.size() > 0) << "an empty string is an invalid number";
 
     try {
       if (is_float) {
@@ -231,22 +231,22 @@ struct Tokenizer {
     int line = this->line;
     int column = this->col;
 
-    CHECK_EQ(Peek(), '[');
+    ICHECK_EQ(Peek(), '[');
     Next();
     std::stringstream type_key;
     while (More() && Peek() != ']') {
       type_key << Next();
     }
-    CHECK_EQ(Peek(), ']');
+    ICHECK_EQ(Peek(), ']');
     Next();
 
-    CHECK_EQ(Peek(), '[');
+    ICHECK_EQ(Peek(), '[');
     Next();
     std::stringstream str_index;
     while (More() && Peek() != ']') {
       str_index << Next();
     }
-    CHECK_EQ(Peek(), ']');
+    ICHECK_EQ(Peek(), ']');
     Next();
    // TODO: add error handling around bad indices
     auto index = ParseNumber(true, false, str_index.str()).ToNumber();
@@ -266,7 +266,7 @@ struct Tokenizer {
         raw_attribute << Next();
       }
 
-      CHECK_EQ(Next(), ']');
+      ICHECK_EQ(Next(), ']');
 
       auto attribute = raw_attribute.str();
       // Clean up the white-space on both sides.
@@ -537,7 +537,7 @@ struct Tokenizer {
     DLOG(INFO) << "tvm::parser::Tokenize";
     while (this->More()) {
       auto token = TokenizeOnce();
-      CHECK(token.defined());
+      ICHECK(token.defined());
       this->tokens.push_back(token);
     }
     this->tokens.push_back(NewToken(TokenType::kEndOfFile));
@@ -576,15 +576,15 @@ std::vector<Token> Condense(const std::vector<Token>& tokens, Token* table) {
           i += 1;
           // TODO(@jroesch): merge spans
           auto tok = Token(current->span, TokenType::kLocal, next->data);
-          CHECK(tok.defined());
+          ICHECK(tok.defined());
           out.push_back(tok);
         } else if (next->token_type == TokenType::kInteger) {
           i += 1;
           auto tok = Token(current->span, TokenType::kGraph, next->data);
-          CHECK(tok.defined());
+          ICHECK(tok.defined());
           out.push_back(tok);
         } else {
-          CHECK(current.defined());
+          ICHECK(current.defined());
           out.push_back(current);
         }
         continue;
@@ -596,10 +596,10 @@ std::vector<Token> Condense(const std::vector<Token>& tokens, Token* table) {
           i += 1;
           // TODO(@jroesch): merge spans
           auto tok = Token(current->span, TokenType::kGlobal, next->data);
-          CHECK(tok.defined());
+          ICHECK(tok.defined());
           out.push_back(tok);
         } else {
-          CHECK(current.defined());
+          ICHECK(current.defined());
           out.push_back(current);
         }
         continue;
@@ -638,7 +638,7 @@ std::pair<std::vector<Token>, Token> Tokenize(const DiagnosticContext& ctx, cons
   Token meta_table(Span(), TokenType::kUnknown, ObjectRef());
   auto tokens = Condense(tokenizer.tokens, &meta_table);
   for (auto token : tokens) {
-    CHECK(token.defined());
+    ICHECK(token.defined());
   }
   return {tokens, meta_table};
 }
diff --git a/src/printer/doc.cc b/src/printer/doc.cc
index ab1eddb..4b22d54 100644
--- a/src/printer/doc.cc
+++ b/src/printer/doc.cc
@@ -85,7 +85,7 @@ class DocLine : public DocAtom {
 
 // DSL function implementations
 Doc& Doc::operator<<(const Doc& right) {
-  CHECK(this != &right);
+  ICHECK(this != &right);
   this->stream_.insert(this->stream_.end(), right.stream_.begin(), right.stream_.end());
   return *this;
 }
diff --git a/src/printer/meta_data.h b/src/printer/meta_data.h
index df27d92..233da1b 100644
--- a/src/printer/meta_data.h
+++ b/src/printer/meta_data.h
@@ -99,7 +99,7 @@ class TextMetaDataContext {
       return it->second;
     }
     std::string type_key = node->GetTypeKey();
-    CHECK(!type_key.empty());
+    ICHECK(!type_key.empty());
     Array<ObjectRef>& mvector = meta_data_[type_key];
     int64_t index = static_cast<int64_t>(mvector.size());
     mvector.push_back(node);
diff --git a/src/printer/relay_text_printer.cc b/src/printer/relay_text_printer.cc
index 555d335..4132ab1 100644
--- a/src/printer/relay_text_printer.cc
+++ b/src/printer/relay_text_printer.cc
@@ -322,7 +322,7 @@ Doc RelayTextPrinter::VisitExpr_(const ConstantNode* op) {
   if (op->is_scalar()) {
     std::ostringstream os;
     DataType dtype = DataType(op->data->dtype);
-    CHECK_EQ(op->data->ctx.device_type, kDLCPU);
+    ICHECK_EQ(op->data->ctx.device_type, kDLCPU);
     if (dtype == DataType::Int(32)) {
       return ScalarLiteral(dtype, static_cast<const int32_t*>(op->data->data)[0]);
     } else if (dtype == DataType::Int(64)) {
@@ -831,7 +831,7 @@ std::vector<Doc> RelayTextPrinter::PrintFuncAttrs(const Attrs& attrs) {
   std::vector<Doc> docs;
   if (!attrs.defined()) return docs;
   const auto* dict_attrs = attrs.as<DictAttrsNode>();
-  CHECK(dict_attrs);
+  ICHECK(dict_attrs);
   for (const auto& k : dict_attrs->dict) {
     Doc doc;
     doc << k.first << "=" << Print(k.second);
diff --git a/src/printer/tir_text_printer.cc b/src/printer/tir_text_printer.cc
index 7feb0b5..107817d 100644
--- a/src/printer/tir_text_printer.cc
+++ b/src/printer/tir_text_printer.cc
@@ -353,7 +353,7 @@ Doc TIRTextPrinter::VisitExpr_(const CallNode* op) {
   } else {
    // TODO(bohan): Print out the name of the global var in the module.
     auto* op_gvar = op->op.as<GlobalVarNode>();
-    CHECK(op_gvar != nullptr);
+    ICHECK(op_gvar != nullptr);
     doc << "@" << Doc::Text(op_gvar->name_hint) << "(";
   }
   std::vector<Doc> args;
diff --git a/src/printer/tvmscript_printer.cc b/src/printer/tvmscript_printer.cc
index 5add7c1..09f95e4 100644
--- a/src/printer/tvmscript_printer.cc
+++ b/src/printer/tvmscript_printer.cc
@@ -475,7 +475,7 @@ Doc TVMScriptPrinter::VisitExpr_(const CallNode* op) {
     doc << Doc::Text(ptr_op->name) << "(";
   } else {
     auto* op_gvar = op->op.as<GlobalVarNode>();
-    CHECK(op_gvar != nullptr);
+    ICHECK(op_gvar != nullptr);
     doc << Doc::Text(op_gvar->name_hint) << "(";
   }
   std::vector<Doc> args;
@@ -566,7 +566,7 @@ Doc TVMScriptPrinter::VisitStmt_(const AttrStmtNode* op) {
   // concise thread env
   if (op->node->IsInstance<IterVarNode>() && op->attr_key == "thread_extent") {
     const auto* iter_var = Downcast<IterVar>(op->node).get();
-    CHECK(!iter_var->dom.defined());
+    ICHECK(!iter_var->dom.defined());
     var_not_in_headers.insert(iter_var->var.get());
     var_env_map_[iter_var->var] = iter_var->thread_tag;
     if (current_num_ != num_child_ - 1) {
@@ -890,7 +890,7 @@ Doc TVMScriptPrinter::PrintBuffer(const BufferNode* op) {
 TVM_REGISTER_GLOBAL("script.AsTVMScript")
     .set_body_typed<std::string(const ObjectRef&, bool)>([](const ObjectRef& functions,
                                                             bool show_meta) {
-      CHECK(functions.as<PrimFuncNode>() != nullptr || functions.as<IRModuleNode>() != nullptr);
+      ICHECK(functions.as<PrimFuncNode>() != nullptr || functions.as<IRModuleNode>() != nullptr);
       return "@tvm.script.tir\n" + TVMScriptPrinter(show_meta).Print(functions).str() + "\n";
     });
 
diff --git a/src/relay/analysis/annotated_region_set.cc b/src/relay/analysis/annotated_region_set.cc
index 587add3..04a18c4 100644
--- a/src/relay/analysis/annotated_region_set.cc
+++ b/src/relay/analysis/annotated_region_set.cc
@@ -119,7 +119,7 @@ class AnnotatedRegionSet::Creator : protected MixedModeVisitor {
       }
 
       auto arg_region = region_set_->GetRegion(arg);
-      CHECK_EQ(region.defined(), arg_region.defined())
+      ICHECK_EQ(region.defined(), arg_region.defined())
           << "Arg regions are inconsistent: " << AsText(expr);
       if (region.defined() && region != arg_region) {
         region_set_->MergeRegions(arg_region, region);
@@ -137,21 +137,21 @@ class AnnotatedRegionSet::Creator : protected MixedModeVisitor {
       AddToArgRegion(GetRef<Call>(call), call->args);
     } else if (call->op == begin_op_) {
      // The annotation node is inserted on an edge so it must have only one argument.
-      CHECK_EQ(call->args.size(), 1U);
+      ICHECK_EQ(call->args.size(), 1U);
       std::string target = call->attrs.as<CompilerAttrs>()->compiler;
 
       // Check if the argument already belongs to a region
       auto region = region_set_->GetRegion(GetRef<Call>(call));
-      CHECK(!region.defined());
+      ICHECK(!region.defined());
 
       // Create a new region.
       region = region_set_->MakeRegion(target);
       region->nodes_.insert(GetRef<Call>(call));
       region->ins_.push_back(GetRef<Call>(call));
     } else {
-      CHECK_EQ(call->op, end_op_);
+      ICHECK_EQ(call->op, end_op_);
      // The annotation node is inserted on an edge so it must have only one argument.
-      CHECK_EQ(call->args.size(), 1U);
+      ICHECK_EQ(call->args.size(), 1U);
       std::string target = call->attrs.as<CompilerAttrs>()->compiler;
 
       // Check if the argument already belongs to a region
@@ -162,7 +162,7 @@ class AnnotatedRegionSet::Creator : protected MixedModeVisitor {
       } else {
        // If the argument already belongs to a region, it must have the same target.
         // Otherwise we should see a region_begin op.
-        CHECK_EQ(region->GetTarget(), target);
+        ICHECK_EQ(region->GetTarget(), target);
       }
       region->nodes_.insert(GetRef<Call>(call));
       region->outs_.push_back(GetRef<Call>(call));
diff --git a/src/relay/analysis/annotated_region_set.h b/src/relay/analysis/annotated_region_set.h
index cbcf155..d9923cc 100644
--- a/src/relay/analysis/annotated_region_set.h
+++ b/src/relay/analysis/annotated_region_set.h
@@ -114,7 +114,7 @@ class AnnotatedRegion : public ObjectRef {
   /*! \return Mutable pointers to the node. */
   AnnotatedRegionNode* operator->() const {
     auto* ptr = get_mutable();
-    CHECK(ptr != nullptr);
+    ICHECK(ptr != nullptr);
     return static_cast<AnnotatedRegionNode*>(ptr);
   }
 };
@@ -216,39 +216,39 @@ class AnnotatedRegionSet : public ObjectRef {
   /*! \return The begin iterator. */
   iterator begin() {
     auto* n = operator->();
-    CHECK(n);
+    ICHECK(n);
     return n->begin();
   }
   /*! \return The end iterator. */
   iterator end() {
     auto* n = operator->();
-    CHECK(n);
+    ICHECK(n);
     return n->end();
   }
   /*! \return The begin iterator. */
   const_iterator begin() const {
     const auto* n = operator->();
-    CHECK(n);
+    ICHECK(n);
     return n->begin();
   }
   /*! \return The end iterator. */
   const_iterator end() const {
     const auto* n = operator->();
-    CHECK(n);
+    ICHECK(n);
     return n->end();
   }
 
   /*! \return mutable pointers to the node. */
   AnnotatedRegionSetNode* operator->() const {
     auto* ptr = get_mutable();
-    CHECK(ptr != nullptr);
+    ICHECK(ptr != nullptr);
     return static_cast<AnnotatedRegionSetNode*>(ptr);
   }
 
   /*! \return The region an expression belongs to. */
   AnnotatedRegion operator[](const Expr& expr) {
     const auto* n = operator->();
-    CHECK(n);
+    ICHECK(n);
     return n->GetRegion(expr);
   }
 
diff --git a/src/relay/analysis/call_graph.cc b/src/relay/analysis/call_graph.cc
index 0d3fedc..9edb471 100644
--- a/src/relay/analysis/call_graph.cc
+++ b/src/relay/analysis/call_graph.cc
@@ -51,7 +51,7 @@ CallGraph::CallGraph(IRModule module) {
 }
 
 void CallGraphNode::AddToCallGraph(const GlobalVar& gv, const Function& func) {
-  CHECK(func.defined() && gv.defined());
+  ICHECK(func.defined() && gv.defined());
  // Add the current global function as an entry to the call graph.
   CallGraphEntry* cg_node = LookupGlobalVar(gv);
 
@@ -73,20 +73,20 @@ void CallGraphNode::AddToCallGraph(const GlobalVar& gv, const Function& func) {
 
 const CallGraphEntry* CallGraphNode::operator[](const GlobalVar& gv) const {
   const_iterator cit = call_graph_.find(gv);
-  CHECK(cit != call_graph_.end()) << "GlobalVar " << gv->name_hint
-                                  << " not found in the call graph!";
+  ICHECK(cit != call_graph_.end())
+      << "GlobalVar " << gv->name_hint << " not found in the call graph!";
   return cit->second.get();
 }
 
 CallGraphEntry* CallGraphNode::operator[](const GlobalVar& gv) {
   const_iterator cit = call_graph_.find(gv);
-  CHECK(cit != call_graph_.end()) << "GlobalVar " << gv->name_hint
-                                  << " not found in the call graph!";
+  ICHECK(cit != call_graph_.end())
+      << "GlobalVar " << gv->name_hint << " not found in the call graph!";
   return cit->second.get();
 }
 
 BaseFunc CallGraphNode::GetGlobalFunction(const GlobalVar& var) const {
-  CHECK(module->ContainGlobalVar(var->name_hint))
+  ICHECK(module->ContainGlobalVar(var->name_hint))
       << "GlobalVar " << var->name_hint << " not found in the current ir module";
   return module->Lookup(var);
 }
@@ -94,13 +94,13 @@ BaseFunc CallGraphNode::GetGlobalFunction(const GlobalVar& var) const {
 // Query the existence of a GlobalVar in the call graph. It creates an entry if
 // there is no such node available.
 CallGraphEntry* CallGraphNode::LookupGlobalVar(const GlobalVar& gv) {
-  CHECK(gv.defined());
+  ICHECK(gv.defined());
 
   // This inserts an element to the call graph if it is not there yet.
   auto& call_graph_node = call_graph_[gv];
   if (call_graph_node) return call_graph_node.get();
 
-  CHECK(module->ContainGlobalVar(gv->name_hint))
+  ICHECK(module->ContainGlobalVar(gv->name_hint))
       << "GlobalVar " << gv->name_hint << " not found in the current ir module";
 
   // Create the node for the inserted entry.
@@ -118,7 +118,7 @@ void CallGraphNode::Print(std::ostream& os) const {
 
 GlobalVar CallGraphNode::RemoveGlobalVarFromModule(CallGraphEntry* cg_node,
                                                    bool update_call_graph) {
-  CHECK(cg_node->empty() || (cg_node->IsRecursive() && cg_node->size() == 1))
+  ICHECK(cg_node->empty() || (cg_node->IsRecursive() && cg_node->size() == 1))
       << "Cannot remove global var " << cg_node->GetNameHint()
       << " from call graph, because it still calls " << cg_node->size()
       << " other global functions";
@@ -232,7 +232,7 @@ inline void CallGraphEntry::AddCalledGlobal(CallGraphEntry* cg_node) {
 // Remove an edge from the current global function to the callee.
 void CallGraphEntry::RemoveCallTo(const GlobalVar& callee) {
   for (auto it = begin();; ++it) {
-    CHECK(it != end()) << "Cannot find global function " << callee->name_hint << " to remove!";
+    ICHECK(it != end()) << "Cannot find global function " << callee->name_hint << " to remove!";
     if (it->second->GetGlobalVar() == callee) {
       // Only remove one occurrence of the call site.
       it->second->DecRef();
@@ -256,7 +256,7 @@ void CallGraphEntry::RemoveAllCallTo(CallGraphEntry* callee) {
     }
   }
   // Make sure all references to the callee are removed.
-  CHECK_EQ(callee->GetRefCount(), 0U)
+  ICHECK_EQ(callee->GetRefCount(), 0U)
       << "All references to " << callee->GetNameHint() << " should have been removed";
 }
 
@@ -291,7 +291,7 @@ TVM_REGISTER_NODE_TYPE(CallGraphNode);
 TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
     .set_dispatch<CallGraphNode>([](const ObjectRef& ref, ReprPrinter* p) {
       auto* node = static_cast<const CallGraphNode*>(ref.get());
-      CHECK(node);
+      ICHECK(node);
       p->stream << "CallGraph: \n" << GetRef<CallGraph>(node);
     });
 
diff --git a/src/relay/analysis/call_graph.h b/src/relay/analysis/call_graph.h
index 07b2527..7cc813e 100644
--- a/src/relay/analysis/call_graph.h
+++ b/src/relay/analysis/call_graph.h
@@ -218,25 +218,25 @@ class CallGraph : public ObjectRef {
   /*! \return The begin iterator. */
   iterator begin() {
     auto* n = operator->();
-    CHECK(n);
+    ICHECK(n);
     return n->begin();
   }
   /*! \return The end iterator. */
   iterator end() {
     auto* n = operator->();
-    CHECK(n);
+    ICHECK(n);
     return n->end();
   }
   /*! \return The begin iterator. */
   const_iterator begin() const {
     const auto* n = operator->();
-    CHECK(n);
+    ICHECK(n);
     return n->begin();
   }
   /*! \return The end iterator. */
   const_iterator end() const {
     const auto* n = operator->();
-    CHECK(n);
+    ICHECK(n);
     return n->end();
   }
 
@@ -249,7 +249,7 @@ class CallGraph : public ObjectRef {
    */
   const CallGraphEntry* operator[](const GlobalVar& gv) const {
     const auto* n = operator->();
-    CHECK(n);
+    ICHECK(n);
     return (*n)[gv];
   }
   /*!
@@ -261,7 +261,7 @@ class CallGraph : public ObjectRef {
    */
   CallGraphEntry* operator[](const GlobalVar& gv) {
     auto* n = operator->();
-    CHECK(n);
+    ICHECK(n);
     return (*n)[gv];
   }
   /*!
@@ -273,7 +273,7 @@ class CallGraph : public ObjectRef {
    */
   const CallGraphEntry* operator[](const std::string& gvar_name) const {
     const auto* n = operator->();
-    CHECK(n);
+    ICHECK(n);
     return (*n)[gvar_name];
   }
   /*!
@@ -285,14 +285,14 @@ class CallGraph : public ObjectRef {
    */
   CallGraphEntry* operator[](const std::string& gvar_name) {
     auto* n = operator->();
-    CHECK(n);
+    ICHECK(n);
     return (*n)[gvar_name];
   }
 
   /*! \return mutable pointers to the node. */
   CallGraphNode* operator->() const {
     auto* ptr = get_mutable();
-    CHECK(ptr != nullptr);
+    ICHECK(ptr != nullptr);
     return static_cast<CallGraphNode*>(ptr);
   }
 
@@ -360,7 +360,7 @@ class CallGraphEntry {
    * \return The fetched CallGraphEntry.
    */
   CallGraphEntry* operator[](size_t i) const {
-    CHECK_LT(i, called_globals_.size()) << "Invalid Index";
+    ICHECK_LT(i, called_globals_.size()) << "Invalid Index";
     return called_globals_[i].second;
   }
 
@@ -452,7 +452,7 @@ class CallGraphEntry {
  private:
   /*! \brief Decrement the reference counter by 1. */
   void DecRef() {
-    CHECK_GT(ref_cnt_, 0);
+    ICHECK_GT(ref_cnt_, 0);
     --ref_cnt_;
   }
   /*! \brief Increment the reference counter by 1. */
diff --git a/src/relay/analysis/context_analysis.cc b/src/relay/analysis/context_analysis.cc
index 5fbd8a4..a648b7a 100644
--- a/src/relay/analysis/context_analysis.cc
+++ b/src/relay/analysis/context_analysis.cc
@@ -151,7 +151,7 @@ DeviceDomainPtr Join(const DeviceDomainPtr& lhs, const DeviceDomainPtr& rhs) {
   } else if (rhs->IsEmptyDomain()) {
     return lhs;
   } else {
-    CHECK(*lhs.get() == *rhs.get()) << "All expressions must have a singular device to unify";
+    ICHECK(*lhs.get() == *rhs.get()) << "All expressions must have a singular device to unify";
     return lhs;
   }
 }
@@ -311,7 +311,7 @@ class ContextAnalyzer : public MixedModeVisitor {
       auto ty = let->value->checked_type();
       if (ty->IsInstance<FuncTypeNode>()) {
         auto gv = ExtractClosure(let);
-        CHECK(gv.defined() && gv->IsInstance<GlobalVarNode>());
+        ICHECK(gv.defined() && gv->IsInstance<GlobalVarNode>());
         closures_[let->var] = Downcast<GlobalVar>(gv);
       }
 
@@ -444,7 +444,7 @@ class ContextAnalyzer : public MixedModeVisitor {
 
   // Process device copy call node
   void UnifyDeviceCopyCall(const CallNode* call) {
-    CHECK_EQ(call->args.size(), 1U);
+    ICHECK_EQ(call->args.size(), 1U);
 
     std::vector<Expr> inps{call->args[0]};
     std::vector<Expr> outs{GetRef<Call>(call)};
@@ -455,13 +455,13 @@ class ContextAnalyzer : public MixedModeVisitor {
       inps.push_back(fn->params[0]);
       outs.push_back(call->op);
       Expr body = fn->body;
-      CHECK(body->IsInstance<CallNode>() && IsDeviceCopy(body));
+      ICHECK(body->IsInstance<CallNode>() && IsDeviceCopy(body));
       Call call_body = Downcast<Call>(body);
       attrs = call_body->attrs.as<DeviceCopyAttrs>();
     } else {
       attrs = call->attrs.as<DeviceCopyAttrs>();
     }
-    CHECK(attrs != nullptr);
+    ICHECK(attrs != nullptr);
     src_dev_type = static_cast<DLDeviceType>(attrs->src_dev_type);
     dst_dev_type = static_cast<DLDeviceType>(attrs->dst_dev_type);
 
@@ -474,7 +474,7 @@ class ContextAnalyzer : public MixedModeVisitor {
 
   void UnifyAllocStorageCall(const CallNode* call) {
     // [size, alignment]
-    CHECK_EQ(call->args.size(), 2U);
+    ICHECK_EQ(call->args.size(), 2U);
 
     // The arguments of alloc storage should be on CPU.
     for (int i = 0; i < 2; i++) {
@@ -490,7 +490,7 @@ class ContextAnalyzer : public MixedModeVisitor {
 
   void UnifyAllocTensorCall(const CallNode* call) {
     // [storage, offset, shape]
-    CHECK_EQ(call->args.size(), 3U);
+    ICHECK_EQ(call->args.size(), 3U);
 
     Expr storage = call->args[0];
     Expr shape = call->args[1];
@@ -503,7 +503,7 @@ class ContextAnalyzer : public MixedModeVisitor {
 
   void UnifyShapeFuncCall(const CallNode* call) {
     // [func, inputs, outputs]
-    CHECK_EQ(call->args.size(), 3U);
+    ICHECK_EQ(call->args.size(), 3U);
     auto shape_func_domain = DeviceType(cpu_ctx_);
 
     // No need to unify the op of a shape_func as shape_func doesn't
@@ -523,7 +523,7 @@ class ContextAnalyzer : public MixedModeVisitor {
 
   void UnifyInvokeTVMOpCall(const CallNode* call) {
     // [op, inputs, outputs]
-    CHECK_EQ(call->args.size(), 3U);
+    ICHECK_EQ(call->args.size(), 3U);
     Tuple inps = Downcast<Tuple>(call->args[1]);
     Tuple outputs = Downcast<Tuple>(call->args[2]);
     UnifyCall(call->args[0], inps->fields, outputs->fields, Bottom());
@@ -532,7 +532,7 @@ class ContextAnalyzer : public MixedModeVisitor {
 
   void UnifyShapeOfCall(const CallNode* call) {
     // vm shape_of is always on the CPU.
-    CHECK_EQ(call->args.size(), 1U);
+    ICHECK_EQ(call->args.size(), 1U);
     MixedModeVisitor::VisitExpr(call->args[0]);
     // Note we don't unify the input of a shape_of with the cpu domain. This is
     // because vm.shape_of has a native instruction to compute the shape of
@@ -544,7 +544,7 @@ class ContextAnalyzer : public MixedModeVisitor {
 
   void UnifyReshapeTensorCall(const CallNode* call) {
     // [data, shape]
-    CHECK_EQ(call->args.size(), 2U);
+    ICHECK_EQ(call->args.size(), 2U);
     Expr data = call->args[0];
     Expr shape = call->args[1];
     Unify(DeviceFor(GetRef<Call>(call)), DeviceFor(data));
@@ -583,10 +583,10 @@ class ContextAnalyzer : public MixedModeVisitor {
   // Invoke a global function.
   void UnifyGlobalVarCall(const CallNode* call) {
     auto device = DeviceFor(GetRef<Call>(call));
-    CHECK(mod_.defined()) << "Cannot analyze context on a globalvar without module";
+    ICHECK(mod_.defined()) << "Cannot analyze context on a globalvar without module";
     GlobalVar gv = Downcast<GlobalVar>(call->op);
     auto func = Downcast<Function>(mod_->Lookup(gv));
-    CHECK_EQ(call->args.size(), func->params.size())
+    ICHECK_EQ(call->args.size(), func->params.size())
         << "The number of arguments doesn't match the number of parameters of the function.";
 
     for (size_t i = 0; i < call->args.size(); i++) {
@@ -596,14 +596,14 @@ class ContextAnalyzer : public MixedModeVisitor {
 
      // Save the arg-to-function mapping for closures as it will
       // be invoked/unified later.
-      CHECK(arg->checked_type().defined())
+      ICHECK(arg->checked_type().defined())
           << "Type inference is required to run the context analysis passes.";
       if (arg->checked_type()->IsInstance<FuncTypeNode>()) {
         auto it = closures_.find(arg);
         if (it != closures_.end()) {
           closures_[param] = it->second;
         } else {
-          CHECK(arg->IsInstance<GlobalVarNode>());
+          ICHECK(arg->IsInstance<GlobalVarNode>());
           closures_[param] = Downcast<GlobalVar>(arg);
         }
       }
@@ -631,9 +631,9 @@ class ContextAnalyzer : public MixedModeVisitor {
    // Unify the corresponding argument and parameter.
     auto device = DeviceFor(GetRef<Call>(call));
     auto it = closures_.find(call->op);
-    CHECK(it != closures_.end()) << "Cannot find var: " << call->op;
+    ICHECK(it != closures_.end()) << "Cannot find var: " << call->op;
     auto glb_var = it->second;
-    CHECK(mod_.defined()) << "Cannot analyze context on a globalvar without module";
+    ICHECK(mod_.defined()) << "Cannot analyze context on a globalvar without module";
     Function func = Downcast<Function>(mod_->Lookup(glb_var));
    // Unify the underlying function for closure or currying functions.
     while (IsClosure(func) || IsCurrying(func)) {
@@ -648,7 +648,7 @@ class ContextAnalyzer : public MixedModeVisitor {
       }
     }
 
-    CHECK_EQ(call->args.size(), func->params.size());
+    ICHECK_EQ(call->args.size(), func->params.size());
     for (size_t i = 0; i < call->args.size(); i++) {
       Unify(DeviceFor(call->args[i]), DeviceFor(func->params[i]));
       MixedModeVisitor::VisitExpr(call->args[i]);
diff --git a/src/relay/analysis/dependency_graph.cc b/src/relay/analysis/dependency_graph.cc
index de61800..3a4fb59 100644
--- a/src/relay/analysis/dependency_graph.cc
+++ b/src/relay/analysis/dependency_graph.cc
@@ -50,7 +50,7 @@ class DependencyGraph::Creator : private ExprFunctor<void(const Expr& e)> {
   void Depend(DependencyGraph::Node* parent, const Expr& child) {
     VisitExpr(child);
 
-    CHECK_NE(graph_.expr_node.count(child), 0);
+    ICHECK_NE(graph_.expr_node.count(child), 0);
 
     Depend(parent, graph_.expr_node[child]);
   }
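
The ICHECK_NE above guards an unordered-map lookup: subscripting would
silently default-construct a missing entry, so asserting count() != 0 first
turns a missed VisitExpr into a loud failure instead of a bogus null node.
A generic sketch of the same guard (assumes the ICHECK macros from
include/tvm/support/logging.h):

    #include <string>
    #include <unordered_map>

    // Illustrative only: assert presence before operator[], since
    // subscripting an unordered_map inserts a default value for missing
    // keys.
    int LookupStrict(std::unordered_map<std::string, int>* table,
                     const std::string& key) {
      ICHECK_NE(table->count(key), 0) << "key not found: " << key;
      return (*table)[key];
    }
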
diff --git a/src/relay/analysis/feature.cc b/src/relay/analysis/feature.cc
index b3516e9..f72b4e1 100644
--- a/src/relay/analysis/feature.cc
+++ b/src/relay/analysis/feature.cc
@@ -114,7 +114,7 @@ std::string FeatureSet::ToString() const {
   DETECT_FEATURE(fGraph);
   DETECT_FEATURE(fLetRec);
 #undef DETECT_FEATURE
-  CHECK(detected == feature_count) << "some feature not printed";
+  ICHECK(detected == feature_count) << "some feature not printed";
   ret += "]";
   return ret;
 }
@@ -139,8 +139,8 @@ TVM_REGISTER_GLOBAL("relay.analysis.detect_feature").set_body_typed(PyDetectFeat
 
 void CheckFeature(const Expr& expr, const FeatureSet& fs) {
   auto dfs = DetectFeature(expr);
-  CHECK(dfs.is_subset_of(fs)) << AsText(expr, false)
-                              << "\nhas unsupported feature: " << (dfs - fs).ToString();
+  ICHECK(dfs.is_subset_of(fs)) << AsText(expr, false)
+                               << "\nhas unsupported feature: " << (dfs - fs).ToString();
 }
 
 void CheckFeature(const IRModule& mod, const FeatureSet& fs) {
diff --git a/src/relay/analysis/get_calibration_data.cc b/src/relay/analysis/get_calibration_data.cc
index 34d0d00..70fe2a6 100644
--- a/src/relay/analysis/get_calibration_data.cc
+++ b/src/relay/analysis/get_calibration_data.cc
@@ -52,7 +52,7 @@ class Collector : public ExprRewriter {
     // intrinsic functions are excluded for now
     if (call->op->IsInstance<GlobalVarNode>()) {
       auto var = Downcast<GlobalVar>(call->op);
-      CHECK(module_->ContainGlobalVar(var->name_hint)) << "Function " << var << " is not defined";
+      ICHECK(module_->ContainGlobalVar(var->name_hint)) << "Function " << var << " is not defined";
       // we only handle functions with Compiler attribute set
       auto func = Downcast<Function>(module_->Lookup(var));
       if (func->GetAttr<String>(attr::kCompiler)) {
@@ -74,10 +74,10 @@ class Collector : public ExprRewriter {
 Expr FlattenOutputTuple(const Array<Expr>& exprs) {
   Array<Expr> fields;
   for (const auto& it : exprs) {
-    CHECK(it->checked_type_.defined());
+    ICHECK(it->checked_type_.defined());
     if (auto* tn = it->checked_type_.as<TupleTypeNode>()) {
       // TODO(seanlatias): for now input argument cannot be a tuple
-      CHECK(it->IsInstance<CallNode>());
+      ICHECK(it->IsInstance<CallNode>());
       for (size_t i = 0; i < tn->fields.size(); i++) {
         fields.push_back(TupleGetItem(it, i));
       }
@@ -140,8 +140,8 @@ class OutputMapper : public ExprRewriter {
   Expr Rewrite_(const CallNode* call, const Expr& post) final {
     if (call->op->IsInstance<GlobalVarNode>()) {
       auto var = Downcast<GlobalVar>(call->op);
-      CHECK(module_->ContainGlobalVar(var->name_hint)) << "Function " << var << " is not defined";
-      CHECK_EQ(output_map_->count(var), 0)
+      ICHECK(module_->ContainGlobalVar(var->name_hint)) << "Function " << var << " is not defined";
+      ICHECK_EQ(output_map_->count(var), 0)
           << "Repeated function call " << var << " is not supported.";
       auto func = Downcast<Function>(module_->Lookup(var));
       // we only handle functions with Compiler attribute set
diff --git a/src/relay/analysis/mac_count.cc b/src/relay/analysis/mac_count.cc
index 5e35ab7..29edf55 100644
--- a/src/relay/analysis/mac_count.cc
+++ b/src/relay/analysis/mac_count.cc
@@ -65,24 +65,24 @@ int64_t ConvMacCount(const Call& call_node) {
     return 0;
   }
   Array<Expr> args = call_node->args;
-  CHECK_EQ(args.size(), 2) << "The number of input arguments of a CONV 2D node should be 2.";
+  ICHECK_EQ(args.size(), 2) << "The number of input arguments of a CONV 2D node should be 2.";
   const auto* conv_2d_attr = call_node->attrs.as<Conv2DAttrs>();
   const auto* data_type = args[0]->checked_type().as<TensorTypeNode>();
   Array<IndexExpr> data_shape = data_type->shape;
   std::string data_layout = conv_2d_attr->data_layout;
   int32_t C_ind = Layout(data_layout).IndexOf(LayoutAxis::Get('C'));
   int32_t c_ind = Layout(data_layout).IndexOf(LayoutAxis::Get('c'));
-  CHECK_NE(C_ind, -1) << "There is no input channel dimension.";
+  ICHECK_NE(C_ind, -1) << "There is no input channel dimension.";
   int64_t input_channel = static_cast<int64_t>(data_shape[C_ind].as<IntImmNode>()->value);
   if (c_ind != -1) input_channel *= static_cast<int64_t>(data_shape[c_ind].as<IntImmNode>()->value);
   Array<IndexExpr> kernel_size = conv_2d_attr->kernel_size;
-  CHECK_EQ(kernel_size.size(), 2) << "The dimension of the kernel in Conv 2D should be 2.";
+  ICHECK_EQ(kernel_size.size(), 2) << "The dimension of the kernel in Conv 2D should be 2.";
   const auto* expr = call_node->checked_type().as<TensorTypeNode>();
   Array<IndexExpr> output_tensor = expr->shape;
-  CHECK(output_tensor.size() == 4 || output_tensor.size() == 5)
+  ICHECK(output_tensor.size() == 4 || output_tensor.size() == 5)
       << "The dimension of the output tensor in Conv 2D should be 4 or 5.";
   int64_t count = GetCartesianProd(output_tensor) * GetCartesianProd(kernel_size);
-  CHECK_EQ(input_channel % conv_2d_attr->groups, 0)
+  ICHECK_EQ(input_channel % conv_2d_attr->groups, 0)
       << "The number of input channels is not divisble by groups.";
   count *= input_channel / conv_2d_attr->groups;
   return count;
@@ -94,7 +94,7 @@ int64_t Conv2dTransposeMacCount(const Call& call_node) {
     return 0;
   }
   Array<Expr> args = call_node->args;
-  CHECK_EQ(args.size(), 2)
+  ICHECK_EQ(args.size(), 2)
       << "The number of input arguments of a CONV 2D Transpose node should be 2.";
   const auto* conv_2d_transpose_attr = call_node->attrs.as<Conv2DTransposeAttrs>();
   const auto* data_type = args[0]->checked_type().as<TensorTypeNode>();
@@ -102,18 +102,18 @@ int64_t Conv2dTransposeMacCount(const Call& call_node) {
   std::string data_layout = conv_2d_transpose_attr->data_layout;
   int32_t C_ind = Layout(data_layout).IndexOf(LayoutAxis::Get('C'));
   int32_t c_ind = Layout(data_layout).IndexOf(LayoutAxis::Get('c'));
-  CHECK_NE(C_ind, -1) << "There is no input channel dimension.";
+  ICHECK_NE(C_ind, -1) << "There is no input channel dimension.";
   int64_t input_channel = static_cast<int64_t>(data_shape[C_ind].as<IntImmNode>()->value);
   if (c_ind != -1) input_channel *= static_cast<int64_t>(data_shape[c_ind].as<IntImmNode>()->value);
   Array<IndexExpr> kernel_size = conv_2d_transpose_attr->kernel_size;
-  CHECK_EQ(kernel_size.size(), 2)
+  ICHECK_EQ(kernel_size.size(), 2)
       << "The dimension of the kernel in Conv 2D Transpose should be 2.";
   const auto* expr = call_node->checked_type().as<TensorTypeNode>();
   Array<IndexExpr> output_tensor = expr->shape;
-  CHECK(output_tensor.size() == 4 || output_tensor.size() == 5)
+  ICHECK(output_tensor.size() == 4 || output_tensor.size() == 5)
       << "The dimension of the output tensor in Conv 2D Transpose should be 4 or 5.";
   int64_t count = GetCartesianProd(output_tensor) * GetCartesianProd(kernel_size);
-  CHECK_EQ(input_channel % conv_2d_transpose_attr->groups, 0)
+  ICHECK_EQ(input_channel % conv_2d_transpose_attr->groups, 0)
       << "The number of input channels is not divisble by groups.";
   count *= input_channel / conv_2d_transpose_attr->groups;
   return count;
@@ -125,18 +125,18 @@ int64_t DenseMacCount(const Call& call_node) {
     return 0;
   }
   Array<Expr> args = call_node->args;
-  CHECK_EQ(args.size(), 2) << "The number of input arguments of a Dense node should be 2.";
+  ICHECK_EQ(args.size(), 2) << "The number of input arguments of a Dense node should be 2.";
   const auto* data_type = args[0]->checked_type().as<TensorTypeNode>();
   const auto* weight_type = args[1]->checked_type().as<TensorTypeNode>();
   Array<IndexExpr> data_shape = data_type->shape;
   Array<IndexExpr> weight_shape = weight_type->shape;
-  CHECK(data_shape.size() == 2 && weight_shape.size() == 2)
+  ICHECK(data_shape.size() == 2 && weight_shape.size() == 2)
       << "The dimension of an input tensor to Dense node should be 2.";
   int64_t d1 = static_cast<int64_t>(data_shape[0].as<IntImmNode>()->value);
   int64_t d2 = static_cast<int64_t>(data_shape[1].as<IntImmNode>()->value);
   int64_t d3 = static_cast<int64_t>(weight_shape[0].as<IntImmNode>()->value);
   int64_t d4 = static_cast<int64_t>(weight_shape[1].as<IntImmNode>()->value);
-  CHECK_EQ(d2, d4) << "The dimensions of input arguments do not match.";
+  ICHECK_EQ(d2, d4) << "The dimensions of input arguments do not match.";
   int64_t count = d1 * d2 * d3;
   return count;
 }
@@ -147,7 +147,7 @@ int64_t BatchMatmulMacCount(const Call& call_node) {
     return 0;
   }
   Array<Expr> args = call_node->args;
-  CHECK_EQ(args.size(), 2);
+  ICHECK_EQ(args.size(), 2);
   Array<IndexExpr> x_shape = args[0]->checked_type().as<TensorTypeNode>()->shape;
   Array<IndexExpr> y_shape = args[1]->checked_type().as<TensorTypeNode>()->shape;
   int64_t batch = x_shape[0].as<IntImmNode>()->value;
diff --git a/src/relay/analysis/match_exhaustion.cc b/src/relay/analysis/match_exhaustion.cc
index e852c40..bb6e8f1 100644
--- a/src/relay/analysis/match_exhaustion.cc
+++ b/src/relay/analysis/match_exhaustion.cc
@@ -68,7 +68,7 @@ class CandidateChecker : public PatternFunctor<MatchResult(const Pattern&, const
     }
 
     // now check that subpatterns match
-    CHECK_EQ(op->patterns.size(), ctor_cand->patterns.size());
+    ICHECK_EQ(op->patterns.size(), ctor_cand->patterns.size());
     bool unspecified = false;
     for (size_t i = 0; i < op->patterns.size(); i++) {
       MatchResult submatch = this->Check(op->patterns[i], ctor_cand->patterns[i]);
@@ -95,7 +95,7 @@ class CandidateChecker : public PatternFunctor<MatchResult(const Pattern&, const
     }
 
     // now check that subpatterns match
-    CHECK_EQ(op->patterns.size(), tuple_cand->patterns.size());
+    ICHECK_EQ(op->patterns.size(), tuple_cand->patterns.size());
     bool unspecified = false;
     for (size_t i = 0; i < op->patterns.size(); i++) {
       MatchResult submatch = this->Check(op->patterns[i], tuple_cand->patterns[i]);
@@ -126,7 +126,7 @@ class CandidateChecker : public PatternFunctor<MatchResult(const Pattern&, const
 
 // Returns list of arrays corresponding to Cartesian product of input list
 Array<Array<Pattern>> CartesianProduct(Array<Array<Pattern>> fields) {
-  CHECK_NE(fields.size(), 0);
+  ICHECK_NE(fields.size(), 0);
   Array<Pattern> field_vals = fields[fields.size() - 1];
   Array<Array<Pattern>> ret;
 
diff --git a/src/relay/analysis/type_solver.cc b/src/relay/analysis/type_solver.cc
index 8c1cc92..55f7368 100644
--- a/src/relay/analysis/type_solver.cc
+++ b/src/relay/analysis/type_solver.cc
@@ -114,14 +114,14 @@ class TypeSolver::Unifier : public TypeFunctor<Type(const Type&, const Type&)> {
     }
 
     if (lhs->resolved_type.as<IncompleteTypeNode>()) {
-      CHECK(!OccursCheck(lhs, rhs->resolved_type))
+      ICHECK(!OccursCheck(lhs, rhs->resolved_type))
           << "Incomplete type " << lhs->resolved_type << " occurs in " << rhs->resolved_type
           << ", cannot unify";
 
       solver_->MergeFromTo(lhs, rhs);
       return rhs->resolved_type;
     } else if (rhs->resolved_type.as<IncompleteTypeNode>()) {
-      CHECK(!OccursCheck(rhs, lhs->resolved_type))
+      ICHECK(!OccursCheck(rhs, lhs->resolved_type))
           << "Incomplete type " << rhs->resolved_type << " occurs in " << lhs->resolved_type
           << ", cannot unify";
       solver_->MergeFromTo(rhs, lhs);
@@ -242,7 +242,7 @@ class TypeSolver::Unifier : public TypeFunctor<Type(const Type&, const Type&)> {
 
     std::vector<std::tuple<size_t, IndexExpr, IndexExpr>> mismatches;
 
-    CHECK_EQ(tt1->shape.size(), tt2->shape.size());
+    ICHECK_EQ(tt1->shape.size(), tt2->shape.size());
     for (size_t i = 0; i < tt1->shape.size(); i++) {
       auto dim = UnifyDim(tt1->shape[i], tt2->shape[i]);
       if (!dim.defined()) {
@@ -328,8 +328,8 @@ class TypeSolver::Unifier : public TypeFunctor<Type(const Type&, const Type&)> {
     for (size_t i = 0; i < ft1->type_constraints.size(); ++i) {
       Type unified_constraint = Unify(ft1->type_constraints[i], ft2->type_constraints[i]);
       const auto* tcn = unified_constraint.as<TypeConstraintNode>();
-      CHECK(tcn) << "Two type constraints unified into a non-constraint?"
-                 << ft1->type_constraints[i] << " and " << ft2->type_constraints[i];
+      ICHECK(tcn) << "Two type constraints unified into a non-constraint?"
+                  << ft1->type_constraints[i] << " and " << ft2->type_constraints[i];
       type_constraints.push_back(GetRef<TypeConstraint>(tcn));
     }
 
@@ -527,7 +527,7 @@ TypeSolver::TypeSolver(const GlobalVar& current_func, DiagnosticContext diag_ctx
       current_func(current_func),
       diag_ctx_(diag_ctx),
       module_(diag_ctx->module) {
-  CHECK(module_.defined());
+  ICHECK(module_.defined());
 }
 
 // destructor
@@ -593,12 +593,12 @@ bool TypeSolver::Solve() {
     RelationNode* rnode = update_queue_.front();
     const auto& rel = rnode->rel;
     update_queue_.pop();
-    CHECK(!rnode->resolved);
+    ICHECK(!rnode->resolved);
     // update the relation with given evidence.
     Array<Type> args;
     for (auto* tlink = rnode->type_list.head; tlink != nullptr; tlink = tlink->next) {
       args.push_back(Resolve(tlink->value->FindRoot()->resolved_type));
-      CHECK_LE(args.size(), rel->args.size());
+      ICHECK_LE(args.size(), rel->args.size());
     }
 
     // We need to set this in order to understand where unification
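
The ICHECK(!OccursCheck(...)) calls above guard the classic unification
pitfall: binding an incomplete type to a type that already contains it would
create an infinite type. Below is a minimal occurs-check sketch over a toy
type representation; Ty and Occurs are illustrative assumptions, not TVM's API.

    #include <memory>
    #include <vector>

    // Toy type: var_id >= 0 marks a type variable, otherwise a constructor
    // applied to children.
    struct Ty {
      int var_id = -1;
      std::vector<std::shared_ptr<Ty>> children;
    };

    // True if variable `v` appears anywhere inside `t`. Unifying v with t
    // in that case would produce t = F(..., t, ...), an infinite type.
    bool Occurs(int v, const Ty& t) {
      if (t.var_id == v) return true;
      for (const auto& c : t.children) {
        if (Occurs(v, *c)) return true;
      }
      return false;
    }
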
diff --git a/src/relay/analysis/type_solver.h b/src/relay/analysis/type_solver.h
index 1fc0525..4ae2e6a 100644
--- a/src/relay/analysis/type_solver.h
+++ b/src/relay/analysis/type_solver.h
@@ -208,7 +208,7 @@ class TypeSolver {
    */
   void AddToQueue(RelationNode* rel) {
     if (rel->inqueue) return;
-    CHECK(!rel->resolved);
+    ICHECK(!rel->resolved);
     rel->inqueue = true;
     update_queue_.push(rel);
   }
diff --git a/src/relay/analysis/util.cc b/src/relay/analysis/util.cc
index edf8fb6..bcfbc83 100644
--- a/src/relay/analysis/util.cc
+++ b/src/relay/analysis/util.cc
@@ -358,9 +358,9 @@ std::unordered_map<const Object*, size_t> GetExprRefCount(const Expr& body) {
 
 template <typename T>
 bool IsNDArrayAllGreaterEqual(const runtime::NDArray& tensor, T value) {
-  CHECK_EQ(tensor->ctx.device_type, kDLCPU);
-  CHECK(tensor->strides == nullptr);
-  CHECK_EQ(tensor->byte_offset, 0);
+  ICHECK_EQ(tensor->ctx.device_type, kDLCPU);
+  ICHECK(tensor->strides == nullptr);
+  ICHECK_EQ(tensor->byte_offset, 0);
   const T* data = static_cast<const T*>(tensor->data);
   int64_t num_elems = 1;
   for (int i = 0; i < tensor->ndim; ++i) {
@@ -446,10 +446,10 @@ Expr TypeSubst(const Expr& expr, const tvm::Map<TypeVar, Type>& subst_map) {
    private:
     const tvm::Map<TypeVar, Type>& subst_map_;
   };
-  CHECK(WellFormed(expr));
+  ICHECK(WellFormed(expr));
   auto ret = TypeSubstMutator(subst_map).VisitExpr(expr);
-  CHECK_EQ(FreeVars(expr).size(), FreeVars(ret).size());
-  CHECK(WellFormed(ret));
+  ICHECK_EQ(FreeVars(expr).size(), FreeVars(ret).size());
+  ICHECK(WellFormed(ret));
   return ret;
 }
 
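
IsNDArrayAllGreaterEqual above is a good example of why these are ICHECKs: the
raw-pointer scan is only meaningful if the tensor is CPU-resident, compact (no
strides), and has zero byte offset, and any caller that violates that is a bug
inside the compiler, not a user error. A sketch of the scan those invariants
enable, with hypothetical names:

    #include <cstdint>

    // Assumes the three invariants asserted above already hold, so the
    // element count is simply the product of the dimensions.
    template <typename T>
    bool AllGreaterEqual(const T* data, const int64_t* shape, int ndim, T value) {
      int64_t num_elems = 1;
      for (int i = 0; i < ndim; ++i) num_elems *= shape[i];
      for (int64_t i = 0; i < num_elems; ++i) {
        if (data[i] < value) return false;
      }
      return true;
    }
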
diff --git a/src/relay/analysis/well_formed.cc b/src/relay/analysis/well_formed.cc
index 0b6e043..856c5dc 100644
--- a/src/relay/analysis/well_formed.cc
+++ b/src/relay/analysis/well_formed.cc
@@ -59,9 +59,9 @@ class WellFormedChecker : private MixedModeVisitor, PatternVisitor {
     WellFormedChecker* wfc;
     explicit Scope(WellFormedChecker* wfc) : wfc(wfc) { wfc->scope.push_back({{}}); }
     ~Scope() {
-      CHECK_GE(wfc->scope.size(), 0);
+      ICHECK_GE(wfc->scope.size(), 1);
       for (const Var& v : wfc->scope.back()) {
-        CHECK_GE(wfc->current_bound.count(v), 0);
+        ICHECK_GE(wfc->current_bound.count(v), 1);
         wfc->current_bound.erase(v);
       }
       wfc->scope.pop_back();
@@ -73,7 +73,7 @@ class WellFormedChecker : private MixedModeVisitor, PatternVisitor {
       Illformed(Diagnostic::Error(v->span) << "the variable " << v->name_hint()
                                            << "is bound more then once, this is not valid IR");
     }
-    CHECK_GE(scope.size(), 0);
+    ICHECK_GE(scope.size(), 1);
     scope.back().insert(v);
     current_bound.insert(v);
     total_bound.insert(v);
@@ -120,14 +120,14 @@ class WellFormedChecker : private MixedModeVisitor, PatternVisitor {
   }
 
   void VisitExpr_(const CallNode* call) final {
-    CHECK(call->op.defined());
+    ICHECK(call->op.defined());
 
     for (auto arg : call->args) {
-      CHECK(arg.defined());
+      ICHECK(arg.defined());
     }
 
-    // CHECK(call->attrs.defined());
-    CHECK(call->type_args.defined());
+    // ICHECK(call->attrs.defined());
+    ICHECK(call->type_args.defined());
     MixedModeVisitor::VisitExpr_(call);
   }
 
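
WellFormedChecker::Scope above is an RAII guard: constructing it opens a fresh
variable scope and the destructor unwinds it, asserting the bookkeeping is
still consistent on the way out. A stripped-down sketch of the same pattern
(plain assert stands in for ICHECK so the snippet is self-contained; all names
are illustrative):

    #include <cassert>
    #include <string>
    #include <unordered_set>
    #include <vector>

    using VarSet = std::unordered_set<std::string>;

    struct ScopeGuard {
      std::vector<VarSet>* stack;
      explicit ScopeGuard(std::vector<VarSet>* s) : stack(s) { stack->push_back({}); }
      ~ScopeGuard() {
        assert(!stack->empty());  // internal invariant, an ICHECK in TVM proper
        stack->pop_back();        // everything bound in this scope is released
      }
    };
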
diff --git a/src/relay/backend/build_module.cc b/src/relay/backend/build_module.cc
index 64f1253..ddea545 100644
--- a/src/relay/backend/build_module.cc
+++ b/src/relay/backend/build_module.cc
@@ -124,7 +124,7 @@ class RelayBuildModule : public runtime::ModuleNode {
           [sptr_to_self, this](TVMArgs args, TVMRetValue* rv) { *rv = this->GetModule(); });
     } else if (name == "build") {
       return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
-        CHECK_EQ(args.num_args, 3);
+        ICHECK_EQ(args.num_args, 3);
         this->Build(args[0], args[1], args[2]);
       });
     } else if (name == "list_params") {
@@ -150,7 +150,7 @@ class RelayBuildModule : public runtime::ModuleNode {
       });
     } else if (name == "optimize") {
       return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
-        CHECK_EQ(args.num_args, 2);
+        ICHECK_EQ(args.num_args, 2);
         *rv = this->Optimize(args[0], args[1], this->params_);
       });
     } else {
@@ -244,7 +244,7 @@ class RelayBuildModule : public runtime::ModuleNode {
     ICHECK(relay_module.defined()) << "The IRModule must be defined for the Relay compiler.";
 
     if (params.size()) {
-      CHECK(relay_module->ContainGlobalVar("main")) << "Missing the main entry function";
+      ICHECK(relay_module->ContainGlobalVar("main")) << "Missing the main entry function";
       GlobalVar main_glb_var = relay_module->GetGlobalVar("main");
       Function main_func = Downcast<Function>(relay_module->Lookup(main_glb_var));
       auto new_main = BindParamsByName(main_func, params);
@@ -319,7 +319,7 @@ class RelayBuildModule : public runtime::ModuleNode {
       Optional<Integer> opt_fallback_dev =
           pass_ctx->GetConfig("relay.fallback_device_type", Integer(static_cast<int>(kDLCPU)));
       auto fallback_dev = opt_fallback_dev.value();
-      CHECK_GT(fallback_dev->value, 0U);
+      ICHECK_GT(fallback_dev->value, 0U);
       relay_module = RunDeviceAnnotationPass(relay_module, fallback_dev->value);
     }
 
@@ -335,7 +335,7 @@ class RelayBuildModule : public runtime::ModuleNode {
     relay_module = transform::Inline()(relay_module);
     relay_module = transform::InferType()(relay_module);
 
-    CHECK(relay_module.defined());
+    ICHECK(relay_module.defined());
 
     return relay_module;
   }
@@ -383,7 +383,7 @@ class RelayBuildModule : public runtime::ModuleNode {
     UpdateHeterogeneousInputs(fallback_device);
     auto rewrite = transform::RewriteAnnotatedOps(fallback_device);
     auto updated_module = rewrite(relay_module);
-    CHECK(updated_module.defined());
+    ICHECK(updated_module.defined());
 
     tvm::Map<Expr, Integer> device_map;
     for (const auto& it : updated_module->functions) {
@@ -408,11 +408,11 @@ class RelayBuildModule : public runtime::ModuleNode {
           break;
         }
         for (auto kv : annotation_map) {
-          CHECK_EQ(kv.second->value, dev_type) << "Expressions in the function are "
-                                               << "annotated with various device types,"
-                                               << "but not device copy operators "
-                                               << "found. Please check the "
-                                               << "RewriteAnnotation pass.";
+          ICHECK_EQ(kv.second->value, dev_type) << "Expressions in the function are "
+                                                << "annotated with various device types, "
+                                                << "but no device copy operators were "
+                                                << "found. Please check the "
+                                                << "RewriteAnnotation pass.";
         }
         targets_.Set(0, CreateDefaultTarget(dev_type));
       }
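
The "build" and "optimize" entries above check args.num_args before unpacking,
so a caller that binds the wrong arity hits an internal error naming both
counts instead of a garbage read. A hedged sketch of that registration shape
(the lambda body is assumed for illustration; PackedFunc/TVMArgs are the real
TVM runtime types, and ICHECK comes in via the TVM headers):

    #include <tvm/runtime/packed_func.h>

    using tvm::runtime::PackedFunc;
    using tvm::runtime::TVMArgs;
    using tvm::runtime::TVMRetValue;

    PackedFunc build_func([](TVMArgs args, TVMRetValue* rv) {
      ICHECK_EQ(args.num_args, 3);  // fail fast on a mis-bound caller
      // ... unpack args[0..2] and run the build ...
    });
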
diff --git a/src/relay/backend/compile_engine.cc b/src/relay/backend/compile_engine.cc
index d720e94..556687c 100644
--- a/src/relay/backend/compile_engine.cc
+++ b/src/relay/backend/compile_engine.cc
@@ -79,8 +79,8 @@ Array<IndexExpr> GetShape(const Array<IndexExpr>& shape) {
     const int64_t* pval = tir::as_const_int(val);
     if (pval != nullptr) {
 #ifndef TVM_INDEX_DEFAULT_I64
-      CHECK_LE(pval[0], std::numeric_limits<int32_t>::max());
-      CHECK_GE(pval[0], std::numeric_limits<int32_t>::min());
+      ICHECK_LE(pval[0], std::numeric_limits<int32_t>::max());
+      ICHECK_GE(pval[0], std::numeric_limits<int32_t>::min());
       res.push_back(IntImm(DataType::Int(32), *pval));
 #else
       res.push_back(val);
@@ -116,7 +116,7 @@ class ScheduleGetter : public backend::MemoizedExprTranslator<Array<te::Tensor>>
         for (Type field : tuple_type->fields) {
           const auto* ttype = field.as<TensorTypeNode>();
           // TODO(@icemelon): Allow recursive tuple
-          CHECK(ttype != nullptr);
+          ICHECK(ttype != nullptr);
           tvm::te::Tensor tensor = tvm::te::placeholder(GetShape(ttype->shape), ttype->dtype);
           cache_node->inputs.push_back(tensor);
           inputs.push_back(tensor);
@@ -135,7 +135,7 @@ class ScheduleGetter : public backend::MemoizedExprTranslator<Array<te::Tensor>>
       candidate_name = truncated_name.str();
     }
     cache_node->func_name = candidate_name;
-    CHECK(anchor_op_.defined());
+    ICHECK(anchor_op_.defined());
     // Fusion over tupled results may leave identity relationships
     // between inputs and outputs, and those should not be scheduled.
     // Hence schedule only non PlaceholderOp outputs.
@@ -148,7 +148,7 @@ class ScheduleGetter : public backend::MemoizedExprTranslator<Array<te::Tensor>>
     te::Schedule schedule;
     // No need to register schedule for device copy op.
     if (anchor_attrs_.as<DeviceCopyAttrs>() == nullptr) {
-      CHECK(anchor_implementation_.defined());
+      ICHECK(anchor_implementation_.defined());
       schedule = anchor_implementation_.Schedule(anchor_attrs_, tensor_outs, target_);
       for (const auto& scalar : scalars_) {
         if (schedule->Contain(scalar)) {
@@ -167,7 +167,7 @@ class ScheduleGetter : public backend::MemoizedExprTranslator<Array<te::Tensor>>
 
   Array<te::Tensor> VisitExpr_(const ConstantNode* op) final {
     using tir::make_const;
-    CHECK(op->is_scalar());
+    ICHECK(op->is_scalar());
     void* data = op->data->data;
     DataType dtype = DataType(op->data->dtype);
     auto value = te::compute(
@@ -196,7 +196,7 @@ class ScheduleGetter : public backend::MemoizedExprTranslator<Array<te::Tensor>>
   Array<te::Tensor> VisitExpr_(const CallNode* call_node) final {
     static auto fpattern = Op::GetAttrMap<TOpPattern>("TOpPattern");
     static auto flower_call = tvm::runtime::Registry::Get("relay.backend.lower_call");
-    CHECK(flower_call) << "relay.backend.lower_call is not registered.";
+    ICHECK(flower_call) << "relay.backend.lower_call is not registered.";
 
     Array<te::Tensor> inputs;
     int count_tuple = 0;
@@ -209,10 +209,10 @@ class ScheduleGetter : public backend::MemoizedExprTranslator<Array<te::Tensor>>
       }
     }
     if (count_tuple) {
-      CHECK_EQ(call_node->args.size(), 1U) << "Only allow function with a single tuple input";
+      ICHECK_EQ(call_node->args.size(), 1U) << "Only allow function with a single tuple input";
     }
 
-    CHECK(call_node->op.as<OpNode>()) << "Primitive function only allows call into primitive ops";
+    ICHECK(call_node->op.as<OpNode>()) << "Primitive function only allows call into primitive ops";
     Op op = Downcast<Op>(call_node->op);
 
     Array<te::Tensor> outputs;
@@ -229,7 +229,7 @@ class ScheduleGetter : public backend::MemoizedExprTranslator<Array<te::Tensor>>
 
     int op_pattern = fpattern[op];
     if (op_pattern >= kCommReduce) {
-      CHECK(!anchor_op_.defined() || anchor_op_pattern_ < kCommReduce)
+      ICHECK(!anchor_op_.defined() || anchor_op_pattern_ < kCommReduce)
           << "Two complicated op in a primitive function "
           << " anchor=" << anchor_op_ << " current=" << op;
     }
@@ -241,8 +241,8 @@ class ScheduleGetter : public backend::MemoizedExprTranslator<Array<te::Tensor>>
     }
     if (outputs.size() != 1) {
       const auto* tuple_type = call_node->checked_type().as<TupleTypeNode>();
-      CHECK(tuple_type) << "Expect output to be a tuple type";
-      CHECK_EQ(tuple_type->fields.size(), outputs.size());
+      ICHECK(tuple_type) << "Expect output to be a tuple type";
+      ICHECK_EQ(tuple_type->fields.size(), outputs.size());
     }
     // Set the name to `__copy`. It will be detected in graph runtime to perform
     // data copy across devices.
@@ -262,7 +262,7 @@ class ScheduleGetter : public backend::MemoizedExprTranslator<Array<te::Tensor>>
 
   Array<te::Tensor> VisitExpr_(const LetNode* op) final {
     Array<te::Tensor> val = VisitExpr(op->value);
-    CHECK(!memo_.count(op->var));
+    ICHECK(!memo_.count(op->var));
     memo_[op->var] = val;
     return VisitExpr(op->body);
   }
@@ -270,9 +270,9 @@ class ScheduleGetter : public backend::MemoizedExprTranslator<Array<te::Tensor>>
   Array<te::Tensor> VisitExpr_(const TupleNode* op) final {
     Array<te::Tensor> fields;
     for (Expr field : op->fields) {
-      CHECK(field->checked_type().as<TensorTypeNode>()) << "Only allow Tuple of Tensor";
+      ICHECK(field->checked_type().as<TensorTypeNode>()) << "Only allow Tuple of Tensor";
       Array<te::Tensor> res = VisitExpr(field);
-      CHECK_EQ(res.size(), 1);
+      ICHECK_EQ(res.size(), 1);
       fields.push_back(res[0]);
     }
     return fields;
@@ -281,9 +281,9 @@ class ScheduleGetter : public backend::MemoizedExprTranslator<Array<te::Tensor>>
   Array<te::Tensor> VisitExpr_(const TupleGetItemNode* op) final {
     const auto* tuple_type = op->tuple->type_as<TupleTypeNode>();
     Array<te::Tensor> tuple = VisitExpr(op->tuple);
-    CHECK_EQ(tuple_type->fields.size(), tuple.size());
-    CHECK_GE(op->index, 0);
-    CHECK_LT(static_cast<size_t>(op->index), tuple.size());
+    ICHECK_EQ(tuple_type->fields.size(), tuple.size());
+    ICHECK_GE(op->index, 0);
+    ICHECK_LT(static_cast<size_t>(op->index), tuple.size());
     return {tuple[op->index]};
   }
 
@@ -332,10 +332,10 @@ class MakeShapeFunc : public backend::MemoizedExprTranslator<Array<te::Tensor>>
         // flatten tuple of tensor type.
         const auto* tuple_type = param->type_as<TupleTypeNode>();
         // TODO(@icemelon): Support recursive tuple
-        CHECK(tuple_type);
+        ICHECK(tuple_type);
         for (Type field : tuple_type->fields) {
           const auto* ttype = field.as<TensorTypeNode>();
-          CHECK(ttype);
+          ICHECK(ttype);
           add_placeholder(ttype);
         }
       }
@@ -405,7 +405,7 @@ class MakeShapeFunc : public backend::MemoizedExprTranslator<Array<te::Tensor>>
       LOG(FATAL) << "Free variable " << var->name_hint();
       return {};
     } else {
-      CHECK(data_dependants_.size());
+      ICHECK(data_dependants_.size());
       bool data_dependant = data_dependants_.back();
       if (data_dependant) {
         param_states_[var] |= kNeedInputData;
@@ -419,8 +419,8 @@ class MakeShapeFunc : public backend::MemoizedExprTranslator<Array<te::Tensor>>
 
   Array<te::Tensor> VisitExpr_(const ConstantNode* op) final {
     using tir::make_const;
-    CHECK(data_dependants_.size());
-    CHECK(op->is_scalar());
+    ICHECK(data_dependants_.size());
+    ICHECK(op->is_scalar());
     bool data_dependant = data_dependants_.back();
     if (data_dependant) {
       void* data = op->data->data;
@@ -458,13 +458,13 @@ class MakeShapeFunc : public backend::MemoizedExprTranslator<Array<te::Tensor>>
   Array<te::Tensor> VisitExpr_(const CallNode* call_node) final {
     static auto fshape_func = Op::GetAttrMap<FShapeFunc>("FShapeFunc");
     static auto tshape_data_dependant = Op::GetAttrMap<TShapeDataDependant>("TShapeDataDependant");
-    CHECK(call_node->op.as<OpNode>()) << "Primitive function only allows call into primitive ops";
+    ICHECK(call_node->op.as<OpNode>()) << "Primitive function only allows call into primitive ops";
     Op op = Downcast<Op>(call_node->op);
-    CHECK(data_dependants_.empty() || !data_dependants_.back())
+    ICHECK(data_dependants_.empty() || !data_dependants_.back())
         << "Error in op fusion: output of the shape func is fed to a "
         << "data-dependant shape func";
-    CHECK_GT(fshape_func.count(op), 0) << "Internal error, cannot find ShapeFunc for " << op->name;
-    CHECK_GT(tshape_data_dependant.count(op), 0)
+    ICHECK_GT(fshape_func.count(op), 0) << "Internal error, cannot find ShapeFunc for " << op->name;
+    ICHECK_GT(tshape_data_dependant.count(op), 0)
         << "Internal error, cannot find TShapeDataDependant for " << op->name;
 
     data_dependants_.push_back(IsDataDependant(call_node));
@@ -480,7 +480,7 @@ class MakeShapeFunc : public backend::MemoizedExprTranslator<Array<te::Tensor>>
       }
     }
     if (count_tuple) {
-      CHECK_EQ(call_node->args.size(), 1U) << "Only allow function with a single tuple input";
+      ICHECK_EQ(call_node->args.size(), 1U) << "Only allow function with a single tuple input";
     }
     // Get output ndims
     auto ret_type = call_node->checked_type();
@@ -490,10 +490,10 @@ class MakeShapeFunc : public backend::MemoizedExprTranslator<Array<te::Tensor>>
     } else {
       auto rtype = ret_type.as<TupleTypeNode>();
       // TODO(@icemelon): Allow recursive tuple
-      CHECK(rtype);
+      ICHECK(rtype);
       for (size_t i = 0; i < rtype->fields.size(); ++i) {
         auto ttype = rtype->fields[i].as<TensorTypeNode>();
-        CHECK(ttype);
+        ICHECK(ttype);
         out_ndims.push_back(IntImm(DataType::Int(32), ttype->shape.size()));
       }
     }
@@ -511,7 +511,7 @@ class MakeShapeFunc : public backend::MemoizedExprTranslator<Array<te::Tensor>>
 
   Array<te::Tensor> VisitExpr_(const LetNode* op) final {
     Array<te::Tensor> val = VisitExpr(op->value);
-    CHECK(!memo_.count(op->var));
+    ICHECK(!memo_.count(op->var));
     memo_[op->var] = val;
     return VisitExpr(op->body);
   }
@@ -519,9 +519,9 @@ class MakeShapeFunc : public backend::MemoizedExprTranslator<Array<te::Tensor>>
   Array<te::Tensor> VisitExpr_(const TupleNode* op) final {
     Array<te::Tensor> fields;
     for (Expr field : op->fields) {
-      CHECK(field->checked_type().as<TensorTypeNode>()) << "Only allow Tuple of Tensor";
+      ICHECK(field->checked_type().as<TensorTypeNode>()) << "Only allow Tuple of Tensor";
       Array<te::Tensor> res = VisitExpr(field);
-      CHECK_EQ(res.size(), 1);
+      ICHECK_EQ(res.size(), 1);
       fields.push_back(res[0]);
     }
     return fields;
@@ -579,34 +579,34 @@ class CompileEngineImpl : public CompileEngineNode {
     std::vector<CCacheKey> cached_ext_funcs;
     for (const auto& it : cache_) {
       auto src_func = it.first->source_func;
-      CHECK(src_func.defined());
+      ICHECK(src_func.defined());
       if (src_func->GetAttr<String>(attr::kCompiler).defined()) {
         auto code_gen = src_func->GetAttr<String>(attr::kCompiler);
-        CHECK(code_gen.defined()) << "No external codegen is set";
+        ICHECK(code_gen.defined()) << "No external codegen is set";
         std::string code_gen_name = code_gen.value();
         cached_ext_funcs.push_back(it.first);
 
         auto symbol_name = src_func->GetAttr<String>(tvm::attr::kGlobalSymbol);
-        CHECK(symbol_name.defined()) << "No external symbol is set for:\n"
-                                     << AsText(src_func, false);
+        ICHECK(symbol_name.defined()) << "No external symbol is set for:\n"
+                                      << AsText(src_func, false);
 
         std::string sn = symbol_name.value();
         if (cached_symbol.count(sn)) {
           cached_symbol[sn] = code_gen_name;
         } else {
-          CHECK_NE(sn, code_gen_name)
+          ICHECK_NE(sn, code_gen_name)
               << "Found duplicated symbol: " << sn << " for: " << code_gen_name;
         }
 
         std::string ext_name = "relay.ext." + code_gen_name;
         auto pf = tvm::runtime::Registry::Get(ext_name);
-        CHECK(pf) << "Failed to find the codegen tool for " << ext_name << "\n";
+        ICHECK(pf) << "Failed to find the codegen tool for " << ext_name << "\n";
         // No need to keep compiler attribute at this point, functions have been
         // extracted for specific codegen.
         src_func = WithAttr(std::move(src_func), attr::kCompiler, NullValue<ObjectRef>());
         runtime::Module ext_mod = (*pf)(src_func);
 
-        CHECK(ext_mod.defined()) << "No external runtime is generated.";
+        ICHECK(ext_mod.defined()) << "No external runtime is generated.";
         ret.push_back(ext_mod);
       }
     }
@@ -661,7 +661,7 @@ class CompileEngineImpl : public CompileEngineNode {
     if (key->source_func->GetAttr<String>(attr::kCompiler).defined()) {
       auto cache_node = make_object<CachedFuncNode>();
       const auto name_node = key->source_func->GetAttr<String>(tvm::attr::kGlobalSymbol);
-      CHECK(name_node.defined()) << "External function has not been attached a name yet.";
+      ICHECK(name_node.defined()) << "External function has not been attached a name yet.";
       cache_node->func_name = std::string(name_node.value());
       cache_node->target = Target("ext_dev");
       cache_node->funcs->Add(GlobalVar(cache_node->func_name), key->source_func);
@@ -671,7 +671,7 @@ class CompileEngineImpl : public CompileEngineNode {
     // Enforce use the target.
     With<Target> target_scope(key->target);
 
-    CHECK(!value->cached_func.defined());
+    ICHECK(!value->cached_func.defined());
     auto cfunc = CreateSchedule(key->source_func, key->target);
     auto cache_node = make_object<CachedFuncNode>(*(cfunc.operator->()));
 
@@ -720,7 +720,7 @@ class CompileEngineImpl : public CompileEngineNode {
     // Enforce use the target.
     With<Target> target_scope(key->target);
 
-    CHECK(!value->cached_func.defined());
+    ICHECK(!value->cached_func.defined());
     auto spair = MakeShapeFunc().Create(key->source_func);
     auto cache_node = make_object<CachedFuncNode>(*(spair.second.operator->()));
     cache_node->func_name = GetUniqueName(cache_node->func_name);
diff --git a/src/relay/backend/compile_engine.h b/src/relay/backend/compile_engine.h
index 95166c7..5582291 100644
--- a/src/relay/backend/compile_engine.h
+++ b/src/relay/backend/compile_engine.h
@@ -154,7 +154,7 @@ class CCacheKey : public ObjectRef {
   const CCacheKeyNode* operator->() const { return static_cast<const CCacheKeyNode*>(get()); }
   // comparator
   inline bool operator==(const CCacheKey& other) const {
-    CHECK(defined() && other.defined());
+    ICHECK(defined() && other.defined());
     return (*this)->Equal(other.operator->());
   }
   using ContainerType = CCacheKeyNode;
@@ -272,7 +272,7 @@ namespace std {
 template <>
 struct hash<::tvm::relay::CCacheKey> {
   size_t operator()(const ::tvm::relay::CCacheKey& key) const {
-    CHECK(key.defined());
+    ICHECK(key.defined());
     return key->Hash();
   }
 };
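
Both operator== and the std::hash specialization above assert defined() first
because CCacheKey is a reference type: hashing or comparing a null reference
would dereference null deep inside a hash map. The same defensive shape, on a
hypothetical reference wrapper (ICHECK again assumed from the TVM headers):

    #include <cstddef>

    template <typename Node>
    struct Ref {
      const Node* ptr = nullptr;
      bool defined() const { return ptr != nullptr; }
      const Node* operator->() const { return ptr; }
    };

    template <typename Node>
    std::size_t HashOf(const Ref<Node>& ref) {
      ICHECK(ref.defined()) << "cannot hash an undefined reference";
      return ref->Hash();  // safe: definedness was just asserted
    }
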
diff --git a/src/relay/backend/contrib/arm_compute_lib/codegen.cc b/src/relay/backend/contrib/arm_compute_lib/codegen.cc
index 087c895..a963242 100644
--- a/src/relay/backend/contrib/arm_compute_lib/codegen.cc
+++ b/src/relay/backend/contrib/arm_compute_lib/codegen.cc
@@ -87,7 +87,7 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer {
     }
     auto fn = cn->op.as<FunctionNode>();
     auto comp = fn->GetAttr<String>(attr::kComposite);
-    CHECK(comp.defined()) << "Arm Compute Library JSON runtime only supports composite functions.";
+    ICHECK(comp.defined()) << "Arm Compute Library JSON runtime only supports composite functions.";
     const std::string name = comp.value();
     std::shared_ptr<JSONGraphNode> json_node;
     if (name == "arm_compute_lib.conv2d" || name == "arm_compute_lib.qnn_conv2d") {
@@ -114,7 +114,7 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer {
   static CompositeConvNode UnpackCompositeConvolution(const CallNode* cn) {
     CompositeConvNode nodes{};
     const auto* fn = cn->op.as<FunctionNode>();
-    CHECK(fn);
+    ICHECK(fn);
 
     // Traverse composite convolution function from child to parent
     const auto* current_call = fn->body.as<CallNode>();
@@ -132,9 +132,9 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer {
     }
     // Enforce a convolution node exists at this point during traversal
     if (nodes.requantize) {
-      CHECK(backend::IsOp(current_call, "qnn.conv2d"));
+      ICHECK(backend::IsOp(current_call, "qnn.conv2d"));
     } else {
-      CHECK(backend::IsOp(current_call, "nn.conv2d"));
+      ICHECK(backend::IsOp(current_call, "nn.conv2d"));
     }
     nodes.conv = current_call;
     if (!current_call->args.empty() && current_call->args[0]->IsInstance<CallNode>()) {
@@ -157,8 +157,8 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer {
     std::string name = "nn.conv2d";
 
     const auto* conv_attr = nodes.conv->attrs.as<Conv2DAttrs>();
-    CHECK(conv_attr);
-    CHECK(conv_attr->kernel_layout == "OHWI")
+    ICHECK(conv_attr);
+    ICHECK(conv_attr->kernel_layout == "OHWI")
         << "Kernel layout must be OHWI, has the module been pre-processed correctly?";
 
     // Inputs must be added in the same order they appear in the relay graph.
@@ -186,7 +186,7 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer {
     // Override attributes
     if (nodes.pad) {
       const auto* pad_attr = nodes.pad->attrs.as<PadAttrs>();
-      CHECK(pad_attr);
+      ICHECK(pad_attr);
       auto p = pad_attr->pad_width;
       // Convert to TVM layout for now, conversion to ACL layout takes place in runtime.
       // Standard convolution pad layout for TVM: top, left, bottom, right.
@@ -216,7 +216,7 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer {
   static CompositeDenseNode UnpackCompositeDense(const CallNode* cn) {
     CompositeDenseNode nodes{};
     const auto* fn = cn->op.as<FunctionNode>();
-    CHECK(fn);
+    ICHECK(fn);
 
     // Traverse composite dense function from child to parent
     const auto* current_call = fn->body.as<CallNode>();
@@ -230,9 +230,9 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer {
     }
     // Enforce a dense node exists at this point during traversal
     if (nodes.requantize) {
-      CHECK(backend::IsOp(current_call, "qnn.dense"));
+      ICHECK(backend::IsOp(current_call, "qnn.dense"));
     } else {
-      CHECK(backend::IsOp(current_call, "nn.dense"));
+      ICHECK(backend::IsOp(current_call, "nn.dense"));
     }
     nodes.dense = current_call;
     return nodes;
@@ -282,13 +282,13 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer {
    */
   std::shared_ptr<JSONGraphNode> CreateCompositeAvgPool2DJSONNode(const CallNode* cn) {
     const auto* fn = cn->op.as<FunctionNode>();
-    CHECK(fn);
+    ICHECK(fn);
     const auto* cast = fn->body.as<CallNode>();
-    CHECK(cast);
+    ICHECK(cast);
     const auto* avg_pool = cast->args[0].as<CallNode>();
-    CHECK(avg_pool);
+    ICHECK(avg_pool);
     const auto* avg_pool_op = avg_pool->op.as<OpNode>();
-    CHECK(avg_pool_op);
+    ICHECK(avg_pool_op);
     const std::string name = avg_pool_op->name;
 
     std::vector<JSONGraphNodeEntry> inputs;
@@ -310,16 +310,16 @@ class ACLJSONSerializer : public backend::contrib::JSONSerializer {
   std::shared_ptr<JSONGraphNode> CreateCompositeL2Pool2DJSONNode(const CallNode* cn) {
     const std::string name = "nn.l2_pool2d";
     const auto* fn = cn->op.as<FunctionNode>();
-    CHECK(fn);
+    ICHECK(fn);
     const auto* sqrt = fn->body.as<CallNode>();
-    CHECK(sqrt);
+    ICHECK(sqrt);
     const auto* avg_pool = sqrt->args[0].as<CallNode>();
-    CHECK(avg_pool);
+    ICHECK(avg_pool);
     const auto* pow = avg_pool->args[0].as<CallNode>();
-    CHECK(pow);
+    ICHECK(pow);
     const auto* exponent = pow->args[1].as<ConstantNode>();
-    CHECK(exponent);
-    CHECK_EQ(*static_cast<float*>(exponent->data->data), 2) << "Exponent must be 2 for L2 pooling";
+    ICHECK(exponent);
+    ICHECK_EQ(*static_cast<float*>(exponent->data->data), 2) << "Exponent must be 2 for L2 pooling";
 
     std::vector<JSONGraphNodeEntry> inputs;
     inputs.push_back(VisitExpr(cn->args[0])[0]);
@@ -363,7 +363,7 @@ TVM_REGISTER_GLOBAL("relay.ext.arm_compute_lib.optimize").set_body_typed(PreProc
  * \return A runtime module.
  */
 runtime::Module ACLCompiler(const ObjectRef& ref) {
-  CHECK(ref->IsInstance<FunctionNode>()) << "The input ref is expected to be a Relay function.";
+  ICHECK(ref->IsInstance<FunctionNode>()) << "The input ref is expected to be a Relay function.";
   Function func = Downcast<Function>(ref);
   std::string func_name = backend::GetExtSymbol(func);
 
@@ -372,7 +372,7 @@ runtime::Module ACLCompiler(const ObjectRef& ref) {
   std::string graph_json = serializer.GetJSON();
   auto param_names = serializer.GetParams();
   const auto* pf = runtime::Registry::Get("runtime.arm_compute_lib_runtime_create");
-  CHECK(pf != nullptr) << "Cannot find JSON runtime module to create";
+  ICHECK(pf != nullptr) << "Cannot find JSON runtime module to create";
   runtime::Module lib = (*pf)(func_name, graph_json, param_names);
   return lib;
 }
diff --git a/src/relay/backend/contrib/codegen_c/codegen.cc b/src/relay/backend/contrib/codegen_c/codegen.cc
index c7b5a8d..935ac16 100644
--- a/src/relay/backend/contrib/codegen_c/codegen.cc
+++ b/src/relay/backend/contrib/codegen_c/codegen.cc
@@ -61,7 +61,7 @@ class CodegenC : public MemoizedExprTranslator<std::vector<Output>>, public Code
     std::vector<Output> outs;
     for (auto field : node->fields) {
       auto res = VisitExpr(field);
-      CHECK_EQ(res.size(), 1U) << "Do not support tuple nest";
+      ICHECK_EQ(res.size(), 1U) << "Do not support tuple nest";
       outs.push_back(res[0]);
     }
     return outs;
@@ -69,7 +69,7 @@ class CodegenC : public MemoizedExprTranslator<std::vector<Output>>, public Code
 
   std::vector<Output> VisitExpr_(const TupleGetItemNode* op) final {
     auto res = VisitExpr(op->tuple);
-    CHECK_GT(res.size(), static_cast<size_t>(op->index));
+    ICHECK_GT(res.size(), static_cast<size_t>(op->index));
 
     // Only keep the item we want for the child node.
      // FIXME(@comaniac): The other items should still be required for the primary outputs.
@@ -84,7 +84,7 @@ class CodegenC : public MemoizedExprTranslator<std::vector<Output>>, public Code
     // Get const: static_cast<float*>(gcc_0_consts[0]->data)
     output.name = CreateDataReference(ext_func_id_, const_idx_);
     const auto* type_node = cn->checked_type().as<TensorTypeNode>();
-    CHECK(type_node);
+    ICHECK(type_node);
     const auto& dtype = GetDtypeString(type_node);
 
     // Generate the global variable for needed ndarrays
@@ -94,7 +94,7 @@ class CodegenC : public MemoizedExprTranslator<std::vector<Output>>, public Code
       ext_func_body_.insert(ext_func_body_.begin(), checker);
     }
 
-    CHECK(dtype == "float" || dtype == "int") << "Only float and int are supported for now.";
+    ICHECK(dtype == "float" || dtype == "int") << "Only float and int are supported for now.";
     output.dtype = dtype;
 
     std::string const_var_name = CreateConstVar(ext_func_id_, const_idx_);
@@ -130,7 +130,7 @@ class CodegenC : public MemoizedExprTranslator<std::vector<Output>>, public Code
     }
 
     const auto* type_node = call->checked_type().as<TensorTypeNode>();
-    CHECK(type_node);
+    ICHECK(type_node);
     const auto& dtype = GetDtypeString(type_node);
     macro_stream << ", " << dtype;
 
@@ -216,7 +216,7 @@ class CodegenC : public MemoizedExprTranslator<std::vector<Output>>, public Code
 class CSourceCodegen : public CSourceModuleCodegenBase {
  public:
   std::pair<std::string, Array<String>> GenCFunc(const Function& func) {
-    CHECK(func.defined()) << "Input error: expect a Relay function.";
+    ICHECK(func.defined()) << "Input error: expect a Relay function.";
 
     // Record the external symbol for runtime lookup.
     auto sid = GetExtSymbol(func);
@@ -260,7 +260,7 @@ class CSourceCodegen : public CSourceModuleCodegenBase {
 
     code_stream_ << operator_macro << "\n\n";
 
-    CHECK(ref->IsInstance<FunctionNode>());
+    ICHECK(ref->IsInstance<FunctionNode>());
     auto res = GenCFunc(Downcast<Function>(ref));
     std::string code = code_stream_.str();
 
@@ -269,7 +269,7 @@ class CSourceCodegen : public CSourceModuleCodegenBase {
 
     // Create a CSource module
     const auto* pf = runtime::Registry::Get("runtime.CSourceModuleCreate");
-    CHECK(pf != nullptr) << "Cannot find csource module to create the external runtime module";
+    ICHECK(pf != nullptr) << "Cannot find csource module to create the external runtime module";
     return (*pf)(code, "c", sym, variables);
   }
 
diff --git a/src/relay/backend/contrib/codegen_c/codegen_c.h b/src/relay/backend/contrib/codegen_c/codegen_c.h
index 0d395b7..9448b4d 100644
--- a/src/relay/backend/contrib/codegen_c/codegen_c.h
+++ b/src/relay/backend/contrib/codegen_c/codegen_c.h
@@ -85,7 +85,7 @@ class CodegenCBase {
    * \brief Exit a scope.
    */
   void ExitScope() {
-    CHECK_GE(indent_, 2U) << "Wrong ident found.";
+    ICHECK_GE(indent_, 2U) << "Wrong ident found.";
     indent_ -= 2;
   }
 
@@ -262,7 +262,7 @@ class CodegenCBase {
    */
   std::string GetDtypeString(const Var& var) {
     auto ttype = var->checked_type().as<TensorTypeNode>();
-    CHECK(ttype) << "Expect TensorTypeNode";
+    ICHECK(ttype) << "Expect TensorTypeNode";
     return GetDtypeString(ttype);
   }
 
@@ -297,7 +297,7 @@ class CodegenCBase {
    */
   std::string CreateInitChecker(const std::string& symbol) const {
     std::ostringstream oss;
-    oss << "CHECK(!" << symbol
+    oss << "ICHECK(!" << symbol
         << "_consts.empty()) << \"C source module hasn't been initialized.\";\n";
     return oss.str();
   }
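
Note that the CreateInitChecker change edits generated code, not just this
translation unit: every C source module it emits now asserts with ICHECK. For
a symbol hypothetically named gcc_0, the emitted line would read

    ICHECK(!gcc_0_consts.empty()) << "C source module hasn't been initialized.";

so generated modules must be compiled against a header that defines ICHECK
rather than CHECK.
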
diff --git a/src/relay/backend/contrib/codegen_json/codegen_json.h b/src/relay/backend/contrib/codegen_json/codegen_json.h
index 9ed15a8..859ef8c 100644
--- a/src/relay/backend/contrib/codegen_json/codegen_json.h
+++ b/src/relay/backend/contrib/codegen_json/codegen_json.h
@@ -197,8 +197,8 @@ class JSONSerializer : public MemoizedExprTranslator<std::vector<JSONGraphNodeEn
     if (const auto* tuple_type = checked_type.as<TupleTypeNode>()) {
       for (size_t i = 0; i < tuple_type->fields.size(); ++i) {
         const auto* tensor_type = tuple_type->fields[i].as<TensorTypeNode>();
-        CHECK(tensor_type) << "Expect TensorType, but received: ."
-                           << tuple_type->fields[i]->GetTypeKey();
+        ICHECK(tensor_type) << "Expect TensorType, but received: ."
+                            << tuple_type->fields[i]->GetTypeKey();
         ret.push_back(JSONGraphNodeEntry(node_id, i));
         shape.emplace_back(GetIntShape(tensor_type->shape));
         dtype.emplace_back(DType2String(tensor_type->dtype));
@@ -206,7 +206,7 @@ class JSONSerializer : public MemoizedExprTranslator<std::vector<JSONGraphNodeEn
       node->SetNumOutput(tuple_type->fields.size());
     } else {
       const auto* tensor_type = checked_type.as<TensorTypeNode>();
-      CHECK(tensor_type) << "Expect TensorType, but received: " << checked_type->GetTypeKey();
+      ICHECK(tensor_type) << "Expect TensorType, but received: " << checked_type->GetTypeKey();
       shape.emplace_back(GetIntShape(tensor_type->shape));
       dtype.emplace_back(DType2String(tensor_type->dtype));
       ret.push_back(JSONGraphNodeEntry(node_id, 0));
@@ -228,7 +228,7 @@ class JSONSerializer : public MemoizedExprTranslator<std::vector<JSONGraphNodeEn
       extractor.Extract(const_cast<Object*>(call_attr));
     } else if (const auto* fn = cn->op.as<FunctionNode>()) {
       auto pattern = fn->GetAttr<String>(attr::kPartitionedFromPattern);
-      CHECK(pattern.defined());
+      ICHECK(pattern.defined());
       std::vector<std::string> values;
       values.push_back(pattern.value());
       std::vector<dmlc::any> attr;
@@ -243,7 +243,7 @@ class JSONSerializer : public MemoizedExprTranslator<std::vector<JSONGraphNodeEn
   }
 
   std::vector<JSONGraphNodeEntry> VisitExpr_(const VarNode* vn) {
-    CHECK(memo_.count(GetRef<Expr>(vn)));
+    ICHECK(memo_.count(GetRef<Expr>(vn)));
     return memo_[GetRef<Expr>(vn)];
   }
 
@@ -270,7 +270,7 @@ class JSONSerializer : public MemoizedExprTranslator<std::vector<JSONGraphNodeEn
       name = op_node->name;
     } else if (const auto* fn = cn->op.as<FunctionNode>()) {
       auto comp = fn->GetAttr<String>(attr::kComposite);
-      CHECK(comp.defined()) << "JSON runtime only supports composite functions.";
+      ICHECK(comp.defined()) << "JSON runtime only supports composite functions.";
       name = comp.value();
     } else {
       LOG(FATAL) << "JSON runtime does not support calls to " << cn->op->GetTypeKey();
@@ -289,7 +289,7 @@ class JSONSerializer : public MemoizedExprTranslator<std::vector<JSONGraphNodeEn
   }
 
   std::vector<JSONGraphNodeEntry> VisitExpr_(const LetNode* ln) {
-    CHECK_EQ(memo_.count(ln->var), 0);
+    ICHECK_EQ(memo_.count(ln->var), 0);
     memo_[ln->var] = VisitExpr(ln->value);
     return VisitExpr(ln->body);
   }
@@ -300,7 +300,7 @@ class JSONSerializer : public MemoizedExprTranslator<std::vector<JSONGraphNodeEn
   }
 
   std::vector<JSONGraphNodeEntry> VisitExpr_(const FunctionNode* fn) {
-    CHECK(fn->GetAttr<String>(attr::kComposite).defined())
+    ICHECK(fn->GetAttr<String>(attr::kComposite).defined())
         << "JSON runtime only supports composite functions";
     // FunctionNode should be handled by the caller.
     return {};
diff --git a/src/relay/backend/contrib/dnnl/codegen.cc b/src/relay/backend/contrib/dnnl/codegen.cc
index bec9af0..bfc5c77 100644
--- a/src/relay/backend/contrib/dnnl/codegen.cc
+++ b/src/relay/backend/contrib/dnnl/codegen.cc
@@ -57,7 +57,7 @@ inline size_t GetShape1DSize(const Type& type) {
 std::vector<std::string> Conv2d(const CallNode* call) {
   std::vector<std::string> args;
   const auto* conv2d_attr = call->attrs.as<Conv2DAttrs>();
-  CHECK(conv2d_attr);
+  ICHECK(conv2d_attr);
 
   auto ishape = GetShape(call->args[0]->checked_type());
   auto wshape = GetShape(call->args[1]->checked_type());
@@ -155,7 +155,7 @@ class CodegenDNNL : public MemoizedExprTranslator<std::vector<Output>>, public C
     std::vector<Output> outs;
     for (auto field : node->fields) {
       auto res = VisitExpr(field);
-      CHECK_EQ(res.size(), 1U) << "Do not support tuple nest";
+      ICHECK_EQ(res.size(), 1U) << "Do not support tuple nest";
       outs.push_back(res[0]);
     }
     return outs;
@@ -163,7 +163,7 @@ class CodegenDNNL : public MemoizedExprTranslator<std::vector<Output>>, public C
 
   std::vector<Output> VisitExpr_(const TupleGetItemNode* op) final {
     auto res = VisitExpr(op->tuple);
-    CHECK_GT(res.size(), static_cast<size_t>(op->index));
+    ICHECK_GT(res.size(), static_cast<size_t>(op->index));
 
     // Only keep the item we want for the child node.
      // FIXME(@comaniac): The other items should still be required for the primary outputs.
@@ -190,8 +190,8 @@ class CodegenDNNL : public MemoizedExprTranslator<std::vector<Output>>, public C
     const_idx_++;
 
     const auto* type_node = cn->checked_type().as<TensorTypeNode>();
-    CHECK(type_node);
-    CHECK_EQ(GetDtypeString(type_node), "float") << "Only float is supported for now.";
+    ICHECK(type_node);
+    ICHECK_EQ(GetDtypeString(type_node), "float") << "Only float is supported for now.";
 
     return {output};
   }
@@ -233,7 +233,7 @@ class CodegenDNNL : public MemoizedExprTranslator<std::vector<Output>>, public C
 
   GenerateBodyOutput GenerateOpCall(const CallNode* call) {
     const auto* op_node = call->op.as<OpNode>();
-    CHECK(op_node) << "Expect OpNode, but got " << call->op->GetTypeKey();
+    ICHECK(op_node) << "Expect OpNode, but got " << call->op->GetTypeKey();
 
     using ArgFunType = std::function<std::vector<std::string>(const CallNode*)>;
     static const std::map<std::string, std::pair<std::string, ArgFunType>> op_map = {
@@ -257,7 +257,7 @@ class CodegenDNNL : public MemoizedExprTranslator<std::vector<Output>>, public C
   GenerateBodyOutput GenerateCompositeFunctionCall(const FunctionNode* callee,
                                                    const CallNode* caller) {
     const auto pattern_name = callee->GetAttr<runtime::String>(attr::kComposite);
-    CHECK(pattern_name.defined()) << "Only functions with composite attribute supported";
+    ICHECK(pattern_name.defined()) << "Only functions with composite attribute supported";
 
     if (pattern_name == "dnnl.conv2d_bias_relu") {
       const auto* conv_call =
@@ -283,7 +283,7 @@ class CodegenDNNL : public MemoizedExprTranslator<std::vector<Output>>, public C
                                   const std::vector<std::string>& func_args,
                                   const std::vector<std::string>& attribute_args) {
     // Make function call with input buffers when visiting arguments
-    CHECK_GT(func_args.size(), 0);
+    ICHECK_GT(func_args.size(), 0);
     std::ostringstream decl_stream;
     decl_stream << "(" << func_args[0];
     for (size_t i = 1; i < func_args.size(); ++i) {
@@ -295,11 +295,11 @@ class CodegenDNNL : public MemoizedExprTranslator<std::vector<Output>>, public C
     if (root_call->checked_type()->IsInstance<TupleTypeNode>()) {
       auto type_node = root_call->checked_type().as<TupleTypeNode>();
       for (auto field : type_node->fields) {
-        CHECK(field->IsInstance<TensorTypeNode>());
+        ICHECK(field->IsInstance<TensorTypeNode>());
         out_types.push_back(field);
       }
     } else if (root_call->checked_type()->IsInstance<TensorTypeNode>()) {
-      CHECK(root_call->checked_type()->IsInstance<TensorTypeNode>());
+      ICHECK(root_call->checked_type()->IsInstance<TensorTypeNode>());
       out_types.push_back(root_call->checked_type());
     } else {
       LOG(FATAL) << "Unrecognized type node: " << AsText(root_call->checked_type(), false);
@@ -363,7 +363,7 @@ class DNNLModuleCodegen : public CSourceModuleCodegenBase {
  public:
   // Create a corresponding DNNL function for the given relay Function.
   std::pair<std::string, Array<String>> GenDNNLFunc(const Function& func) {
-    CHECK(func.defined()) << "Input error: expect a Relay function.";
+    ICHECK(func.defined()) << "Input error: expect a Relay function.";
 
     // Record the external symbol for runtime lookup.
     auto sid = GetExtSymbol(func);
@@ -404,7 +404,7 @@ class DNNLModuleCodegen : public CSourceModuleCodegenBase {
     code_stream_ << "using namespace tvm::runtime::contrib;\n";
     code_stream_ << "\n";
 
-    CHECK(ref->IsInstance<FunctionNode>());
+    ICHECK(ref->IsInstance<FunctionNode>());
     auto res = GenDNNLFunc(Downcast<Function>(ref));
     std::string code = code_stream_.str();
     String sym = std::get<0>(res);
@@ -412,7 +412,7 @@ class DNNLModuleCodegen : public CSourceModuleCodegenBase {
 
     // Create a CSource module
     const auto* pf = runtime::Registry::Get("runtime.CSourceModuleCreate");
-    CHECK(pf != nullptr) << "Cannot find csource module to create the external runtime module";
+    ICHECK(pf != nullptr) << "Cannot find csource module to create the external runtime module";
     return (*pf)(code, "c", sym, variables);
   }
 
@@ -441,14 +441,14 @@ class DNNLJSONSerializer : public backend::contrib::JSONSerializer {
       name = op_node->name;
     } else if (const auto* fn = cn->op.as<FunctionNode>()) {
       auto comp = fn->GetAttr<String>(attr::kComposite);
-      CHECK(comp.defined()) << "DNNL JSON runtime only supports composite functions.";
+      ICHECK(comp.defined()) << "DNNL JSON runtime only supports composite functions.";
       name = comp.value();
 
       if (name == "dnnl.conv2d_bias_relu") {
         call = GetRootCall(fn->body.as<CallNode>(), 2, {"nn.conv2d", "add", "nn.relu"});
       } else if (name == "dnnl.conv2d_relu") {
         call = GetRootCall(fn->body.as<CallNode>(), 1, {"nn.conv2d", "nn.relu"});
-        CHECK(call->op.as<OpNode>()) << "Not op node";
+        ICHECK(call->op.as<OpNode>()) << "Not op node";
       } else {
         LOG(FATAL) << "Unrecognized DNNL pattern: " << name;
       }
@@ -476,7 +476,7 @@ class DNNLJSONSerializer : public backend::contrib::JSONSerializer {
  */
 runtime::Module DNNLCompiler(const ObjectRef& ref) {
 #ifdef USE_JSON_RUNTIME
-  CHECK(ref->IsInstance<FunctionNode>());
+  ICHECK(ref->IsInstance<FunctionNode>());
   auto func = Downcast<Function>(ref);
   auto func_name = GetExtSymbol(func);
   DNNLJSONSerializer serializer(func_name, func);
@@ -485,7 +485,7 @@ runtime::Module DNNLCompiler(const ObjectRef& ref) {
   auto params = serializer.GetParams();
 
   const auto* pf = runtime::Registry::Get("runtime.DNNLJSONRuntimeCreate");
-  CHECK(pf != nullptr) << "Cannot find JSON runtime module to create";
+  ICHECK(pf != nullptr) << "Cannot find JSON runtime module to create";
   auto mod = (*pf)(func_name, graph_json, params);
   return mod;
 #else
diff --git a/src/relay/backend/contrib/ethosn/codegen.cc b/src/relay/backend/contrib/ethosn/codegen.cc
index dd92c6b..3097a30 100644
--- a/src/relay/backend/contrib/ethosn/codegen.cc
+++ b/src/relay/backend/contrib/ethosn/codegen.cc
@@ -43,7 +43,7 @@ sl::TensorInfo GetTensorInfo(std::map<Expr, std::vector<sl::TensorInfo>> tensor_
 bool IsEthosnOp(const Call& call, const std::string& op_name) {
   if (call->op->IsInstance<OpNode>()) {
     Op op = Downcast<Op>(call->op);
-    CHECK(op.defined());
+    ICHECK(op.defined());
     return op == Op::Get(op_name);
   } else {
     return false;
@@ -53,7 +53,7 @@ bool IsEthosnOp(const Call& call, const std::string& op_name) {
 bool IsEthosnFunc(const Call& call, const std::string& op_name) {
   if (call->op->IsInstance<FunctionNode>()) {
     Function func = Downcast<Function>(call->op);
-    CHECK(func.defined());
+    ICHECK(func.defined());
     auto name_node = func->GetAttr<String>(attr::kComposite);
     return name_node.value() == op_name;
   }
@@ -62,7 +62,7 @@ bool IsEthosnFunc(const Call& call, const std::string& op_name) {
 
 std::map<Expr, std::vector<sl::TensorInfo>> InferTensorsVisitor::Infer(const Expr& expr) {
   tensor_table_.clear();
-  CHECK(expr->checked_type().defined());
+  ICHECK(expr->checked_type().defined());
   size_t output_size = 1;
   if (auto tuple = expr->checked_type().as<TupleTypeNode>()) {
     output_size = tuple->fields.size();
@@ -162,7 +162,7 @@ void InferTensorsVisitor::VisitExpr_(const CallNode* cn) {
 
 void InferTensorsVisitor::VisitExpr_(const TupleNode* tn) {
   auto tuple = GetRef<Tuple>(tn);
-  CHECK(tensor_table_.find(tuple) != tensor_table_.end());
+  ICHECK(tensor_table_.find(tuple) != tensor_table_.end());
   for (size_t i = 0; i < tn->fields.size(); i++) {
     tensor_table_[tn->fields[i]] = {tensor_table_[tuple][i]};
   }
@@ -176,7 +176,7 @@ void InferTensorsVisitor::VisitExpr_(const TupleGetItemNode* tgn) {
   // Don't assume it must be targeting a TupleNode
   // Vars and calls can still have TupleType
   auto tg = GetRef<TupleGetItem>(tgn);
-  CHECK(tensor_table_.find(tg) != tensor_table_.end());
+  ICHECK(tensor_table_.find(tg) != tensor_table_.end());
   auto tuple = tg->tuple;
   auto type = tuple->checked_type().as<TupleTypeNode>();
   int index = tg->index;
@@ -517,7 +517,7 @@ runtime::Module EthosnCompiler::CreateRuntimeModule(const ObjectRef& ref) {
     IRModule mod;
     Function func = Downcast<Function>(ref);
     auto name_node = func->GetAttr<String>(tvm::attr::kGlobalSymbol);
-    CHECK(name_node.defined()) << "Failed to retrieved external symbol.";
+    ICHECK(name_node.defined()) << "Failed to retrieved external symbol.";
     GlobalVar gvar = GlobalVar(name_node.value());
     mod->Add(gvar, func);
     Function mod_func = Downcast<Function>(mod->functions.at(gvar));
@@ -539,7 +539,7 @@ runtime::ethosn::OrderedCompiledNetwork EthosnCompiler::CompileEthosnFunc(const
   // Finally compile the network
   std::vector<std::unique_ptr<sl::CompiledNetwork>> compiled_networks =
       sl::Compile(*network_with_ids.network, options);
-  CHECK_GE(compiled_networks.size(), 1) << "Ethos-N compiler failed to compile network";
+  ICHECK_GE(compiled_networks.size(), 1) << "Ethos-N compiler failed to compile network";
   auto compiled_network = std::move(compiled_networks[0]);
   // Determine the order that the inputs/outputs are in and how that corresponds to the
   // order that the TVM runtime will expect them in
diff --git a/src/relay/backend/contrib/tensorrt/codegen.cc b/src/relay/backend/contrib/tensorrt/codegen.cc
index f692da3..26f674d 100644
--- a/src/relay/backend/contrib/tensorrt/codegen.cc
+++ b/src/relay/backend/contrib/tensorrt/codegen.cc
@@ -109,7 +109,7 @@ class TensorRTJSONSerializer : public backend::contrib::JSONSerializer {
 
   void SetPadNodeAttribute(std::shared_ptr<JSONGraphNode> node, const CallNode* cn) {
     const auto* pad_attr = cn->attrs.as<PadAttrs>();
-    CHECK(pad_attr);
+    ICHECK(pad_attr);
     auto p = pad_attr->pad_width;
     const int dim_h = (p.size() == 5) ? 3 : 2;
     const int dim_w = (p.size() == 5) ? 4 : 3;
@@ -124,7 +124,7 @@ class TensorRTJSONSerializer : public backend::contrib::JSONSerializer {
 
   void SetStridedSliceNodeAttribute(std::shared_ptr<JSONGraphNode> node, const CallNode* cn) {
     const auto* attrs = cn->attrs.as<StridedSliceAttrs>();
-    CHECK(attrs && attrs->begin && attrs->end && attrs->strides)
+    ICHECK(attrs && attrs->begin && attrs->end && attrs->strides)
         << "StridedSlice must have static begin, end, and strides.";
     const bool default_strides =
         !attrs->strides.value().defined() || attrs->strides.value().size() == 0;
@@ -145,10 +145,10 @@ class TensorRTJSONSerializer : public backend::contrib::JSONSerializer {
                                 !attrs->strides.value()[i].defined())
                                    ? 1
                                    : attrs->strides.value()[i].as<IntImmNode>()->value;
-      CHECK_GT(stride_value, 0);
+      ICHECK_GT(stride_value, 0);
       const int size_value = (end_value - begin_value + stride_value - 1) / stride_value;
-      CHECK_GE(begin_value, 0);
-      CHECK_GT(size_value, 0);
+      ICHECK_GE(begin_value, 0);
+      ICHECK_GT(size_value, 0);
       start.push_back(std::to_string(begin_value));
       size.push_back(std::to_string(size_value));
       strides.push_back(std::to_string(stride_value));
@@ -168,7 +168,7 @@ class TensorRTJSONSerializer : public backend::contrib::JSONSerializer {
     if (!cfg.defined()) {
       cfg = AttrsWithDefaultValues<TensorRTCompilerConfig>();
     }
-    CHECK_EQ(cfg.value()->tensorrt_version.size(), 3);
+    ICHECK_EQ(cfg.value()->tensorrt_version.size(), 3);
     std::vector<std::string> tensorrt_version = {std::to_string(cfg.value()->tensorrt_version[0]),
                                                  std::to_string(cfg.value()->tensorrt_version[1]),
                                                  std::to_string(cfg.value()->tensorrt_version[2])};
@@ -190,7 +190,7 @@ class TensorRTJSONSerializer : public backend::contrib::JSONSerializer {
  * \return A runtime module.
  */
 runtime::Module TensorRTCompiler(const ObjectRef& ref) {
-  CHECK(ref->IsInstance<FunctionNode>()) << "The input ref is expected to be a Relay function.";
+  ICHECK(ref->IsInstance<FunctionNode>()) << "The input ref is expected to be a Relay function.";
   Function func = Downcast<Function>(ref);
   std::string func_name = backend::GetExtSymbol(func);
 
@@ -199,7 +199,7 @@ runtime::Module TensorRTCompiler(const ObjectRef& ref) {
   std::string graph_json = serializer.GetJSON();
   auto param_names = serializer.GetParams();
   const auto* pf = runtime::Registry::Get("runtime.tensorrt_runtime_create");
-  CHECK(pf != nullptr) << "Cannot find TensorRT runtime module create function.";
+  ICHECK(pf != nullptr) << "Cannot find TensorRT runtime module create function.";
   runtime::Module lib = (*pf)(func_name, graph_json, param_names);
   return lib;
 }
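
The strided-slice size computed above is a ceiling division: each axis keeps
(end - begin + stride - 1) / stride elements, and the surrounding ICHECKs
reject non-positive strides and empty results. A small worked check of the
formula (names are illustrative):

    constexpr int SliceSize(int begin, int end, int stride) {
      return (end - begin + stride - 1) / stride;  // rounds up
    }
    static_assert(SliceSize(1, 8, 3) == 3, "keeps indices 1, 4, 7");
    static_assert(SliceSize(0, 4, 1) == 4, "unit stride keeps every element");
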
diff --git a/src/relay/backend/graph_plan_memory.cc b/src/relay/backend/graph_plan_memory.cc
index 2b08f45..bf58c8d 100644
--- a/src/relay/backend/graph_plan_memory.cc
+++ b/src/relay/backend/graph_plan_memory.cc
@@ -83,7 +83,7 @@ class StorageAllocaBaseVisitor : public ExprVisitor {
     std::vector<StorageToken*> fields;
     for (Expr field : op->fields) {
       auto tok = GetToken(field);
-      CHECK_EQ(tok.size(), 1U);
+      ICHECK_EQ(tok.size(), 1U);
       fields.push_back(tok[0]);
     }
     token_map_[op] = fields;
@@ -91,7 +91,7 @@ class StorageAllocaBaseVisitor : public ExprVisitor {
 
   void VisitExpr_(const TupleGetItemNode* op) final {
     const auto& tok = GetToken(op->tuple);
-    CHECK_LT(static_cast<size_t>(op->index), tok.size());
+    ICHECK_LT(static_cast<size_t>(op->index), tok.size());
     token_map_[op] = {tok[op->index]};
   }
 
@@ -115,7 +115,7 @@ class StorageAllocaBaseVisitor : public ExprVisitor {
   const std::vector<StorageToken*>& GetToken(const Expr& expr) {
     this->VisitExpr(expr);
     auto it = token_map_.find(expr.operator->());
-    CHECK(it != token_map_.end());
+    ICHECK(it != token_map_.end());
     return it->second;
   }
   /*!
@@ -142,14 +142,14 @@ class StorageAllocaInit : protected StorageAllocaBaseVisitor {
   using StorageAllocaBaseVisitor::VisitExpr_;
 
   void CreateToken(const ExprNode* op, bool can_realloc) final {
-    CHECK(!token_map_.count(op));
+    ICHECK(!token_map_.count(op));
     std::vector<StorageToken*> tokens;
     int device_type =
         node_device_map_.count(GetRef<Expr>(op)) ? node_device_map_[GetRef<Expr>(op)]->value : 0;
     if (const auto* tuple_type = op->checked_type().as<TupleTypeNode>()) {
       for (Type t : tuple_type->fields) {
         const auto* ttype = t.as<TensorTypeNode>();
-        CHECK(ttype);
+        ICHECK(ttype);
         StorageToken* token = arena_->make<StorageToken>();
         token->ttype = ttype;
         token->device_type = device_type;
@@ -157,7 +157,7 @@ class StorageAllocaInit : protected StorageAllocaBaseVisitor {
       }
     } else {
       const auto* ttype = op->checked_type().as<TensorTypeNode>();
-      CHECK(ttype);
+      ICHECK(ttype);
       StorageToken* token = arena_->make<StorageToken>();
       token->ttype = ttype;
       token->device_type = device_type;
@@ -233,9 +233,9 @@ class StorageAllocator : public StorageAllocaBaseVisitor {
   using StorageAllocaBaseVisitor::VisitExpr_;
   // override create token by getting token as prototype requirements.
   void CreateToken(const ExprNode* op, bool can_realloc) final {
-    CHECK(!token_map_.count(op));
+    ICHECK(!token_map_.count(op));
     auto it = prototype_.find(op);
-    CHECK(it != prototype_.end());
+    ICHECK(it != prototype_.end());
     std::vector<StorageToken*> tokens;
     for (StorageToken* tok : it->second) {
       if (can_realloc) {
@@ -286,12 +286,12 @@ class StorageAllocator : public StorageAllocaBaseVisitor {
    */
   size_t GetMemorySize(StorageToken* prototype) {
     const TensorTypeNode* ttype = prototype->ttype;
-    CHECK(ttype != nullptr);
+    ICHECK(ttype != nullptr);
     size_t size = 1;
     for (IndexExpr dim : ttype->shape) {
       const int64_t* pval = tir::as_const_int(dim);
-      CHECK(pval != nullptr) << "Cannot allocate memory symbolic tensor shape " << ttype->shape;
-      CHECK_GE(*pval, 0) << "Cannot allocate memory for tensor with negative shape" << *pval;
+      ICHECK(pval != nullptr) << "Cannot allocate memory symbolic tensor shape " << ttype->shape;
+      ICHECK_GE(*pval, 0) << "Cannot allocate memory for tensor with negative shape" << *pval;
       size *= static_cast<size_t>(pval[0]);
     }
     size *= DivRoundUp(ttype->dtype.bits() * ttype->dtype.lanes(), 8);
@@ -316,7 +316,7 @@ class StorageAllocator : public StorageAllocaBaseVisitor {
     for (auto it = mid; it != end; ++it) {
       StorageToken* tok = it->second;
       if (tok->device_type != prototype->device_type) continue;
-      CHECK_EQ(tok->ref_counter, 0);
+      ICHECK_EQ(tok->ref_counter, 0);
       // Use exact matching strategy
       tok->max_bytes = std::max(size, tok->max_bytes);
       tok->ref_counter = prototype->ref_counter;
@@ -329,7 +329,7 @@ class StorageAllocator : public StorageAllocaBaseVisitor {
       --it;
       StorageToken* tok = it->second;
       if (tok->device_type != prototype->device_type) continue;
-      CHECK_EQ(tok->ref_counter, 0);
+      ICHECK_EQ(tok->ref_counter, 0);
      // Use exact matching strategy
       tok->max_bytes = std::max(size, tok->max_bytes);
       tok->ref_counter = prototype->ref_counter;
@@ -356,8 +356,8 @@ class StorageAllocator : public StorageAllocaBaseVisitor {
    * \param tok The token to be released.
    */
   void CheckForRelease(StorageToken* tok) {
-    CHECK_GE(tok->storage_id, 0);
-    CHECK_GE(tok->ref_counter, 0);
+    ICHECK_GE(tok->storage_id, 0);
+    ICHECK_GE(tok->ref_counter, 0);
     if (tok->ref_counter == 0) {
       free_.insert({tok->max_bytes, tok});
     }
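
For context, GetMemorySize above computes the byte size of a static-shape
tensor: the product of the dimensions times the element size, rounded up to
whole bytes. A minimal standalone sketch of the same computation (the
function name and DivRoundUp-style helper are illustrative, not the exact
TVM source):

    #include <cstdint>
    #include <vector>
    #include <tvm/support/logging.h>  // ICHECK_GE (as of this commit)

    // Bytes needed for a tensor: product of dims times bytes per element.
    size_t TensorBytes(const std::vector<int64_t>& shape, int bits, int lanes) {
      size_t size = 1;
      for (int64_t dim : shape) {
        ICHECK_GE(dim, 0) << "negative dimension " << dim;
        size *= static_cast<size_t>(dim);
      }
      // Round bits * lanes up to whole bytes, e.g. float32x1 -> 4 bytes.
      return size * ((static_cast<size_t>(bits) * lanes + 7) / 8);
    }
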
diff --git a/src/relay/backend/graph_runtime_codegen.cc b/src/relay/backend/graph_runtime_codegen.cc
index acc99c5..7b71e34 100644
--- a/src/relay/backend/graph_runtime_codegen.cc
+++ b/src/relay/backend/graph_runtime_codegen.cc
@@ -243,9 +243,9 @@ class GraphRuntimeCodegen : public backend::MemoizedExprTranslator<std::vector<G
   std::vector<GraphNodeRef> AddNode(GraphObjectPtr node, Expr expr) {
     auto checked_type = expr->checked_type();
     size_t count = storage_device_map_.count(expr);
-    CHECK_GT(count, 0) << "Expr is not existing in storage plan";
+    ICHECK_GT(count, 0) << "Expr does not exist in storage plan";
     auto storage_device_info = storage_device_map_[expr];
-    CHECK_EQ(storage_device_info.size(), 2);
+    ICHECK_EQ(storage_device_info.size(), 2);
     // storage
     std::vector<int64_t> storage_info;
     for (auto& v : storage_device_info[0]) {
@@ -282,7 +282,7 @@ class GraphRuntimeCodegen : public backend::MemoizedExprTranslator<std::vector<G
           LOG(FATAL) << "type " << checked_type->GetTypeKey() << " not supported";
         }
       }
-      CHECK_EQ(node->Type(), kGraphOpNode);
+      ICHECK_EQ(node->Type(), kGraphOpNode);
       auto op_nd = std::dynamic_pointer_cast<GraphOpNode>(node);
       op_nd->attrs_["shape"] = shape;
       op_nd->attrs_["dtype"] = dtype;
@@ -367,7 +367,7 @@ class GraphRuntimeCodegen : public backend::MemoizedExprTranslator<std::vector<G
       target = Target("ext_dev");
       CCacheKey key = (*pf0)(func, target);
       CachedFunc ext_func = (*pf1)(compile_engine_, key);
-      CHECK(ext_func.defined()) << "External function is not defined.";
+      ICHECK(ext_func.defined()) << "External function is not defined.";
 
       // Step into the functions that are handled by external codegen to
       // collect metadata.
@@ -379,7 +379,7 @@ class GraphRuntimeCodegen : public backend::MemoizedExprTranslator<std::vector<G
       return GraphAddCallNode(op, ext_func->func_name, ext_func->func_name);
     }
 
-    CHECK_GE(storage_device_map_.count(expr), 0);
+    ICHECK_GE(storage_device_map_.count(expr), 0);
     auto& device_type = storage_device_map_[expr][1];
     auto call_dev_type = device_type[0]->value;
     // Normal Relay Function
@@ -410,7 +410,7 @@ class GraphRuntimeCodegen : public backend::MemoizedExprTranslator<std::vector<G
   }
 
   std::vector<GraphNodeRef> VisitExpr_(const LetNode* op) override {
-    CHECK_EQ(var_map_.count(op->var.get()), 0);
+    ICHECK_EQ(var_map_.count(op->var.get()), 0);
     var_map_[op->var.get()] = VisitExpr(op->value);
     return VisitExpr(op->body);
   }
@@ -431,7 +431,7 @@ class GraphRuntimeCodegen : public backend::MemoizedExprTranslator<std::vector<G
     return {};
   }
   std::vector<GraphNodeRef> VisitExpr_(const FunctionNode* op) override {
-    CHECK(op->GetAttr<String>(attr::kCompiler).defined())
+    ICHECK(op->GetAttr<String>(attr::kCompiler).defined())
         << "Only functions supported by custom codegen";
     return {};
   }
@@ -479,7 +479,7 @@ class GraphRuntimeCodegen : public backend::MemoizedExprTranslator<std::vector<G
       const auto& storage_id = dmlc::get<std::vector<int64_t>>(node->attrs_["storage_id"]);
       const auto& dtype_vec = dmlc::get<std::vector<std::string>>(node->attrs_["dtype"]);
 
-      CHECK_EQ(node->num_outputs_, shape_vec.size());
+      ICHECK_EQ(node->num_outputs_, shape_vec.size());
       num_entry += node->num_outputs_;
 
       shapes.insert(shapes.end(), shape_vec.begin(), shape_vec.end());
@@ -556,14 +556,14 @@ class GraphRuntimeCodegenModule : public runtime::ModuleNode {
   virtual PackedFunc GetFunction(const std::string& name, const ObjectPtr<Object>& sptr_to_self) {
     if (name == "init") {
       return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
-        CHECK_EQ(args.num_args, 2) << "The expected of arguments are: "
-                                   << "runtime::Module mod and Map<int, Target> targets";
+        ICHECK_EQ(args.num_args, 2) << "The expected arguments are: "
+                                    << "runtime::Module mod and Map<int, Target> targets";
         void* mod = args[0];
         Map<Integer, tvm::Target> tmp = args[1];
         TargetsMap targets;
         for (const auto& it : tmp) {
           auto dev_type = it.first.as<tir::IntImmNode>();
-          CHECK(dev_type);
+          ICHECK(dev_type);
           targets[dev_type->value] = it.second;
         }
         codegen_ =
@@ -588,7 +588,7 @@ class GraphRuntimeCodegenModule : public runtime::ModuleNode {
     } else if (name == "get_param_by_name") {
       return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
         String key = args[0];
-        CHECK_GT(this->output_.params.count(key), 0);
+        ICHECK_GT(this->output_.params.count(key), 0);
         *rv = this->output_.params[key];
       });
     } else if (name == "get_irmodule") {
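
GraphRuntimeCodegenModule follows the usual runtime::ModuleNode convention:
GetFunction dispatches on a name and returns a PackedFunc that validates its
own arity before unpacking TVMArgs. A hedged sketch of that pattern (the
module class and its "echo" entry point are hypothetical):

    #include <tvm/runtime/module.h>
    #include <tvm/runtime/packed_func.h>

    using namespace tvm::runtime;

    class ExampleModuleNode : public ModuleNode {
     public:
      const char* type_key() const final { return "example"; }
      PackedFunc GetFunction(const std::string& name,
                             const ObjectPtr<Object>& sptr_to_self) final {
        if (name == "echo") {
          return PackedFunc([sptr_to_self](TVMArgs args, TVMRetValue* rv) {
            // A wrong arity here is an internal bug, hence ICHECK, not CHECK.
            ICHECK_EQ(args.num_args, 1) << "echo expects exactly one argument";
            std::string key = args[0];
            *rv = key;
          });
        }
        return PackedFunc();  // unknown name -> undefined function
      }
    };
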
diff --git a/src/relay/backend/interpreter.cc b/src/relay/backend/interpreter.cc
index e58c23b..993fb1a 100644
--- a/src/relay/backend/interpreter.cc
+++ b/src/relay/backend/interpreter.cc
@@ -54,7 +54,7 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
 
 inline const PackedFunc& GetPackedFunc(const std::string& name) {
   const PackedFunc* pf = tvm::runtime::Registry::Get(name);
-  CHECK(pf != nullptr) << "Cannot find function " << name << " in registry";
+  ICHECK(pf != nullptr) << "Cannot find function " << name << " in registry";
   return *pf;
 }
 
@@ -347,12 +347,12 @@ class Interpreter : public ExprFunctor<ObjectRef(const Expr& n)>,
         }
       }
     }
-    CHECK_EQ(arg_counter, cfunc->inputs.size()) << "Shape function input sizes mismatch";
+    ICHECK_EQ(arg_counter, cfunc->inputs.size()) << "Shape function input sizes mismatch";
 
     auto fset_shape_output = [&](size_t i, Type val_type) {
       // TODO(@icemelon): allow recursive tuple
       const TensorTypeNode* rtype = val_type.as<TensorTypeNode>();
-      CHECK(rtype != nullptr);
+      ICHECK(rtype != nullptr);
       int64_t ndim = rtype->shape.size();
       auto arr = NDArray::Empty({ndim}, DataType::Int(64), cpu_ctx);
       outputs[i] = arr;
@@ -371,7 +371,7 @@ class Interpreter : public ExprFunctor<ObjectRef(const Expr& n)>,
       auto tt = Downcast<TensorType>(ret_type);
       fset_shape_output(0, tt);
     }
-    CHECK_EQ(cfunc->outputs.size(), out_cnt) << "Shape function output sizes mismatch";
+    ICHECK_EQ(cfunc->outputs.size(), out_cnt) << "Shape function output sizes mismatch";
 
     PackedFunc shape_func;
     Module m;
@@ -428,7 +428,7 @@ class Interpreter : public ExprFunctor<ObjectRef(const Expr& n)>,
     if (const auto* tuple_type = func->body->checked_type().as<TupleTypeNode>()) {
       arg_len += tuple_type->fields.size();
     } else {
-      CHECK(func->body->checked_type().as<TensorTypeNode>()) << func->body->checked_type();
+      ICHECK(func->body->checked_type().as<TensorTypeNode>()) << func->body->checked_type();
       arg_len += 1;
     }
     std::vector<TVMValue> values(arg_len);
@@ -439,7 +439,7 @@ class Interpreter : public ExprFunctor<ObjectRef(const Expr& n)>,
       const auto nd_array = Downcast<NDArray>(val);
       setter(i, nd_array);
       DLContext arg_ctx = nd_array->ctx;
-      CHECK(arg_ctx.device_type == context_.device_type && arg_ctx.device_id == context_.device_id)
+      ICHECK(arg_ctx.device_type == context_.device_type && arg_ctx.device_id == context_.device_id)
           << "Interpreter expect context to be " << context_ << ", but get " << arg_ctx;
     };
 
@@ -461,12 +461,12 @@ class Interpreter : public ExprFunctor<ObjectRef(const Expr& n)>,
     // return type.
     auto fset_output = [&](size_t i, Type val_type) {
       const TensorTypeNode* rtype = val_type.as<TensorTypeNode>();
-      CHECK(rtype != nullptr);
+      ICHECK(rtype != nullptr);
       // Allocate output tensor.
       std::vector<int64_t> shape;
       for (auto dim : rtype->shape) {
         const auto* ivalue = tir::as_const_int(dim);
-        CHECK(ivalue) << "expected concrete dimensions";
+        ICHECK(ivalue) << "expected concrete dimensions";
         shape.push_back(ivalue[0]);
       }
       DLDataType dtype = rtype->dtype;
@@ -480,14 +480,14 @@ class Interpreter : public ExprFunctor<ObjectRef(const Expr& n)>,
     bool is_dyn = IsDynamic(ret_type);
 
     if (is_dyn) {
-      CHECK(func->HasNonzeroAttr(attr::kPrimitive));
+      ICHECK(func->HasNonzeroAttr(attr::kPrimitive));
       out_shapes = ComputeDynamicShape(func, args);
     }
 
     PackedFunc packed_func = engine_->JIT(CCacheKey(func, target_));
     TVMRetValue rv;
     if (const TupleTypeNode* rtype = func->body->checked_type().as<TupleTypeNode>()) {
-      CHECK(!is_dyn || out_shapes.size() == rtype->fields.size());
+      ICHECK(!is_dyn || out_shapes.size() == rtype->fields.size());
       std::vector<ObjectRef> fields;
       for (size_t i = 0; i < rtype->fields.size(); ++i) {
         if (is_dyn) {
@@ -503,7 +503,7 @@ class Interpreter : public ExprFunctor<ObjectRef(const Expr& n)>,
     } else {
       ObjectRef out_tensor;
       if (is_dyn) {
-        CHECK_EQ(out_shapes.size(), 1);
+        ICHECK_EQ(out_shapes.size(), 1);
         auto sh = out_shapes[0];
         auto tt = Downcast<TensorType>(ret_type);
         out_tensor = fset_output(0, TensorType(sh, tt->dtype));
@@ -526,16 +526,16 @@ class Interpreter : public ExprFunctor<ObjectRef(const Expr& n)>,
     // Allocate a frame with the parameters and free variables.
     tvm::Map<Var, ObjectRef> locals;
 
-    CHECK_EQ(func->params.size(), args.size());
+    ICHECK_EQ(func->params.size(), args.size());
 
     for (size_t i = 0; i < func->params.size(); i++) {
-      CHECK_EQ(locals.count(func->params[i]), 0);
+      ICHECK_EQ(locals.count(func->params[i]), 0);
       locals.Set(func->params[i], args[i]);
     }
 
     // Add the var to value mappings from the Closure's environment.
     for (auto it = closure->env.begin(); it != closure->env.end(); ++it) {
-      CHECK_EQ(locals.count((*it).first), 0);
+      ICHECK_EQ(locals.count((*it).first), 0);
       locals.Set((*it).first, (*it).second);
     }
 
@@ -593,9 +593,9 @@ class Interpreter : public ExprFunctor<ObjectRef(const Expr& n)>,
   ObjectRef VisitExpr_(const TupleGetItemNode* op) final {
     ObjectRef val = Eval(op->tuple);
     const auto* adt_obj = val.as<ADTObj>();
-    CHECK(adt_obj) << "interal error: when evaluating TupleGetItem expected an ADT value";
+    ICHECK(adt_obj) << "internal error: when evaluating TupleGetItem expected an ADT value";
     auto adt = GetRef<ADT>(adt_obj);
-    CHECK_LT(static_cast<size_t>(op->index), adt.size()) << "internal error: index out of bounds";
+    ICHECK_LT(static_cast<size_t>(op->index), adt.size()) << "internal error: index out of bounds";
     return adt[op->index];
   }
 
@@ -607,7 +607,7 @@ class Interpreter : public ExprFunctor<ObjectRef(const Expr& n)>,
       cpu_ctx.device_type = kDLCPU;
       cpu_ctx.device_id = 0;
       NDArray cpu_array = nd_array.CopyTo(cpu_ctx);
-      CHECK_EQ(DataType(cpu_array->dtype), DataType::Bool());
+      ICHECK_EQ(DataType(cpu_array->dtype), DataType::Bool());
       // TODO(@jroesch, @MK): Refactor code into helper from DCE.
       if (reinterpret_cast<uint8_t*>(cpu_array->data)[0]) {
         return Eval(op->true_branch);
@@ -656,11 +656,11 @@ class Interpreter : public ExprFunctor<ObjectRef(const Expr& n)>,
 
   bool VisitPattern_(const PatternConstructorNode* op, const ObjectRef& v) final {
     const ConstructorValueObj* cvn = v.as<ConstructorValueObj>();
-    CHECK(cvn) << "need to be a constructor for match";
-    CHECK_NE(op->constructor->tag, -1);
-    CHECK_NE(cvn->tag, -1);
+    ICHECK(cvn) << "needs to be a constructor for match";
+    ICHECK_NE(op->constructor->tag, -1);
+    ICHECK_NE(cvn->tag, -1);
     if (op->constructor->tag == cvn->tag) {
-      CHECK_EQ(op->patterns.size(), cvn->fields.size());
+      ICHECK_EQ(op->patterns.size(), cvn->fields.size());
       for (size_t i = 0; i < op->patterns.size(); ++i) {
         if (!VisitPattern(op->patterns[i], cvn->fields[i])) {
           return false;
@@ -673,7 +673,7 @@ class Interpreter : public ExprFunctor<ObjectRef(const Expr& n)>,
 
   bool VisitPattern_(const PatternTupleNode* op, const ObjectRef& v) final {
     auto adt = Downcast<ADT>(v);
-    CHECK_EQ(op->patterns.size(), adt.size());
+    ICHECK_EQ(op->patterns.size(), adt.size());
     for (size_t i = 0; i < op->patterns.size(); ++i) {
       if (!VisitPattern(op->patterns[i], adt[i])) {
         return false;
@@ -730,7 +730,7 @@ TypedPackedFunc<ObjectRef(Expr)> CreateInterpreter(IRModule mod, DLContext conte
   auto intrp = std::make_shared<Interpreter>(mod, context, target);
   auto packed = [intrp](Expr expr) {
     auto f = DetectFeature(expr);
-    CHECK(f.is_subset_of(FeatureSet::All() - fGraph));
+    ICHECK(f.is_subset_of(FeatureSet::All() - fGraph));
     return intrp->Eval(expr);
   };
   return TypedPackedFunc<ObjectRef(Expr)>(packed);
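
The If case above illustrates the standard way to branch on a
device-resident boolean: copy the scalar to CPU, assert its dtype, then read
the first byte. Condensed into a standalone helper (a sketch, assuming a
rank-0 bool NDArray):

    #include <cstdint>
    #include <tvm/runtime/ndarray.h>

    bool ReadBoolScalar(const tvm::runtime::NDArray& nd_array) {
      DLContext cpu_ctx;
      cpu_ctx.device_type = kDLCPU;
      cpu_ctx.device_id = 0;
      tvm::runtime::NDArray cpu_array = nd_array.CopyTo(cpu_ctx);
      ICHECK_EQ(tvm::runtime::DataType(cpu_array->dtype),
                tvm::runtime::DataType::Bool());
      return reinterpret_cast<uint8_t*>(cpu_array->data)[0] != 0;
    }
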
diff --git a/src/relay/backend/param_dict.cc b/src/relay/backend/param_dict.cc
index ef4b658..1d7e08a 100644
--- a/src/relay/backend/param_dict.cc
+++ b/src/relay/backend/param_dict.cc
@@ -37,7 +37,7 @@ namespace relay {
 using namespace runtime;
 
 TVM_REGISTER_GLOBAL("tvm.relay._save_param_dict").set_body([](TVMArgs args, TVMRetValue* rv) {
-  CHECK_EQ(args.size() % 2, 0u);
+  ICHECK_EQ(args.size() % 2, 0u);
   // `args` is in the form "key, value, key, value, ..."
   size_t num_params = args.size() / 2;
   std::vector<std::string> names;
@@ -74,14 +74,14 @@ TVM_REGISTER_GLOBAL("tvm.relay._load_param_dict").set_body([](TVMArgs args, TVMR
   dmlc::MemoryStringStream memstrm(&bytes);
   dmlc::Stream* strm = &memstrm;
   uint64_t header, reserved;
-  CHECK(strm->Read(&header)) << "Invalid parameters file format";
-  CHECK(header == kTVMNDArrayListMagic) << "Invalid parameters file format";
-  CHECK(strm->Read(&reserved)) << "Invalid parameters file format";
-  CHECK(strm->Read(&names)) << "Invalid parameters file format";
+  ICHECK(strm->Read(&header)) << "Invalid parameters file format";
+  ICHECK(header == kTVMNDArrayListMagic) << "Invalid parameters file format";
+  ICHECK(strm->Read(&reserved)) << "Invalid parameters file format";
+  ICHECK(strm->Read(&names)) << "Invalid parameters file format";
   uint64_t sz;
   strm->Read(&sz, sizeof(sz));
   size_t size = static_cast<size_t>(sz);
-  CHECK(size == names.size()) << "Invalid parameters file format";
+  ICHECK(size == names.size()) << "Invalid parameters file format";
   tvm::Array<NamedNDArray> ret;
   for (size_t i = 0; i < size; ++i) {
     tvm::runtime::NDArray temp;
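
On the save side, tvm.relay._save_param_dict receives its arguments
flattened as "key, value, key, value, ...", which is why the arity must be
even. A sketch of the unpacking loop inside the PackedFunc body:

    // `args` alternates name and array; an odd count is a caller bug.
    ICHECK_EQ(args.size() % 2, 0u);
    size_t num_params = args.size() / 2;
    std::vector<std::string> names;
    std::vector<tvm::runtime::NDArray> arrays;
    for (size_t i = 0; i < num_params; ++i) {
      std::string name = args[i * 2];
      tvm::runtime::NDArray arr = args[i * 2 + 1];
      names.push_back(name);
      arrays.push_back(arr);
    }
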
diff --git a/src/relay/backend/utils.h b/src/relay/backend/utils.h
index 07f4226..3def635 100644
--- a/src/relay/backend/utils.h
+++ b/src/relay/backend/utils.h
@@ -81,7 +81,7 @@ class MemoizedExprTranslator : public ::tvm::relay::ExprFunctor<OutputType(const
    * \return The result of the call
    */
   virtual OutputType VisitExpr(const Expr& n) {
-    CHECK(n.defined());
+    ICHECK(n.defined());
     auto it = memo_.find(n);
     if (it != memo_.end()) {
       return it->second;
@@ -115,7 +115,7 @@ inline const PackedFunc* GetPackedFunc(const std::string& func_name) {
 template <typename R, typename... Args>
 inline const runtime::TypedPackedFunc<R(Args...)> GetTypedPackedFunc(const std::string& func_name) {
   auto* pf = GetPackedFunc(func_name);
-  CHECK(pf != nullptr) << "can not find packed function";
+  ICHECK(pf != nullptr) << "cannot find packed function";
   return runtime::TypedPackedFunc<R(Args...)>(*pf);
 }
 
@@ -129,7 +129,7 @@ inline std::vector<int64_t> GetIntShape(const Array<IndexExpr>& shape) {
   std::vector<int64_t> ret;
   for (const auto& dim : shape) {
     const int64_t* pval = tir::as_const_int(dim);
-    CHECK(pval) << "Expect integer, but received: " << dim->GetTypeKey();
+    ICHECK(pval) << "Expect integer, but received: " << dim->GetTypeKey();
     ret.push_back(*pval);
   }
   return ret;
@@ -192,8 +192,8 @@ inline relay::Function BindParamsByName(
   }
   Expr bound_expr = relay::Bind(func, bind_dict);
   Function ret = Downcast<Function>(bound_expr);
-  CHECK(ret.defined()) << "The returning type is expected to be a Relay Function."
-                       << "\n";
+  ICHECK(ret.defined()) << "The returned value is expected to be a Relay Function."
+                        << "\n";
   return ret;
 }
 
@@ -204,11 +204,11 @@ inline relay::Function BindParamsByName(
  */
 inline std::vector<int> GetShape(const Type& type) {
   const auto* ttype = type.as<TensorTypeNode>();
-  CHECK(ttype) << "Expect TensorTypeNode";
+  ICHECK(ttype) << "Expect TensorTypeNode";
   std::vector<int> shape;
   for (size_t i = 0; i < ttype->shape.size(); ++i) {
     auto* val = ttype->shape[i].as<IntImmNode>();
-    CHECK(val);
+    ICHECK(val);
     shape.push_back(val->value);
   }
   return shape;
@@ -223,7 +223,7 @@ inline std::vector<int> GetShape(const Type& type) {
  */
 inline bool IsOp(const CallNode* call, const std::string& op_name) {
   const auto* op_node = call->op.as<OpNode>();
-  CHECK(op_node) << "Expects a single op.";
+  ICHECK(op_node) << "Expects a single op.";
   Op op = GetRef<Op>(op_node);
   return op == Op::Get(op_name);
 }
@@ -239,14 +239,14 @@ inline bool IsOp(const CallNode* call, const std::string& op_name) {
 
 inline const CallNode* GetRootCall(const CallNode* current_call, int depth,
                                    const std::vector<std::string>& expected_op_names) {
-  CHECK(current_call && depth >= 0 && static_cast<size_t>(depth) < expected_op_names.size() &&
-        IsOp(current_call, expected_op_names[depth]));
+  ICHECK(current_call && depth >= 0 && static_cast<size_t>(depth) < expected_op_names.size() &&
+         IsOp(current_call, expected_op_names[depth]));
 
   if (depth == 0) {
     return current_call;
   }
 
-  CHECK_GT(current_call->args.size(), 0);
+  ICHECK_GT(current_call->args.size(), 0);
 
   const auto* next_call = current_call->args[0].as<CallNode>();
   return GetRootCall(next_call, depth - 1, expected_op_names);
@@ -260,7 +260,7 @@ inline const CallNode* GetRootCall(const CallNode* current_call, int depth,
  */
 inline std::string GetExtSymbol(const Function& func) {
   const auto name_node = func->GetAttr<String>(tvm::attr::kGlobalSymbol);
-  CHECK(name_node.defined()) << "Fail to retrieve external symbol.";
+  ICHECK(name_node.defined()) << "Fail to retrieve external symbol.";
   return std::string(name_node.value());
 }
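
GetRootCall deserves a usage note: it walks a fused call chain from the
outermost op down the args[0] edges to its root, checking each op name along
the way. For a fused body relu(bias_add(conv2d(x, w), b)), the root
convolution is recovered from the outermost call like so (relu_call is the
outermost CallNode*; the op names are the standard Relay ones):

    // depth counts the calls below the current one along the args[0] chain.
    const CallNode* conv =
        GetRootCall(relu_call, 2, {"nn.conv2d", "nn.bias_add", "nn.relu"});
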
 
diff --git a/src/relay/backend/vm/compiler.cc b/src/relay/backend/vm/compiler.cc
index c3bf805..4a7e5ee 100644
--- a/src/relay/backend/vm/compiler.cc
+++ b/src/relay/backend/vm/compiler.cc
@@ -60,19 +60,19 @@ Pass InlinePrimitives();
 
 Pass ManifestAlloc(Target target_host, vm::TargetsMap targets) {
   auto f = tvm::runtime::Registry::Get("relay.transform.ManifestAlloc");
-  CHECK(f != nullptr) << "unable to load allocation manifestation pass";
+  ICHECK(f != nullptr) << "unable to load allocation manifestation pass";
   return (*f)(target_host, targets);
 }
 
 Pass MemoryPlan() {
   auto f = tvm::runtime::Registry::Get("relay.transform.MemoryPlan");
-  CHECK(f != nullptr) << "unable to load the memory planning pass";
+  ICHECK(f != nullptr) << "unable to load the memory planning pass";
   return (*f)();
 }
 
 Pass LiftConstants() {
   auto f = tvm::runtime::Registry::Get("relay.transform.LiftConstants");
-  CHECK(f != nullptr) << "unable to load the constant lifting pass";
+  ICHECK(f != nullptr) << "unable to load the constant lifting pass";
   return (*f)();
 }
 
@@ -178,7 +178,7 @@ TreeObjectPtr BuildDecisionTreeFromPattern(MatchValuePtr data, Pattern pattern,
     return TreeBranchNode::Make(cond, then_branch, else_branch);
   } else {
     const auto* pt = pattern.as<PatternTupleNode>();
-    CHECK(pt) << "unhandled case: " << AsText(pattern, false);
+    ICHECK(pt) << "unhandled case: " << AsText(pattern, false);
     size_t field_index = 0;
     for (auto& p : pt->patterns) {
       auto d = std::make_shared<AccessField>(data, field_index++);
@@ -209,10 +209,10 @@ std::vector<int64_t> ToAllocTensorShape(NDArray shape) {
   if (shape->ndim == 0) {
     return raw_shape;
   }
-  CHECK_EQ(shape->ndim, 1u);
-  CHECK_EQ(shape->dtype.code, 0U) << "The dtype of constant shape must be int32 or int64, but got "
-                                  << DLDataType2String(shape->dtype);
-  CHECK(shape->dtype.bits == 64 || shape->dtype.bits == 32)
+  ICHECK_EQ(shape->ndim, 1u);
+  ICHECK_EQ(shape->dtype.code, 0U) << "The dtype of constant shape must be int32 or int64, but got "
+                                   << DLDataType2String(shape->dtype);
+  ICHECK(shape->dtype.bits == 64 || shape->dtype.bits == 32)
       << "The dtype of constant shape must be int32 or int64, but got"
       << DLDataType2String(shape->dtype);
 
@@ -247,7 +247,7 @@ int GetFallbackDevice() {
   Optional<Integer> opt_fallback_dev =
       pass_ctx->GetConfig("relay.fallback_device_type", Integer(static_cast<int>(kDLCPU)));
   auto fallback_dev = opt_fallback_dev.value();
-  CHECK_GT(fallback_dev->value, 0U);
+  ICHECK_GT(fallback_dev->value, 0U);
   return fallback_dev->value;
 }
 
@@ -271,7 +271,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
     // We then assign register num to the free variables
     for (auto param : func->params) {
       auto arg_register = NewRegister();
-      CHECK_EQ(i, arg_register);
+      ICHECK_EQ(i, arg_register);
       var_register_map_.insert({param, arg_register});
       params_.push_back(param->name_hint());
       ++i;
@@ -281,7 +281,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
       Function inner_func = Downcast<Function>(func->body);
       for (auto param : inner_func->params) {
         auto arg_register = NewRegister();
-        CHECK_EQ(i, arg_register);
+        ICHECK_EQ(i, arg_register);
         var_register_map_.insert({param, arg_register});
         params_.push_back(param->name_hint());
         ++i;
@@ -295,10 +295,10 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
     std::vector<Index> params_device_type;
     for (const auto& it : func->params) {
       if (!expr_device_map_.empty()) {
-        CHECK_GT(expr_device_map_.count(it), 0U);
+        ICHECK_GT(expr_device_map_.count(it), 0U);
         params_device_type.push_back(expr_device_map_[it].device_type);
       } else {
-        CHECK_EQ(targets_.size(), 1U);
+        ICHECK_EQ(targets_.size(), 1U);
         params_device_type.push_back((targets_.begin())->first);
       }
     }
@@ -311,7 +311,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
 
   inline void Emit(const Instruction& instr) {
     DLOG(INFO) << "VMCompiler::Emit: instr=" << instr;
-    CHECK((int)instr.op < 100) << "Invalid opcode " << (int)instr.op;
+    ICHECK((int)instr.op < 100) << "Invalid opcode " << (int)instr.op;
     switch (instr.op) {
       case Opcode::AllocADT:
       case Opcode::AllocTensor:
@@ -348,7 +348,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
       context_->const_device_type.push_back(targets_.begin()->first);
     } else {
       auto con = GetRef<Constant>(const_node);
-      CHECK_GT(expr_device_map_.count(con), 0U);
+      ICHECK_GT(expr_device_map_.count(con), 0U);
       context_->const_device_type.push_back(expr_device_map_[con].device_type);
     }
     context_->constants.push_back(const_node->data);
@@ -358,7 +358,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
   void VisitExpr_(const VarNode* var_node) {
     auto var = GetRef<Var>(var_node);
     auto reg_it = this->var_register_map_.find(var);
-    CHECK(reg_it != this->var_register_map_.end());
+    ICHECK(reg_it != this->var_register_map_.end());
     last_register_ = reg_it->second;
   }
 
@@ -400,7 +400,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
     auto var = GetRef<GlobalVar>(gvar);
     auto func = context_->module->Lookup(var);
     auto it = context_->global_map.find(var);
-    CHECK(it != context_->global_map.end());
+    ICHECK(it != context_->global_map.end());
     // Allocate closure with zero free vars
     Emit(Instruction::AllocClosure(it->second, 0, {}, NewRegister()));
   }
@@ -458,7 +458,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
     auto cfunc = engine_->LowerShapeFunc(key);
     int op_index = -1;
     // pick the only function inside the context
-    CHECK_EQ(cfunc->funcs->functions.size(), 1);
+    ICHECK_EQ(cfunc->funcs->functions.size(), 1);
     auto pfunc = Downcast<tir::PrimFunc>((*cfunc->funcs->functions.begin()).second);
     if (context_->seen_funcs.count(pfunc) == 0) {
       op_index = context_->cached_funcs.size();
@@ -477,7 +477,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
 
     for (auto output : outputs) {
       auto reg = var_register_map_.find(Downcast<Var>(output));
-      CHECK(reg != var_register_map_.end())
+      ICHECK(reg != var_register_map_.end())
           << "internal error: all variables should be in the register mapping";
       argument_registers.push_back(reg->second);
     }
@@ -489,16 +489,16 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
   void EmitInvokeTVMOp(const Function& func, const Expr& inputs, const Expr& outputs) {
     std::vector<Index> argument_registers;
 
-    CHECK(func->GetAttr<Integer>(attr::kPrimitive, 0) != 0)
+    ICHECK(func->GetAttr<Integer>(attr::kPrimitive, 0) != 0)
         << "internal error: invoke_tvm_op requires the first argument to be a relay::Function";
 
     auto input_tuple = inputs.as<TupleNode>();
-    CHECK(input_tuple) << "internal error: invoke_tvm_op inputs must be a tuple,"
-                       << "please file a bug in the memory manifestation pass";
+    ICHECK(input_tuple) << "internal error: invoke_tvm_op inputs must be a tuple, "
+                        << "please file a bug in the memory manifestation pass";
 
     auto output_tuple = outputs.as<TupleNode>();
-    CHECK(output_tuple) << "internal error: invoke_tvm_op outputs must be a tuple,"
-                        << "please file a bug in the memory manifestation pass";
+    ICHECK(output_tuple) << "internal error: invoke_tvm_op outputs must be a tuple, "
+                         << "please file a bug in the memory manifestation pass";
 
     for (auto input : input_tuple->fields) {
       VisitExpr(input);
@@ -507,7 +507,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
 
     for (auto output : output_tuple->fields) {
       auto reg = var_register_map_.find(Downcast<Var>(output));
-      CHECK(reg != var_register_map_.end())
+      ICHECK(reg != var_register_map_.end())
           << "internal error: all variables should be in the register mapping";
       argument_registers.push_back(reg->second);
     }
@@ -520,11 +520,11 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
       // Next generate the invoke instruction.
       if (expr_device_map_.empty()) {
         // homogeneous execution.
-        CHECK_EQ(targets_.size(), 1U);
+        ICHECK_EQ(targets_.size(), 1U);
         const auto& it = targets_.begin();
         target = (*it).second;
       } else {
-        CHECK_GT(expr_device_map_.count(func), 0U)
+        ICHECK_GT(expr_device_map_.count(func), 0U)
             << "Found not annotated expression, please make sure "
                "context analysis has been executed";
         int dev_type = expr_device_map_[func].device_type;
@@ -545,7 +545,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
       context_->cached_funcs.push_back(cfunc);
     } else {
       // TODO(jroesch): support lowered funcs for multiple targets
-      CHECK_EQ(cfunc->funcs->functions.size(), 1);
+      ICHECK_EQ(cfunc->funcs->functions.size(), 1);
       auto pfunc = Downcast<tir::PrimFunc>((*cfunc->funcs->functions.begin()).second);
       if (context_->seen_funcs.find(pfunc) == context_->seen_funcs.end()) {
         op_index = context_->cached_funcs.size();
@@ -571,16 +571,16 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
       matcher
           .Match("vm.invoke_tvm_op",
                  [this](const Array<Expr>& args, const Attrs& attrs, const Array<Type>& type_arg) {
-                   CHECK_EQ(args.size(), 3);
+                   ICHECK_EQ(args.size(), 3);
                    EmitInvokeTVMOp(Downcast<Function>(args[0]), args[1], args[2]);
                  })
           .Match("memory.alloc_tensor",
                  [this](const Array<Expr>& args, const Attrs& attrs, const Array<Type>& type_arg) {
-                   CHECK_EQ(args.size(), 3);
+                   ICHECK_EQ(args.size(), 3);
 
                    // Get the attributes.
                    auto alloc_attrs = attrs.as<AllocTensorAttrs>();
-                   CHECK(alloc_attrs != nullptr) << "must be the alloc tensor attrs";
+                   ICHECK(alloc_attrs != nullptr) << "must be the alloc tensor attrs";
                    auto dtype = alloc_attrs->dtype;
 
                    // The storage will be passed dynamically.
@@ -612,22 +612,22 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
           .Match("memory.alloc_storage",
                  [this, call_node](const Array<Expr>& args, const Attrs& attrs,
                                    const Array<Type>& type_arg) {
-                   CHECK_EQ(args.size(), 2);
+                   ICHECK_EQ(args.size(), 2);
                    // Compute the size of the allocation.
                    this->VisitExpr(args[0]);
                    auto size_register = last_register_;
 
-                   CHECK(args[1].as<ConstantNode>());
+                   ICHECK(args[1].as<ConstantNode>());
                    NDArray alignment_arr = args[1].as<ConstantNode>()->data;
-                   CHECK_EQ(alignment_arr->dtype.code, 0U)
+                   ICHECK_EQ(alignment_arr->dtype.code, 0U)
                        << "The dtype of constant shape must be int32 or int64, but got "
                        << DLDataType2String(alignment_arr->dtype);
-                   CHECK_EQ(alignment_arr->dtype.bits, 64U);
+                   ICHECK_EQ(alignment_arr->dtype.bits, 64U);
                    Index alignment = reinterpret_cast<int64_t*>(alignment_arr->data)[0];
 
                    // Get the dtype hint from the attributes.
                    auto alloc_attrs = attrs.as<AllocStorageAttrs>();
-                   CHECK(alloc_attrs != nullptr) << "must be the AllocStorage attrs";
+                   ICHECK(alloc_attrs != nullptr) << "must be the AllocStorage attrs";
                    auto dtype = alloc_attrs->dtype;
 
                    Index device_type;
@@ -637,7 +637,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
                      auto& kv = *(targets_.begin());
                      device_type = kv.first;
                    } else {
-                     CHECK_GT(expr_device_map_.count(GetRef<Call>(call_node)), 0U)
+                     ICHECK_GT(expr_device_map_.count(GetRef<Call>(call_node)), 0U)
                          << " The alloc_storage node is not annotated";
                      device_type = expr_device_map_[GetRef<Call>(call_node)].device_type;
                    }
@@ -647,7 +647,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
                  })
           .Match("vm.shape_func",
                  [this](const Array<Expr>& args, const Attrs& attrs, const Array<Type>& type_arg) {
-                   CHECK_EQ(args.size(), 3);
+                   ICHECK_EQ(args.size(), 3);
                    auto shape_func = Downcast<Function>(args[0]);
                    auto inputs = Downcast<Tuple>(args[1]);
                    auto outputs = Downcast<Tuple>(args[2]);
@@ -655,11 +655,11 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
                  })
           .Match("vm.shape_of",
                  [this](const Array<Expr>& args, const Attrs& attrs, const Array<Type>& type_arg) {
-                   CHECK_EQ(args.size(), 1U);
+                   ICHECK_EQ(args.size(), 1U);
                    // Get the attributes.
                    const auto* shape_of_attrs = attrs.as<ShapeOfAttrs>();
-                   CHECK(shape_of_attrs) << "Must be the shape_of attrs";
-                   CHECK_EQ(shape_of_attrs->dtype.bits(), 64)
+                   ICHECK(shape_of_attrs) << "Must be the shape_of attrs";
+                   ICHECK_EQ(shape_of_attrs->dtype.bits(), 64)
                        << "The dtype of shape of must be int64, but got"
                        << DLDataType2String(shape_of_attrs->dtype);
                    this->VisitExpr(args[0]);
@@ -667,7 +667,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
                  })
           .Match("vm.reshape_tensor",
                  [this](const Array<Expr>& args, const Attrs& attrs, const Array<Type>& type_arg) {
-                   CHECK_EQ(args.size(), 2u);
+                   ICHECK_EQ(args.size(), 2u);
                    this->VisitExpr(args[0]);
                    auto tensor_reg = last_register_;
                    this->VisitExpr(args[1]);
@@ -676,12 +676,12 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
                  })
           .Match("device_copy",
                  [this](const Array<Expr>& args, const Attrs& attrs, const Array<Type>& type_arg) {
-                   CHECK_EQ(args.size(), 1U);
+                   ICHECK_EQ(args.size(), 1U);
                    this->VisitExpr(args[0]);
                    auto src_reg = last_register_;
 
                    auto device_copy_attrs = attrs.as<DeviceCopyAttrs>();
-                   CHECK(device_copy_attrs != nullptr) << "Must be the device copy attrs";
+                   ICHECK(device_copy_attrs != nullptr) << "Must be the device copy attrs";
                    Index src_device_type = device_copy_attrs->src_dev_type;
                    Index dst_device_type = device_copy_attrs->dst_dev_type;
                    Emit(Instruction::DeviceCopy(src_reg, src_device_type, dst_device_type,
@@ -711,7 +711,7 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
       // calling convention.
       auto global = GetRef<GlobalVar>(global_node);
       auto it = context_->global_map.find(global);
-      CHECK(it != context_->global_map.end());
+      ICHECK(it != context_->global_map.end());
       DLOG(INFO) << "VisitExpr_: generating invoke for " << global->name_hint
                  << " with func_index=" << it->second;
 
@@ -855,13 +855,13 @@ class VMFunctionCompiler : ExprFunctor<void(const Expr& expr)> {
 PackedFunc VMCompiler::GetFunction(const std::string& name, const ObjectPtr<Object>& sptr_to_self) {
   if (name == "lower") {
     return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
-      CHECK_EQ(args.num_args, 3);
+      ICHECK_EQ(args.num_args, 3);
       IRModule mod = args[0];
       this->Lower(mod, args[1], args[2]);
     });
   } else if (name == "codegen") {
     return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
-      CHECK_EQ(args.num_args, 0);
+      ICHECK_EQ(args.num_args, 0);
       this->Codegen();
     });
   } else if (name == "get_executable") {
@@ -884,7 +884,7 @@ PackedFunc VMCompiler::GetFunction(const std::string& name, const ObjectPtr<Obje
     });
   } else if (name == "optimize") {
     return PackedFunc([sptr_to_self, this](TVMArgs args, TVMRetValue* rv) {
-      CHECK_EQ(args.num_args, 3);
+      ICHECK_EQ(args.num_args, 3);
       *rv = this->OptimizeModule(args[0], args[1], args[2]);
     });
   } else {
@@ -900,7 +900,7 @@ void VMCompiler::SetParam(const std::string& name, runtime::NDArray data_in) {
 void VMCompiler::Lower(IRModule mod, const TargetsMap& targets, const tvm::Target& target_host) {
   if (params_.size()) {
     BaseFunc base_func = mod->Lookup("main");
-    CHECK(base_func->IsInstance<FunctionNode>())
+    ICHECK(base_func->IsInstance<FunctionNode>())
         << "VM compiler expects to compile relay::Function";
     auto f = relay::backend::BindParamsByName(Downcast<Function>(base_func), params_);
     auto gvar = mod->GetGlobalVar("main");
@@ -936,7 +936,7 @@ void VMCompiler::Lower(IRModule mod, const TargetsMap& targets, const tvm::Targe
       auto vm_func = func_compiler.Compile(gvar, func);
 
       size_t func_index = context_.global_map.at(gvar);
-      CHECK(func_index < exec_->functions.size());
+      ICHECK(func_index < exec_->functions.size());
       exec_->functions[func_index] = vm_func;
     }
   }
@@ -1123,7 +1123,7 @@ void VMCompiler::Codegen() {
 
     if (target_str == "ext_dev") {
       // Collect metadata in functions that are handled by external codegen.
-      CHECK(mod->ContainGlobalVar(cfunc->func_name));
+      ICHECK(mod->ContainGlobalVar(cfunc->func_name));
       backend::ConstantUpdater const_visit(cfunc->func_name, &params_);
       const_visit(Downcast<Function>(mod->Lookup(cfunc->func_name)));
       continue;
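
ToAllocTensorShape, partially visible above, converts a constant shape
tensor (rank 0 or 1, dtype int32 or int64) into a plain std::vector<int64_t>.
A sketch of the conversion under those same assumptions (name illustrative):

    #include <cstdint>
    #include <vector>
    #include <tvm/runtime/ndarray.h>

    std::vector<int64_t> ToShapeVector(const tvm::runtime::NDArray& shape) {
      std::vector<int64_t> out;
      if (shape->ndim == 0) return out;  // scalar shape -> rank-0 tensor
      ICHECK_EQ(shape->ndim, 1u);
      ICHECK_EQ(shape->dtype.code, 0U) << "shape dtype must be int32 or int64";
      for (int64_t i = 0; i < shape->shape[0]; ++i) {
        out.push_back(shape->dtype.bits == 64
                          ? reinterpret_cast<const int64_t*>(shape->data)[i]
                          : reinterpret_cast<const int32_t*>(shape->data)[i]);
      }
      return out;
    }
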
diff --git a/src/relay/backend/vm/lambda_lift.cc b/src/relay/backend/vm/lambda_lift.cc
index 22b8364..f21d096 100644
--- a/src/relay/backend/vm/lambda_lift.cc
+++ b/src/relay/backend/vm/lambda_lift.cc
@@ -82,7 +82,7 @@ class LambdaLifter : public ExprMutator {
       auto var = GetRef<Var>(var_node);
       if (!letrec_.empty() && var == letrec_.back()) {
         auto it = lambda_map_.find(var);
-        CHECK(it != lambda_map_.end());
+        ICHECK(it != lambda_map_.end());
         return Call(it->second, call->args, call_node->attrs, call_node->type_args);
       }
     }
@@ -154,11 +154,12 @@ class LambdaLifter : public ExprMutator {
       lifted_func = MarkClosure(lifted_func);
     }
 
-    CHECK(lifted_func.defined());
+    ICHECK(lifted_func.defined());
 
     if (module_->ContainGlobalVar(name)) {
       const auto existing_func = module_->Lookup(name);
-      CHECK(tvm::StructuralEqual()(lifted_func, existing_func)) << "lifted function hash collision";
+      ICHECK(tvm::StructuralEqual()(lifted_func, existing_func))
+          << "lifted function hash collision";
       // If an identical function already exists, use its global var.
       global = module_->GetGlobalVar(name);
     } else {
diff --git a/src/relay/ir/dataflow_matcher.cc b/src/relay/ir/dataflow_matcher.cc
index 50c05f2..536e659 100644
--- a/src/relay/ir/dataflow_matcher.cc
+++ b/src/relay/ir/dataflow_matcher.cc
@@ -85,7 +85,7 @@ void DFPatternMatcher::ClearMap(size_t watermark) {
 
 bool DFPatternMatcher::VisitDFPattern(const DFPattern& pattern, const Expr& expr) {
   if (memoize_ && memo_.count(pattern)) {
-    CHECK_EQ(memo_[pattern].size(), 1);
+    ICHECK_EQ(memo_[pattern].size(), 1);
     return expr.same_as(memo_[pattern][0]);
   } else {
     auto watermark = matched_nodes_.size();
@@ -133,7 +133,7 @@ bool MatchRetValue(const ObjectRef& lhs, const TVMRetValue& rhs) {
       }
       break;
     default:
-      CHECK(false) << "Unsupported type code in Pattern Node " << rhs.type_code();
+      ICHECK(false) << "Unsupported type code in Pattern Node " << rhs.type_code();
   }
   return false;
 }
@@ -644,7 +644,7 @@ class PatternGrouper {
     auto body = extractor.Mutate(expr);
 
     // Verify the pattern still holds
-    CHECK(DFPatternMatcher(body).Match(pattern_, body));
+    ICHECK(DFPatternMatcher(body).Match(pattern_, body));
     group.function = Function(params, body, NullValue<Type>(), Array<TypeVar>());
     group.name = extractor.GetName();
     // Check to make sure we aren't overlapping with another group or creating an invalid fusion
@@ -765,7 +765,7 @@ class PatternRewriter : protected MixedModeMutator {
     int count = 0;
     bool equal = true;
     static auto* structural_equal = runtime::Registry::Get("node.StructuralEqual");
-    CHECK(structural_equal) << "node.StructuralEqual is not registered.";
+    ICHECK(structural_equal) << "node.StructuralEqual is not registered.";
     do {
       last = post;
       for (auto callback : callbacks) {
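
The loop being modified here rewrites to a fixed point: it reapplies every
callback until a full pass leaves the expression structurally unchanged.
Schematically (ApplyCallback is a placeholder for the body of the real
loop, which goes through the pattern groups):

    Expr last, post = initial;
    do {
      last = post;
      for (const auto& callback : callbacks) {
        post = ApplyCallback(callback, post);  // placeholder helper
      }
    } while (!tvm::StructuralEqual()(last, post));
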
diff --git a/src/relay/ir/expr.cc b/src/relay/ir/expr.cc
index 237cb35..f2e0b36 100644
--- a/src/relay/ir/expr.cc
+++ b/src/relay/ir/expr.cc
@@ -47,7 +47,7 @@ TVM_STATIC_IR_FUNCTOR(ReprPrinter, vtable)
     .set_dispatch<ConstantNode>([](const ObjectRef& ref, ReprPrinter* p) {
       auto* node = static_cast<const ConstantNode*>(ref.get());
       const PackedFunc* fprint = Registry::Get("relay._constant_repr");
-      CHECK(fprint) << "unable to find printing function for constants";
+      ICHECK(fprint) << "unable to find printing function for constants";
       std::string data = (*fprint)(GetRef<Constant>(node));
       p->stream << "Constant(" << data << ")";
     });
@@ -56,8 +56,8 @@ TensorType ConstantNode::tensor_type() const {
   auto dtype = DataType(data->dtype);
   Array<tvm::PrimExpr> shape;
   for (int i = 0; i < data->ndim; i++) {
-    CHECK_LE(data->shape[i], std::numeric_limits<int32_t>::max());
-    CHECK_GE(data->shape[i], std::numeric_limits<int32_t>::min());
+    ICHECK_LE(data->shape[i], std::numeric_limits<int32_t>::max());
+    ICHECK_GE(data->shape[i], std::numeric_limits<int32_t>::min());
     shape.push_back(tvm::IntImm(DataType::Int(32), data->shape[i]));
   }
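
tensor_type() rebuilds a Relay TensorType from the constant's backing
NDArray; the bounds checks exist because Relay shapes are stored as 32-bit
IntImms. A small usage sketch, reusing the NDArray::Empty call seen in the
interpreter diff above:

    tvm::TensorType ExampleTensorType() {
      DLContext cpu_ctx{kDLCPU, 0};
      tvm::runtime::NDArray data = tvm::runtime::NDArray::Empty(
          {2, 3}, tvm::runtime::DataType::Float(32), cpu_ctx);
      tvm::relay::Constant c(data);
      return c->tensor_type();  // TensorType with shape [2, 3], dtype float32
    }
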
 
diff --git a/src/relay/ir/expr_functor.cc b/src/relay/ir/expr_functor.cc
index a09179b..a22b69c 100644
--- a/src/relay/ir/expr_functor.cc
+++ b/src/relay/ir/expr_functor.cc
@@ -102,8 +102,8 @@ void ExpandDataflow(Expr expr, FCheckVisited fcheck_visited, FVisitLeaf fvisit_l
 }
 
 MixedModeVisitor::MixedModeVisitor(int visit_limit) {
-  CHECK(visit_limit > 0) << "Dataflow visit limit must be greater than 0";
-  CHECK(visit_limit < 10) << "Dataflow visit limit must be less than 10";
+  ICHECK(visit_limit > 0) << "Dataflow visit limit must be greater than 0";
+  ICHECK(visit_limit < 10) << "Dataflow visit limit must be less than 10";
   visit_limit_ = visit_limit;
 }
 
@@ -524,13 +524,13 @@ class ExprBinder : public MixedModeMutator, PatternMutator {
   using MixedModeMutator::VisitExpr_;
 
   Expr VisitExpr_(const LetNode* op) final {
-    CHECK(!args_map_.count(op->var)) << "Cannot bind an internel variable in let";
+    ICHECK(!args_map_.count(op->var)) << "Cannot bind an internal variable in let";
     return ExprMutator::VisitExpr_(op);
   }
 
   Expr VisitExpr_(const FunctionNode* op) final {
     for (Var param : op->params) {
-      CHECK(!args_map_.count(param)) << "Cannnot bind an internal function parameter";
+      ICHECK(!args_map_.count(param)) << "Cannot bind an internal function parameter";
     }
     return ExprMutator::VisitExpr_(op);
   }
@@ -553,7 +553,7 @@ class ExprBinder : public MixedModeMutator, PatternMutator {
   }
 
   Var VisitVar(const Var& v) final {
-    CHECK(!args_map_.count(v)) << "Cannnot bind an internal pattern variable";
+    ICHECK(!args_map_.count(v)) << "Cannot bind an internal pattern variable";
     return v;
   }
 
@@ -584,7 +584,7 @@ Expr Bind(const Expr& expr, const tvm::Map<Var, Expr>& args_map) {
       }
     }
     ret = Function(new_params, new_body, func->ret_type, func->type_params, func->attrs);
-    CHECK_EQ(FreeVars(expr).size(), FreeVars(ret).size());
+    ICHECK_EQ(FreeVars(expr).size(), FreeVars(ret).size());
     return std::move(ret);
   } else {
     return ExprBinder(args_map).VisitExpr(expr);
@@ -596,7 +596,7 @@ TVM_REGISTER_GLOBAL("relay.ir.Bind").set_body([](TVMArgs args, TVMRetValue* ret)
   if (input->IsInstance<ExprNode>()) {
     *ret = Bind(Downcast<Expr>(input), args[1]);
   } else {
-    CHECK(input->IsInstance<TypeNode>());
+    ICHECK(input->IsInstance<TypeNode>());
     *ret = Bind(Downcast<Type>(input), args[1]);
   }
 });
diff --git a/src/relay/ir/function.cc b/src/relay/ir/function.cc
index 1439e8b..c9920a6 100644
--- a/src/relay/ir/function.cc
+++ b/src/relay/ir/function.cc
@@ -29,8 +29,8 @@ namespace relay {
 Function::Function(tvm::Array<Var> params, Expr body, Type ret_type,
                    tvm::Array<TypeVar> type_params, DictAttrs attrs, Span span) {
   ObjectPtr<FunctionNode> n = make_object<FunctionNode>();
-  CHECK(params.defined());
-  CHECK(type_params.defined());
+  ICHECK(params.defined());
+  ICHECK(type_params.defined());
   n->params = std::move(params);
   n->body = std::move(body);
   n->ret_type = std::move(ret_type);
diff --git a/src/relay/ir/indexed_graph.h b/src/relay/ir/indexed_graph.h
index 7050827..4bbb741 100644
--- a/src/relay/ir/indexed_graph.h
+++ b/src/relay/ir/indexed_graph.h
@@ -115,8 +115,8 @@ class IndexedGraph {
       return nullptr;
     }
     while (lhs != rhs) {
-      CHECK(lhs);
-      CHECK(rhs);
+      ICHECK(lhs);
+      ICHECK(rhs);
       if (lhs->depth_ < rhs->depth_) {
         rhs = rhs->dominator_parent_;
       } else if (lhs->depth_ > rhs->depth_) {
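
The function being touched here is a textbook least-common-ancestor walk
over the dominator tree: raise whichever node is deeper until both pointers
meet; the meeting point dominates both inputs. Reassembled from the fragment
above as a self-contained sketch (Node stands in for the IndexedGraph node
type):

    struct Node {
      Node* dominator_parent_;
      size_t depth_;
    };

    Node* LeastCommonAncestor(Node* lhs, Node* rhs) {
      if (lhs == nullptr || rhs == nullptr) return nullptr;
      while (lhs != rhs) {
        ICHECK(lhs);
        ICHECK(rhs);
        if (lhs->depth_ < rhs->depth_) {
          rhs = rhs->dominator_parent_;   // rhs is deeper: raise it
        } else if (lhs->depth_ > rhs->depth_) {
          lhs = lhs->dominator_parent_;   // lhs is deeper: raise it
        } else {
          lhs = lhs->dominator_parent_;   // equal depth: raise both
          rhs = rhs->dominator_parent_;
        }
      }
      return lhs;
    }
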
diff --git a/src/relay/ir/transform.cc b/src/relay/ir/transform.cc
index b5f4d15..596f812 100644
--- a/src/relay/ir/transform.cc
+++ b/src/relay/ir/transform.cc
@@ -128,7 +128,7 @@ IRModule FunctionPassNode::operator()(IRModule mod, const PassContext& pass_ctx)
 
   const PassInfo& pass_info = Info();
 
-  CHECK(mod.defined());
+  ICHECK(mod.defined());
 
   DLOG(INFO) << "Executing function pass : " << pass_info->name
              << " with opt level: " << pass_info->opt_level;
diff --git a/src/relay/op/algorithm/argsort.cc b/src/relay/op/algorithm/argsort.cc
index a240974..455d413 100644
--- a/src/relay/op/algorithm/argsort.cc
+++ b/src/relay/op/algorithm/argsort.cc
@@ -33,10 +33,10 @@ bool ArgsortRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                 const TypeReporter& reporter) {
   // `types` contains: [data, result]
   const ArgsortAttrs* param = attrs.as<ArgsortAttrs>();
-  CHECK_EQ(types.size(), 2);
+  ICHECK_EQ(types.size(), 2);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) {
-    CHECK(types[0].as<IncompleteTypeNode>())
+    ICHECK(types[0].as<IncompleteTypeNode>())
         << "Argsort: expect input type to be TensorType but get " << types[0];
     return false;
   }
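
ArgsortRel shows the general contract of a Relay type relation: `types`
holds the input types followed by the result type; returning false defers
the relation until the solver learns more, while a type that is neither
Tensor nor Incomplete is an internal error. A minimal relation following
that contract (ExampleRel and its identity output are illustrative):

    bool ExampleRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                    const TypeReporter& reporter) {
      ICHECK_EQ(types.size(), 2);  // [data, result]
      const auto* data = types[0].as<TensorTypeNode>();
      if (data == nullptr) {
        ICHECK(types[0].as<IncompleteTypeNode>())
            << "expected input type to be TensorType but got " << types[0];
        return false;  // retry once the input type is solved
      }
      reporter->Assign(types[1], TensorType(data->shape, data->dtype));
      return true;
    }
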
diff --git a/src/relay/op/algorithm/topk.cc b/src/relay/op/algorithm/topk.cc
index 14308dd..b0e4b5d 100644
--- a/src/relay/op/algorithm/topk.cc
+++ b/src/relay/op/algorithm/topk.cc
@@ -34,15 +34,15 @@ bool TopKRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
              const TypeReporter& reporter) {
   // `types` contains: [data, result]
   const TopKAttrs* param = attrs.as<TopKAttrs>();
-  CHECK_EQ(types.size(), 2);
+  ICHECK_EQ(types.size(), 2);
   const auto* data = types[0].as<TensorTypeNode>();
-  CHECK(data);
+  ICHECK(data);
   int ndim = data->shape.size();
   int axis = param->axis;
   if (axis < 0) {
     axis += ndim;
   }
-  CHECK(axis >= 0 && axis < ndim);
+  ICHECK(axis >= 0 && axis < ndim);
   Array<IndexExpr> out_shape;
   for (int i = 0; i < ndim; ++i) {
     if (i != axis) {
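
The axis normalization above follows the usual NumPy convention: a negative
axis counts from the end, so for a rank-4 input, axis = -1 normalizes to 3.
In isolation:

    // e.g. ndim = 4, param->axis = -1  ->  axis = 3 (the last dimension)
    int axis = param->axis;
    if (axis < 0) axis += ndim;
    ICHECK(axis >= 0 && axis < ndim) << "axis out of range for rank " << ndim;
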
diff --git a/src/relay/op/dyn/algorithm/topk.cc b/src/relay/op/dyn/algorithm/topk.cc
index 1c88730..0ce0a18 100644
--- a/src/relay/op/dyn/algorithm/topk.cc
+++ b/src/relay/op/dyn/algorithm/topk.cc
@@ -33,31 +33,31 @@ bool TopKRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
              const TypeReporter& reporter) {
   // `types` contains: [data, k, result]
   const TopKAttrs* param = attrs.as<TopKAttrs>();
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   const auto* k = types[1].as<TensorTypeNode>();
   if (data == nullptr) {
-    CHECK(types[0].as<IncompleteTypeNode>())
+    ICHECK(types[0].as<IncompleteTypeNode>())
         << "tile: expect input type to be TensorType but get " << types[0];
     return false;
   }
   if (k == nullptr) {
-    CHECK(types[1].as<IncompleteTypeNode>())
+    ICHECK(types[1].as<IncompleteTypeNode>())
         << "tile: expect input type to be TensorType but get " << types[1];
     return false;
   }
-  CHECK(k->shape.size() <= 1) << "Parameter k must be a Scalar or a Tensor of shape (1, )";
+  ICHECK(k->shape.size() <= 1) << "Parameter k must be a Scalar or a Tensor of shape (1, )";
   if (k->shape.size() == 1) {
     const IntImmNode* k_shape = k->shape[0].as<IntImmNode>();
-    CHECK(k_shape) << "Parameter k must have static shape";
-    CHECK_EQ(k_shape->value, 1) << "Parameter k must be a Scalar or a Tensor of shape (1, )";
+    ICHECK(k_shape) << "Parameter k must have static shape";
+    ICHECK_EQ(k_shape->value, 1) << "Parameter k must be a Scalar or a Tensor of shape (1, )";
   }
   int ndim = data->shape.size();
   int axis = param->axis;
   if (axis < 0) {
     axis += ndim;
   }
-  CHECK(axis >= 0 && axis < ndim);
+  ICHECK(axis >= 0 && axis < ndim);
   Array<IndexExpr> out_shape;
   for (int i = 0; i < ndim; ++i) {
     if (i != axis) {
diff --git a/src/relay/op/dyn/image/resize.cc b/src/relay/op/dyn/image/resize.cc
index 23e1740..6581250 100644
--- a/src/relay/op/dyn/image/resize.cc
+++ b/src/relay/op/dyn/image/resize.cc
@@ -36,17 +36,17 @@ TVM_REGISTER_NODE_TYPE(ResizeAttrs);
 bool ResizeRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                const TypeReporter& reporter) {
   // {data, size, out}
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
 
   static const Layout kNCHW("NCHW");
 
   const ResizeAttrs* param = attrs.as<ResizeAttrs>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   const Layout in_layout(param->layout);
   auto layout_converter = tir::BijectiveLayout(in_layout, kNCHW);
-  CHECK(layout_converter.defined())
+  ICHECK(layout_converter.defined())
       << "Resize only support input layouts that are convertible from NCHW."
       << " But got " << in_layout;
 
diff --git a/src/relay/op/dyn/nn/pad.cc b/src/relay/op/dyn/nn/pad.cc
index 73daccb..42ec784 100644
--- a/src/relay/op/dyn/nn/pad.cc
+++ b/src/relay/op/dyn/nn/pad.cc
@@ -41,7 +41,7 @@ namespace dyn {
 bool PadRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
             const TypeReporter& reporter) {
   // types = [data_type, pad_width_type, pad_value_type, ret_type]
-  CHECK_EQ(types.size(), 4);
+  ICHECK_EQ(types.size(), 4);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
 
@@ -52,13 +52,13 @@ bool PadRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
   if (pad_value == nullptr) return false;
 
   int data_rank = data->shape.size();
-  CHECK(data_rank) << "Data shape must have static rank";
+  ICHECK(data_rank) << "Data shape must have static rank";
 
   int pad_width_rank = pad_width->shape.size();
-  CHECK_EQ(pad_width_rank, 2) << "Pad width must be 2D";
+  ICHECK_EQ(pad_width_rank, 2) << "Pad width must be 2D";
 
   const PadAttrs* param = attrs.as<PadAttrs>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
 
   std::vector<IndexExpr> oshape;
   for (int i = 0; i < data_rank; i++) {
@@ -72,7 +72,7 @@ bool PadRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 Array<te::Tensor> PadCompute(const Attrs& attrs, const Array<te::Tensor>& inputs,
                              const Type& out_type) {
   const auto* param = attrs.as<PadAttrs>();
-  CHECK(param);
+  ICHECK(param);
 
   auto data = inputs[0];
   auto pad_width = inputs[1];
@@ -88,7 +88,7 @@ Array<te::Tensor> PadCompute(const Attrs& attrs, const Array<te::Tensor>& inputs
   }
 
   const auto* out_ttype = out_type.as<TensorTypeNode>();
-  CHECK(out_ttype != nullptr);
+  ICHECK(out_ttype != nullptr);
 
   return Array<te::Tensor>{topi::pad(inputs[0], pad_before, pad_after, pad_value, "T_pad",
                                      topi::kElementWise, param->pad_mode,
diff --git a/src/relay/op/dyn/nn/upsampling.cc b/src/relay/op/dyn/nn/upsampling.cc
index 8a28475..9386975 100644
--- a/src/relay/op/dyn/nn/upsampling.cc
+++ b/src/relay/op/dyn/nn/upsampling.cc
@@ -41,7 +41,7 @@ namespace dyn {
 bool UpSamplingRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                    const TypeReporter& reporter) {
   // types = [data_type, scale_h_type, scale_w_type, ret_type]
-  CHECK_EQ(types.size(), 4);
+  ICHECK_EQ(types.size(), 4);
   const auto* data = types[0].as<TensorTypeNode>();
   const auto* scale_h = types[1].as<TensorTypeNode>();
   const auto* scale_w = types[2].as<TensorTypeNode>();
@@ -49,16 +49,16 @@ bool UpSamplingRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
   if (scale_h == nullptr) return false;
   if (scale_w == nullptr) return false;
 
-  CHECK_EQ(scale_h->shape.size(), 0);
-  CHECK_EQ(scale_w->shape.size(), 0);
+  ICHECK_EQ(scale_h->shape.size(), 0);
+  ICHECK_EQ(scale_w->shape.size(), 0);
   static const Layout kNCHW("NCHW");
 
   const UpSamplingAttrs* param = attrs.as<UpSamplingAttrs>();
-  CHECK(param);
+  ICHECK(param);
   const Layout in_layout(param->layout);
 
   auto layout_converter = tir::BijectiveLayout(in_layout, kNCHW);
-  CHECK(layout_converter.defined())
+  ICHECK(layout_converter.defined())
       << "UpSampling only supports input layouts that are convertible from NCHW."
       << " But got " << in_layout;
 
@@ -122,18 +122,18 @@ RELAY_REGISTER_OP("dyn.nn.upsampling")
 bool UpSampling3DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                      const TypeReporter& reporter) {
   // types = [data_type, scale_d_type, scale_h_type, scale_w_type, ret_type]
-  CHECK_EQ(types.size(), 5);
+  ICHECK_EQ(types.size(), 5);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
 
   static const Layout kNCDHW("NCDHW");
 
   const UpSampling3DAttrs* param = attrs.as<UpSampling3DAttrs>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   const Layout in_layout(param->layout);
 
   auto layout_converter = tir::BijectiveLayout(in_layout, kNCDHW);
-  CHECK(layout_converter.defined())
+  ICHECK(layout_converter.defined())
       << "UpSampling3D only support input layouts that are convertible from NCDHW."
       << " But got " << in_layout;
 
diff --git a/src/relay/op/dyn/nn/upsampling.h b/src/relay/op/dyn/nn/upsampling.h
index 79ed65b..acdc541 100644
--- a/src/relay/op/dyn/nn/upsampling.h
+++ b/src/relay/op/dyn/nn/upsampling.h
@@ -43,7 +43,7 @@ Array<Array<Layout> > UpsamplingInferCorrectLayout(const Attrs& attrs,
   // NOTE: Discard "const" qualifier here.
   T* params = const_cast<T*>(attrs.as<T>());
   if (new_in_layouts.defined()) {
-    CHECK_GT(new_in_layouts.size(), 0);
+    ICHECK_GT(new_in_layouts.size(), 0);
 
     Layout raw_layout(params->layout);
     Layout input = new_in_layouts[0];
diff --git a/src/relay/op/dyn/tensor/transform.cc b/src/relay/op/dyn/tensor/transform.cc
index 863ad64..119eba3 100644
--- a/src/relay/op/dyn/tensor/transform.cc
+++ b/src/relay/op/dyn/tensor/transform.cc
@@ -47,11 +47,11 @@ namespace dyn {
 bool ReshapeRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                 const TypeReporter& reporter) {
   // types: [data, newshape, result]
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
 
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) {
-    CHECK(types[0].as<IncompleteTypeNode>())
+    ICHECK(types[0].as<IncompleteTypeNode>())
         << "reshape: expect input type to be TensorType but get " << types[0];
     return false;
   }
@@ -59,7 +59,7 @@ bool ReshapeRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
   Array<IndexExpr> oshape;
   const auto* newshape = types[1].as<TensorTypeNode>();
   if (newshape == nullptr) {
-    CHECK(types[1].as<IncompleteTypeNode>())
+    ICHECK(types[1].as<IncompleteTypeNode>())
         << "reshape: expect input type to be TensorType but get " << types[1];
     return false;
   }
@@ -76,7 +76,7 @@ bool ReshapeRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 Array<te::Tensor> ReshapeCompute(const Attrs& attrs, const Array<te::Tensor>& inputs,
                                  const Type& out_type) {
   const auto* out_ttype = out_type.as<TensorTypeNode>();
-  CHECK(out_ttype != nullptr);
+  ICHECK(out_ttype != nullptr);
   Array<IndexExpr> newshape;
   for (auto val : out_ttype->shape) {
     if (val->IsInstance<tir::AnyNode>()) {
@@ -149,21 +149,21 @@ RELAY_REGISTER_OP("dyn.reshape")
 bool TileRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
              const TypeReporter& reporter) {
   // `types` contains: [data, reps, result]
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   const auto* reps = types[1].as<TensorTypeNode>();
   if (data == nullptr) {
-    CHECK(types[0].as<IncompleteTypeNode>())
+    ICHECK(types[0].as<IncompleteTypeNode>())
         << "tile: expect input type to be TensorType but get " << types[0];
     return false;
   }
   if (reps == nullptr) {
-    CHECK(types[1].as<IncompleteTypeNode>())
+    ICHECK(types[1].as<IncompleteTypeNode>())
         << "tile: expect input type to be TensorType but get " << types[1];
     return false;
   }
   const IntImmNode* reps_shape = reps->shape[0].as<IntImmNode>();
-  CHECK(reps_shape) << "Parameter reps must have static shape";
+  ICHECK(reps_shape) << "Parameter reps must have static shape";
   const size_t ndim = data->shape.size();
   const size_t rndim = reps_shape->value;
   size_t tndim = (ndim > rndim) ? ndim : rndim;
@@ -178,7 +178,7 @@ bool TileRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 
 Array<te::Tensor> TileCompute(const Attrs& attrs, const Array<te::Tensor>& inputs,
                               const Type& out_type) {
-  CHECK_EQ(inputs.size(), 2);
+  ICHECK_EQ(inputs.size(), 2);
   const auto* out_ttype = out_type.as<TensorTypeNode>();
   size_t rndim = inputs[1]->shape[0].as<IntImmNode>()->value;
   return {topi::dyn_tile(inputs[0], out_ttype->shape, rndim)};
@@ -212,7 +212,7 @@ RELAY_REGISTER_OP("dyn.tile")
 bool BroadCastToRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                     const TypeReporter& reporter) {
   // types = [data_type, broadcast_shape_type, ret_type]
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
 
   const auto* input_type = types[0].as<TensorTypeNode>();
   const auto* target_type = types[1].as<TensorTypeNode>();
@@ -225,8 +225,9 @@ bool BroadCastToRel(const Array<Type>& types, int num_inputs, const Attrs& attrs
   auto out_dtype = input_type->dtype;
   // rank must be static
   const IntImmNode* rank = target_type->shape[0].as<IntImmNode>();
-  CHECK(rank) << "Target shape must have static rank";  // rank must be static even in dyn pass
-                                                        // could add support for dyn rank in futures
+  ICHECK(rank)
+      << "Target shape must have static rank";  // rank must be static even in dyn pass
+                                                // could add support for dyn rank in the future
 
   std::vector<IndexExpr> oshape;
   for (int i = 0; i < rank->value; ++i) {
@@ -266,13 +267,13 @@ RELAY_REGISTER_OP("dyn.broadcast_to")
 bool InitOpRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                const TypeReporter& reporter) {
   // types = [zeros_shape, ret_type]
-  CHECK_EQ(types.size(), 2);
+  ICHECK_EQ(types.size(), 2);
   const InitOpAttrs* param = attrs.as<InitOpAttrs>();
   const auto* fill_shape = types[0].as<TensorTypeNode>();
   DataType out_dtype = param->dtype;
 
   const IntImmNode* shape_shape = fill_shape->shape[0].as<IntImmNode>();
-  CHECK(shape_shape) << "Parameter shape must have static rank";
+  ICHECK(shape_shape) << "Parameter shape must have static rank";
 
   std::vector<IndexExpr> oshape;
   for (int i = 0; i < shape_shape->value; ++i) {
@@ -324,9 +325,9 @@ RELAY_REGISTER_OP("dyn.ones")
 bool OneHotRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                const TypeReporter& reporter) {
   // `types` contains: [indices, on_value, off_value, depth, result]
-  CHECK_EQ(types.size(), 5);
+  ICHECK_EQ(types.size(), 5);
   const auto* indices = types[0].as<TensorTypeNode>();
-  CHECK(indices);
+  ICHECK(indices);
 
   const auto param = attrs.as<OneHotAttrs>();
 
@@ -349,7 +350,7 @@ bool OneHotRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 Array<te::Tensor> OneHotCompute(const Attrs& attrs, const Array<te::Tensor>& inputs,
                                 const Type& out_type) {
   const auto* param = attrs.as<OneHotAttrs>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   const auto* out_ttype = out_type.as<TensorTypeNode>();
   return Array<te::Tensor>{topi::one_hot(inputs[0], inputs[1](), inputs[2](), -1, param->axis,
                                          param->dtype, out_ttype->shape)};
@@ -393,7 +394,7 @@ RELAY_REGISTER_OP("dyn.one_hot")
 
 bool FullRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
              const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const InitOpAttrs* param = attrs.as<InitOpAttrs>();
   const auto* fill_value = types[0].as<TensorTypeNode>();
   const auto* fill_shape = types[1].as<TensorTypeNode>();
@@ -406,11 +407,11 @@ bool FullRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
     out_dtype = fill_value->dtype;
   }
 
-  CHECK_EQ(fill_value->shape.size(), 0)
+  ICHECK_EQ(fill_value->shape.size(), 0)
       << "Fill value should be a scalar but has dimension " << fill_value->shape.size() << ".";
 
   const IntImmNode* rank = fill_shape->shape[0].as<IntImmNode>();
-  CHECK(rank) << "Parameter shape must have static rank";
+  ICHECK(rank) << "Parameter shape must have static rank";
 
   std::vector<IndexExpr> oshape;
   for (int i = 0; i < rank->value; ++i) {
@@ -449,7 +450,7 @@ RELAY_REGISTER_OP("dyn.full")
 bool StridedSliceRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                      const TypeReporter& reporter) {
   // [data, begin, end, strides, out]
-  CHECK_EQ(types.size(), 5);
+  ICHECK_EQ(types.size(), 5);
   const StridedSliceAttrs* param = attrs.as<StridedSliceAttrs>();
   if (param == nullptr) {
     return false;
@@ -501,9 +502,9 @@ Array<te::Tensor> StridedSliceCompute(const Attrs& attrs, const Array<te::Tensor
   te::Tensor strides = inputs[3];
   // Dynamic computation
   int64_t data_rank = data->shape.size();
-  CHECK(begin->shape[0].as<IntImmNode>()->value == data_rank &&
-        end->shape[0].as<IntImmNode>()->value == data_rank &&
-        strides->shape[0].as<IntImmNode>()->value == data_rank)
+  ICHECK(begin->shape[0].as<IntImmNode>()->value == data_rank &&
+         end->shape[0].as<IntImmNode>()->value == data_rank &&
+         strides->shape[0].as<IntImmNode>()->value == data_rank)
       << "begin, end, and strides are required to have the same length"
       << " if they are dynamic variables.";
   return Array<te::Tensor>{DynamicStridedSlice(data, begin, end, strides)};
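
The dynamic relations in this file share one idiom: the values of a shape,
reps, or begin/end/strides tensor may be unknown until runtime, but its rank
must be a static IntImmNode so the relation knows how many output dimensions
to emit. A condensed sketch of that pattern under the same signatures used
above (ExampleRel is hypothetical; the real relations add dtype selection and
per-op error messages):

    // Hypothetical relation showing the static-rank idiom.
    bool ExampleRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                    const TypeReporter& reporter) {
      // types = [data, shape_tensor, result]
      ICHECK_EQ(types.size(), 3);
      const auto* data = types[0].as<TensorTypeNode>();
      const auto* shape_t = types[1].as<TensorTypeNode>();
      if (data == nullptr || shape_t == nullptr) return false;  // not inferred yet
      // Runtime values, compile-time rank:
      const IntImmNode* rank = shape_t->shape[0].as<IntImmNode>();
      ICHECK(rank) << "shape tensor must have static rank";
      std::vector<IndexExpr> oshape;
      for (int64_t i = 0; i < rank->value; ++i) {
        oshape.push_back(Any());  // every extent stays symbolic
      }
      reporter->Assign(types[2], TensorType(oshape, data->dtype));
      return true;
    }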
diff --git a/src/relay/op/image/dilation2d.cc b/src/relay/op/image/dilation2d.cc
index 462f11f..1f8c7ec 100644
--- a/src/relay/op/image/dilation2d.cc
+++ b/src/relay/op/image/dilation2d.cc
@@ -62,7 +62,7 @@ Expr MakeDilation2D(Expr data, Expr weight, Array<IndexExpr> strides, Array<Inde
 template <typename AttrType>
 bool Dilation2DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                    const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   const auto* weight = types[1].as<TensorTypeNode>();
   if (data == nullptr) return false;
@@ -70,23 +70,23 @@ bool Dilation2DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
   static const Layout kOIHW("IHW");
 
   const AttrType* param = attrs.as<AttrType>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   const Layout in_layout(param->data_layout);
   const Layout kernel_layout(param->kernel_layout);
 
   const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCHW);
-  CHECK(trans_in_layout.defined())
+  ICHECK(trans_in_layout.defined())
       << "Dilation2D only support input layouts that are convertible from NCHW."
       << " But got " << in_layout;
 
   const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIHW);
-  CHECK(trans_kernel_layout.defined())
+  ICHECK(trans_kernel_layout.defined())
       << "Dilation2D only support kernel layouts that are convertible from OIHW."
       << " But got " << kernel_layout;
 
   Layout out_layout(param->data_layout);
   const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNCHW);
-  CHECK(trans_out_layout.defined())
+  ICHECK(trans_out_layout.defined())
       << "Dilation2D only support output layouts that are convertible from NCHW."
       << " But got " << out_layout;
 
diff --git a/src/relay/op/image/grid_sample.cc b/src/relay/op/image/grid_sample.cc
index bc69891..d5fa68a 100644
--- a/src/relay/op/image/grid_sample.cc
+++ b/src/relay/op/image/grid_sample.cc
@@ -35,21 +35,21 @@ TVM_REGISTER_NODE_TYPE(AffineGridAttrs);
 
 bool AffineGridRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                    const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 2);
+  ICHECK_EQ(types.size(), 2);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
   auto batch_size = data->shape[0];
 
   const AffineGridAttrs* param = attrs.as<AffineGridAttrs>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
 
   Array<IndexExpr> oshape;
 
-  CHECK(data->shape.size() == 3U && reporter->AssertEQ(data->shape[1], 2) &&
-        reporter->AssertEQ(data->shape[2], 3))
+  ICHECK(data->shape.size() == 3U && reporter->AssertEQ(data->shape[1], 2) &&
+         reporter->AssertEQ(data->shape[2], 3))
       << "data should be an"
          "affine matrix with shape [batch_size, 2, 3]";
-  CHECK(param->target_shape.defined() && param->target_shape.size() == 2)
+  ICHECK(param->target_shape.defined() && param->target_shape.size() == 2)
       << "target_shape should be 2D";
   oshape.push_back(batch_size);
   oshape.push_back(2);
@@ -97,12 +97,12 @@ TVM_REGISTER_NODE_TYPE(GridSampleAttrs);
 
 bool GridSampleRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                    const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   const auto* grid = types[1].as<TensorTypeNode>();
   if (!data || !grid) return false;
   const auto* param = attrs.as<GridSampleAttrs>();
-  CHECK(param);
+  ICHECK(param);
   static const Layout kNCHW("NCHW");
   const Layout in_layout(param->layout);
   auto layout_converter = tir::BijectiveLayout(in_layout, kNCHW);
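
grid_sample.cc, resize.cc and the upsampling relations above all normalize
layouts the same way: construct a tir::BijectiveLayout from the user's layout
to a canonical one, reject anything non-convertible, and do the shape
arithmetic in canonical coordinates. Sketched as a fragment of a relation
body (param, data and reporter as in the functions above; out_h and out_w
stand for whatever extents the op computes; BackwardShape assumed per its use
elsewhere in TVM):

    static const Layout kNCHW("NCHW");
    const Layout in_layout(param->layout);  // e.g. "NHWC" from the attrs

    // Undefined when the two layouts cannot be mapped one-to-one.
    auto converter = tir::BijectiveLayout(in_layout, kNCHW);
    ICHECK(converter.defined())
        << "layout must be convertible from NCHW, but got " << in_layout;

    // Shape math happens in NCHW coordinates ...
    Array<IndexExpr> nchw = converter.ForwardShape(data->shape);
    Array<IndexExpr> oshape{nchw[0], nchw[1], out_h, out_w};

    // ... and the result is mapped back into the user's layout.
    reporter->Assign(types[1], TensorType(converter.BackwardShape(oshape), data->dtype));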
diff --git a/src/relay/op/image/resize.cc b/src/relay/op/image/resize.cc
index 41b7afe..b8875e4 100644
--- a/src/relay/op/image/resize.cc
+++ b/src/relay/op/image/resize.cc
@@ -35,17 +35,17 @@ TVM_REGISTER_NODE_TYPE(ResizeAttrs);
 
 bool ResizeRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 2);
+  ICHECK_EQ(types.size(), 2);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
 
   static const Layout kNCHW("NCHW");
 
   const ResizeAttrs* param = attrs.as<ResizeAttrs>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   const Layout in_layout(param->layout);
   auto layout_converter = tir::BijectiveLayout(in_layout, kNCHW);
-  CHECK(layout_converter.defined())
+  ICHECK(layout_converter.defined())
       << "Resize only support input layouts that are convertible from NCHW."
       << " But got " << in_layout;
 
@@ -104,17 +104,17 @@ TVM_REGISTER_NODE_TYPE(Resize3dAttrs);
 
 bool Resize3dRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                  const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 2);
+  ICHECK_EQ(types.size(), 2);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
 
   static const Layout kNCDHW("NCDHW");
 
   const Resize3dAttrs* param = attrs.as<Resize3dAttrs>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   const Layout in_layout(param->layout);
   auto layout_converter = tir::BijectiveLayout(in_layout, kNCDHW);
-  CHECK(layout_converter.defined())
+  ICHECK(layout_converter.defined())
       << "Resize3d only support input layouts that are convertible from NCDHW."
       << " But got " << in_layout;
 
@@ -175,14 +175,14 @@ TVM_REGISTER_NODE_TYPE(CropAndResizeAttrs);
 
 bool CropAndResizeRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                       const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 4);
+  ICHECK_EQ(types.size(), 4);
   const auto* data = types[0].as<TensorTypeNode>();
   const auto* boxes = types[1].as<TensorTypeNode>();
   const auto* box_indices = types[2].as<TensorTypeNode>();
   if (data == nullptr || boxes == nullptr || box_indices == nullptr) return false;
 
   const CropAndResizeAttrs* param = attrs.as<CropAndResizeAttrs>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   auto crop_size = param->crop_size;
 
   DataType out_dtype = param->out_dtype;
diff --git a/src/relay/op/memory/memory.cc b/src/relay/op/memory/memory.cc
index b853ef6..dc5a1eb 100644
--- a/src/relay/op/memory/memory.cc
+++ b/src/relay/op/memory/memory.cc
@@ -54,19 +54,19 @@ TVM_REGISTER_GLOBAL("relay.op.memory._make.alloc_storage")
 
 bool AllocStorageRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                      const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3u);
+  ICHECK_EQ(types.size(), 3u);
   auto size_type = types[0];
   auto tensor_type = size_type.as<TensorTypeNode>();
-  CHECK(tensor_type != nullptr);
-  CHECK_EQ(tensor_type->dtype, DataType::Int(64));
-  CHECK_EQ(tensor_type->shape.size(), 0);
+  ICHECK(tensor_type != nullptr);
+  ICHECK_EQ(tensor_type->dtype, DataType::Int(64));
+  ICHECK_EQ(tensor_type->shape.size(), 0);
   auto align_type = types[1];
   auto align_ttype = align_type.as<TensorTypeNode>();
-  CHECK(align_ttype != nullptr);
-  CHECK_EQ(align_ttype->dtype, DataType::Int(64));
-  CHECK_EQ(align_ttype->shape.size(), 0);
+  ICHECK(align_ttype != nullptr);
+  ICHECK_EQ(align_ttype->dtype, DataType::Int(64));
+  ICHECK_EQ(align_ttype->shape.size(), 0);
   auto mod = reporter->GetModule();
-  CHECK(mod.defined());
+  ICHECK(mod.defined());
   auto storage_name = mod->GetGlobalTypeVar("Storage");
   auto storage = TypeCall(storage_name, {});
   reporter->Assign(types[2], storage);
@@ -107,10 +107,10 @@ TVM_REGISTER_GLOBAL("relay.op.memory._make.alloc_tensor")
 std::vector<int64_t> FromConstShape(Constant konst) {
   runtime::NDArray shape = konst->data;
   std::vector<int64_t> raw_shape;
-  CHECK_EQ(shape->ndim, 1u);
-  CHECK_EQ(shape->dtype.code, 0U) << "The dtype of constant shape must be int32 or int64, but got "
-                                  << runtime::DLDataType2String(shape->dtype);
-  CHECK(shape->dtype.bits == 64 || shape->dtype.bits == 32)
+  ICHECK_EQ(shape->ndim, 1u);
+  ICHECK_EQ(shape->dtype.code, 0U) << "The dtype of constant shape must be int32 or int64, but got "
+                                   << runtime::DLDataType2String(shape->dtype);
+  ICHECK(shape->dtype.bits == 64 || shape->dtype.bits == 32)
       << "The dtype of constant shape must be int32 or int64, but got"
       << runtime::DLDataType2String(shape->dtype);
 
@@ -131,28 +131,28 @@ std::vector<int64_t> FromConstShape(Constant konst) {
 
 bool AllocTensorRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                     const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 4u);
+  ICHECK_EQ(types.size(), 4u);
   auto alloc_attrs = attrs.as<AllocTensorAttrs>();
-  CHECK(alloc_attrs != nullptr) << "must be alloc_tensor attributes";
+  ICHECK(alloc_attrs != nullptr) << "must be alloc_tensor attributes";
   // First argument should be storage.
   auto mod = reporter->GetModule();
-  CHECK(mod.defined());
+  ICHECK(mod.defined());
   auto storage_name = mod->GetGlobalTypeVar("Storage");
   auto storage = relay::TypeCall(storage_name, {});
   reporter->Assign(types[0], storage);
   // Second argument should be the offset.
   auto offset_type = types[1].as<TensorTypeNode>();
-  CHECK(offset_type != nullptr) << "must be a scalar type";
+  ICHECK(offset_type != nullptr) << "must be a scalar type";
 
   // Third argument should be shape tensor.
   auto tt = types[2].as<TensorTypeNode>();
-  CHECK(tt != nullptr) << "must be tensor type";
+  ICHECK(tt != nullptr) << "must be tensor type";
 
   // Be careful about having to allocate scalars.
   int64_t dims = 0;
   if (tt->shape.size() != 0) {
     auto rank = tt->shape[0].as<tvm::IntImmNode>();
-    CHECK(rank != nullptr);
+    ICHECK(rank != nullptr);
     dims = rank->value;
   }
 
@@ -161,14 +161,14 @@ bool AllocTensorRel(const Array<Type>& types, int num_inputs, const Attrs& attrs
   if (alloc_attrs->const_shape.defined()) {
     auto con = alloc_attrs->const_shape;
     auto sh = FromConstShape(con);
-    CHECK_EQ(sh.size(), dims);
+    ICHECK_EQ(sh.size(), dims);
     Array<IndexExpr> out_shape;
     for (auto i = 0u; i < dims; i++) {
       out_shape.push_back(tvm::Integer(sh[i]));
     }
     alloc_type = TensorType(out_shape, alloc_attrs->dtype);
   } else {
-    CHECK(alloc_attrs->assert_shape.defined())
+    ICHECK(alloc_attrs->assert_shape.defined())
         << "the assert_shape must be set when const_shape is not";
     alloc_type = TensorType(alloc_attrs->assert_shape, alloc_attrs->dtype);
     return true;
@@ -198,7 +198,7 @@ RELAY_REGISTER_OP("memory.alloc_tensor")
 
 bool KillRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
              const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 2u);
+  ICHECK_EQ(types.size(), 2u);
   // TODO(@jroesch): should only support tensors.
   reporter->Assign(types[1], TupleType::Empty());
   return true;
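
For context on FromConstShape above: the ICHECKs guard a raw read of the
constant's buffer, and the body they protect only has to branch on 32- vs
64-bit data. Roughly, using the DLTensor fields reachable through
NDArray::operator-> (a sketch of the elided loop, not the verbatim source):

    std::vector<int64_t> raw_shape;
    int64_t n = shape->shape[0];  // length of the 1-D shape tensor
    if (shape->dtype.bits == 32) {
      const int32_t* p = reinterpret_cast<const int32_t*>(shape->data);
      for (int64_t i = 0; i < n; ++i) raw_shape.push_back(p[i]);
    } else {  // bits == 64; anything else was rejected by the ICHECK above
      const int64_t* p = reinterpret_cast<const int64_t*>(shape->data);
      for (int64_t i = 0; i < n; ++i) raw_shape.push_back(p[i]);
    }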
diff --git a/src/relay/op/nn/bitserial.cc b/src/relay/op/nn/bitserial.cc
index 61a1b8f..8538079 100644
--- a/src/relay/op/nn/bitserial.cc
+++ b/src/relay/op/nn/bitserial.cc
@@ -50,9 +50,9 @@ Array<Array<Layout>> BinaryConv2DInferCorrectLayout(const Attrs& attrs,
 bool BitPackRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                 const TypeReporter& reporter) {
   const BitPackAttrs* param = attrs.as<BitPackAttrs>();
-  CHECK_EQ(types.size(), 2);
+  ICHECK_EQ(types.size(), 2);
   const auto* data = types[0].as<TensorTypeNode>();
-  CHECK(data);
+  ICHECK(data);
   int ndim = data->shape.size();
   int bits = param->bits;
   int pack_axis = param->pack_axis;
@@ -120,20 +120,20 @@ TVM_REGISTER_NODE_TYPE(BinaryConv2DAttrs);
 
 bool BinaryConv2DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                      const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
 
   const BinaryConv2DAttrs* param = attrs.as<BinaryConv2DAttrs>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
 
   static const Layout kNCHW("NCHW");
 
   const Layout in_layout(param->data_layout);
   const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCHW);
   Array<IndexExpr> dshape_nchw = trans_in_layout.ForwardShape(data->shape);
-  CHECK(param->channels.defined());
-  CHECK(param->kernel_size.defined());
+  ICHECK(param->channels.defined());
+  ICHECK(param->kernel_size.defined());
   Array<IndexExpr> oshape({dshape_nchw[0], param->channels, 0, 0});
   IndexExpr pad_h, pad_w;
   GetPaddingHeightWidth(param->padding, &pad_h, &pad_w);
@@ -199,15 +199,15 @@ TVM_REGISTER_NODE_TYPE(BinaryDenseAttrs);
 
 bool BinaryDenseRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                     const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
 
   const BinaryDenseAttrs* param = attrs.as<BinaryDenseAttrs>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
 
-  CHECK(static_cast<int>(data->shape.size()) != 0);
-  CHECK(param->units.defined());
+  ICHECK(static_cast<int>(data->shape.size()) != 0);
+  ICHECK(param->units.defined());
 
   Array<tvm::PrimExpr> oshape = data->shape;
   oshape.Set((oshape.size() - 1), param->units);
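
One detail behind the oshape.Set(...) call just above: tvm's Array is a
copy-on-write container, so Set replaces an element without disturbing other
holders of the same storage. Illustratively:

    Array<tvm::PrimExpr> oshape = data->shape;    // shares storage with data->shape
    oshape.Set(oshape.size() - 1, param->units);  // copies on write; data->shape unchanged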
diff --git a/src/relay/op/nn/convolution.h b/src/relay/op/nn/convolution.h
index 935058c..f011222 100644
--- a/src/relay/op/nn/convolution.h
+++ b/src/relay/op/nn/convolution.h
@@ -40,7 +40,7 @@ namespace relay {
 template <typename AttrType>
 bool Conv1DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   const auto* weight = types[1].as<TensorTypeNode>();
   if (data == nullptr) return false;
@@ -48,23 +48,23 @@ bool Conv1DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
   static const Layout kOIW("OIW");
 
   const AttrType* param = attrs.as<AttrType>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   const Layout in_layout(param->data_layout);
   const Layout kernel_layout(param->kernel_layout);
 
   const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCW);
-  CHECK(trans_in_layout.defined())
+  ICHECK(trans_in_layout.defined())
       << "Conv only support input layouts that are convertible from NCW."
       << " But got " << in_layout;
 
   const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIW);
-  CHECK(trans_kernel_layout.defined())
+  ICHECK(trans_kernel_layout.defined())
       << "Conv only support kernel layouts that are convertible from OIW."
       << " But got " << kernel_layout;
 
   Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout);
   const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNCW);
-  CHECK(trans_out_layout.defined())
+  ICHECK(trans_out_layout.defined())
       << "Conv only support output layouts that are convertible from NCW."
       << " But got " << out_layout;
 
@@ -92,17 +92,17 @@ bool Conv1DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
     auto wshape = trans_kernel_layout.ForwardShape(weight->shape);
     if (param->kernel_size.defined()) {
       // check the size
-      CHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]))
+      ICHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]))
           << "Conv1D: shape of weight is inconsistent with kernel_size, "
           << " kernel_size=" << param->kernel_size << " wshape=" << wshape;
     }
     if (param->channels.defined()) {
-      CHECK(reporter->AssertEQ(param->channels, wshape[0]))
+      ICHECK(reporter->AssertEQ(param->channels, wshape[0]))
           << "Conv1D: shape of weight is inconsistent with channels, "
           << " channels=" << param->channels << " wshape=" << wshape;
     }
     if (!dshape_ncw[1].as<tir::AnyNode>() && !wshape[1].as<tir::AnyNode>()) {
-      CHECK(reporter->AssertEQ(dshape_ncw[1], wshape[1]));
+      ICHECK(reporter->AssertEQ(dshape_ncw[1], wshape[1]));
     }
     channels = wshape[0];
     dilated_ksize = 1 + (wshape[2] - 1) * param->dilation[0];
@@ -139,7 +139,7 @@ bool Conv2DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
   static const Layout kOIHW("OIHW");
 
   const AttrType* param = attrs.as<AttrType>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   const Layout in_layout(param->data_layout);
   const Layout kernel_layout(param->kernel_layout);
 
@@ -191,8 +191,8 @@ bool Conv2DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
   IndexExpr channels, dilated_ksize_y, dilated_ksize_x;
   // infer weight if the kernel_size and channels are defined
   if (param->kernel_size.defined() && param->channels.defined()) {
-    CHECK_EQ(param->kernel_size.size(), 2);
-    CHECK_EQ(param->dilation.size(), 2);
+    ICHECK_EQ(param->kernel_size.size(), 2);
+    ICHECK_EQ(param->dilation.size(), 2);
     Array<IndexExpr> wshape;
 
     if (is_depthwise) {
@@ -291,7 +291,7 @@ bool Conv2DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 template <typename AttrType>
 bool Conv3DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   const auto* weight = types[1].as<TensorTypeNode>();
   if (data == nullptr) return false;
@@ -299,23 +299,23 @@ bool Conv3DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
   static const Layout kOIDHW("OIDHW");
 
   const AttrType* param = attrs.as<AttrType>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   const Layout in_layout(param->data_layout);
   const Layout kernel_layout(param->kernel_layout);
 
   const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCDHW);
-  CHECK(trans_in_layout.defined())
+  ICHECK(trans_in_layout.defined())
       << "Conv only support input layouts that are convertible from NCDHW."
       << " But got " << in_layout;
 
   const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIDHW);
-  CHECK(trans_kernel_layout.defined())
+  ICHECK(trans_kernel_layout.defined())
       << "Conv only support kernel layouts that are convertible from OIDHW."
       << " But got " << kernel_layout;
 
   Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout);
   const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNCDHW);
-  CHECK(trans_out_layout.defined())
+  ICHECK(trans_out_layout.defined())
       << "Conv only support output layouts that are convertible from NCDHW."
       << " But got " << out_layout;
 
@@ -324,8 +324,8 @@ bool Conv3DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
   IndexExpr channels, dilated_ksize_z, dilated_ksize_y, dilated_ksize_x;
   // infer weight if the kernel_size and channels are defined
   if (param->kernel_size.defined() && param->channels.defined()) {
-    CHECK_EQ(param->kernel_size.size(), 3);
-    CHECK_EQ(param->dilation.size(), 3);
+    ICHECK_EQ(param->kernel_size.size(), 3);
+    ICHECK_EQ(param->dilation.size(), 3);
     Array<IndexExpr> wshape;
     tvm::tir::ExprDeepEqual expr_equal;
 
@@ -355,23 +355,23 @@ bool Conv3DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
     if (weight == nullptr) return false;
     auto wshape = trans_kernel_layout.ForwardShape(weight->shape);
     if (param->kernel_size.defined()) {
-      CHECK_EQ(param->kernel_size.size(), 3);
+      ICHECK_EQ(param->kernel_size.size(), 3);
       // check the size
-      CHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) &&
-            reporter->AssertEQ(param->kernel_size[1], wshape[3]) &&
-            reporter->AssertEQ(param->kernel_size[2], wshape[4]))
+      ICHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) &&
+             reporter->AssertEQ(param->kernel_size[1], wshape[3]) &&
+             reporter->AssertEQ(param->kernel_size[2], wshape[4]))
           << "Conv3D: shape of weight is inconsistent with kernel_size, "
           << " kernel_size=" << param->kernel_size << " wshape=" << wshape;
     }
 
     if (param->channels.defined()) {
-      CHECK(reporter->AssertEQ(param->channels, wshape[0]))
+      ICHECK(reporter->AssertEQ(param->channels, wshape[0]))
           << "Conv3D: shape of weight is inconsistent with channels, "
           << " channels=" << param->channels << " wshape=" << wshape;
     }
 
     if (!dshape_ncdhw[1].as<tir::AnyNode>() && !wshape[1].as<tir::AnyNode>()) {
-      CHECK(reporter->AssertEQ(indexdiv(dshape_ncdhw[1], param->groups), wshape[1]));
+      ICHECK(reporter->AssertEQ(indexdiv(dshape_ncdhw[1], param->groups), wshape[1]));
     }
     channels = wshape[0];
     dilated_ksize_z = 1 + (wshape[2] - 1) * param->dilation[0];
@@ -413,14 +413,14 @@ bool Conv3DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 // Winograd convolution shape relations
 inline bool Conv2DWinogradWeightTransformRel(const Array<Type>& types, int num_inputs,
                                              const Attrs& attrs, const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 2);
+  ICHECK_EQ(types.size(), 2);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
 
   const ConvWinogradWeightTransformAttrs* param = attrs.as<ConvWinogradWeightTransformAttrs>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
 
-  CHECK_EQ(data->shape.size(), 4) << "Only support NCHW normal kernel layout";
+  ICHECK_EQ(data->shape.size(), 4) << "Only support NCHW normal kernel layout";
 
   std::vector<IndexExpr> oshape{
       param->tile_size + data->shape[2] - 1,
@@ -458,16 +458,16 @@ inline bool Conv2DWinogradWeightTransformRel(const Array<Type>& types, int num_i
 //
 inline bool Conv2DGemmWeightTransformRel(const Array<Type>& types, int num_inputs,
                                          const Attrs& attrs, const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 2);
+  ICHECK_EQ(types.size(), 2);
   const auto* weight = types[0].as<TensorTypeNode>();
   if (weight == nullptr) return false;
 
   const ConvGemmWeightTransformAttrs* param = attrs.as<ConvGemmWeightTransformAttrs>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   int n = param->tile_rows;
   int k = param->tile_cols;
 
-  CHECK_EQ(weight->shape.size(), 4) << "Only support HWIO kernel layout";
+  ICHECK_EQ(weight->shape.size(), 4) << "Only support HWIO kernel layout";
 
   const auto K = weight->shape[0] * weight->shape[1] * weight->shape[2];
   const auto N = weight->shape[3];
@@ -494,14 +494,14 @@ inline bool Conv2DGemmWeightTransformRel(const Array<Type>& types, int num_input
 
 inline bool Conv3DWinogradWeightTransformRel(const Array<Type>& types, int num_inputs,
                                              const Attrs& attrs, const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 2);
+  ICHECK_EQ(types.size(), 2);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
 
   const ConvWinogradWeightTransformAttrs* param = attrs.as<ConvWinogradWeightTransformAttrs>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
 
-  CHECK_EQ(data->shape.size(), 5) << "Only support NCDHW normal kernel layout";
+  ICHECK_EQ(data->shape.size(), 5) << "Only support NCDHW normal kernel layout";
 
   // Shape of packed weights depends on whether depth is being transformed or not.
   Array<IndexExpr> oshape({0, 0, 0, data->shape[0], data->shape[1]});
@@ -524,7 +524,7 @@ inline bool Conv3DWinogradWeightTransformRel(const Array<Type>& types, int num_i
 inline bool Conv2DWinogradNNPACKWeightTransformRel(const Array<Type>& types, int num_inputs,
                                                    const Attrs& attrs,
                                                    const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 2);
+  ICHECK_EQ(types.size(), 2);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) {
     return false;
@@ -532,9 +532,9 @@ inline bool Conv2DWinogradNNPACKWeightTransformRel(const Array<Type>& types, int
 
   const Conv2DWinogradNNPACKWeightTransformAttrs* param =
       attrs.as<Conv2DWinogradNNPACKWeightTransformAttrs>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
 
-  CHECK_EQ(data->shape.size(), 4) << "Only support NCHW normal kernel layout";
+  ICHECK_EQ(data->shape.size(), 4) << "Only support NCHW normal kernel layout";
 
   std::vector<IndexExpr> oshape{
       data->shape[0],
@@ -554,30 +554,30 @@ inline bool Conv2DWinogradNNPACKWeightTransformRel(const Array<Type>& types, int
 template <typename AttrType>
 bool Conv2DWinogradRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                        const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
   static const Layout kNCHW("NCHW");
   static const Layout kOIHW("OIHW");
 
   const AttrType* param = attrs.as<AttrType>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   const Layout in_layout(param->data_layout);
   const Layout kernel_layout(param->kernel_layout);
 
   const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCHW);
-  CHECK(trans_in_layout.defined())
+  ICHECK(trans_in_layout.defined())
       << "Conv only support input layouts that are convertible from NCHW."
       << " But got " << in_layout;
 
   const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIHW);
-  CHECK(trans_kernel_layout.defined())
+  ICHECK(trans_kernel_layout.defined())
       << "Conv only support kernel layouts that are convertible from OIHW."
       << " But got " << kernel_layout;
 
   Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout);
   const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNCHW);
-  CHECK(trans_out_layout.defined())
+  ICHECK(trans_out_layout.defined())
       << "Conv only support output layouts that are convertible from NCHW."
       << " But got " << out_layout;
 
@@ -585,11 +585,11 @@ bool Conv2DWinogradRel(const Array<Type>& types, int num_inputs, const Attrs& at
 
   IndexExpr channels, dilated_ksize_y, dilated_ksize_x;
 
-  CHECK(param->kernel_size.defined() && param->channels.defined())
+  ICHECK(param->kernel_size.defined() && param->channels.defined())
       << "The kernel size and channels of a Conv must be set or inferred by previous pass";
 
-  CHECK_EQ(param->kernel_size.size(), 2);
-  CHECK_EQ(param->dilation.size(), 2);
+  ICHECK_EQ(param->kernel_size.size(), 2);
+  ICHECK_EQ(param->dilation.size(), 2);
 
   channels = param->channels;
   dilated_ksize_y = 1 + (param->kernel_size[0] - 1) * param->dilation[0];
@@ -631,30 +631,30 @@ bool Conv2DWinogradRel(const Array<Type>& types, int num_inputs, const Attrs& at
 template <typename AttrType>
 bool Conv2DGemmRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                    const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
   static const Layout kNHWC("NHWC");
   static const Layout kHWIO("HWIO");
 
   const AttrType* param = attrs.as<AttrType>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   const Layout in_layout(param->data_layout);
   const Layout kernel_layout(param->kernel_layout);
 
   const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNHWC);
-  CHECK(trans_in_layout.defined())
+  ICHECK(trans_in_layout.defined())
       << "Conv only support input layouts that are convertible from NHWC."
       << " But got " << in_layout;
 
   const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kHWIO);
-  CHECK(trans_kernel_layout.defined())
+  ICHECK(trans_kernel_layout.defined())
       << "Conv only support kernel layouts that are convertible from HWIO."
       << " But got " << kernel_layout;
 
   Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout);
   const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNHWC);
-  CHECK(trans_out_layout.defined())
+  ICHECK(trans_out_layout.defined())
       << "Conv only support output layouts that are convertible from NHWC."
       << " But got " << out_layout;
 
@@ -662,11 +662,11 @@ bool Conv2DGemmRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 
   IndexExpr channels, dilated_ksize_y, dilated_ksize_x;
 
-  CHECK(param->kernel_size.defined() && param->channels.defined())
+  ICHECK(param->kernel_size.defined() && param->channels.defined())
       << "The kernel size and channels of a Conv must be set or inferred by previous pass";
 
-  CHECK_EQ(param->kernel_size.size(), 2);
-  CHECK_EQ(param->dilation.size(), 2);
+  ICHECK_EQ(param->kernel_size.size(), 2);
+  ICHECK_EQ(param->dilation.size(), 2);
 
   channels = param->channels;
   dilated_ksize_y = 1 + (param->kernel_size[0] - 1) * param->dilation[0];
@@ -703,30 +703,30 @@ bool Conv2DGemmRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
 template <typename AttrType>
 bool Conv3DWinogradRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                        const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
   static const Layout kNCDHW("NCDHW");
   static const Layout kOIDHW("OIDHW");
 
   const AttrType* param = attrs.as<AttrType>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   const Layout in_layout(param->data_layout);
   const Layout kernel_layout(param->kernel_layout);
 
   const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCDHW);
-  CHECK(trans_in_layout.defined())
+  ICHECK(trans_in_layout.defined())
       << "Conv only support input layouts that are convertible from NCDHW."
       << " But got " << in_layout;
 
   const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIDHW);
-  CHECK(trans_kernel_layout.defined())
+  ICHECK(trans_kernel_layout.defined())
       << "Conv only support kernel layouts that are convertible from OIDHW."
       << " But got " << kernel_layout;
 
   Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout);
   const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNCDHW);
-  CHECK(trans_out_layout.defined())
+  ICHECK(trans_out_layout.defined())
       << "Conv only support output layouts that are convertible from NCDHW."
       << " But got " << out_layout;
 
@@ -734,11 +734,11 @@ bool Conv3DWinogradRel(const Array<Type>& types, int num_inputs, const Attrs& at
 
   IndexExpr channels, dilated_ksize_d, dilated_ksize_y, dilated_ksize_x;
 
-  CHECK(param->kernel_size.defined() && param->channels.defined())
+  ICHECK(param->kernel_size.defined() && param->channels.defined())
       << "The kernel size and channels of a Conv must be set or inferred by previous pass";
 
-  CHECK_EQ(param->kernel_size.size(), 3);
-  CHECK_EQ(param->dilation.size(), 3);
+  ICHECK_EQ(param->kernel_size.size(), 3);
+  ICHECK_EQ(param->dilation.size(), 3);
 
   channels = param->channels;
   dilated_ksize_d = 1 + (param->kernel_size[0] - 1) * param->dilation[0];
@@ -787,7 +787,7 @@ bool Conv3DWinogradRel(const Array<Type>& types, int num_inputs, const Attrs& at
 template <typename AttrType>
 bool Conv1DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                         const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   const auto* weight = types[1].as<TensorTypeNode>();
   if (data == nullptr) return false;
@@ -796,23 +796,23 @@ bool Conv1DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
   static const Layout kOIW("OIW");
 
   const Conv1DTransposeAttrs* param = attrs.as<AttrType>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   const Layout in_layout(param->data_layout);
   const Layout kernel_layout(param->kernel_layout);
 
   const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCW);
-  CHECK(trans_in_layout.defined())
+  ICHECK(trans_in_layout.defined())
       << "Conv only support input layouts that are convertible from NCW."
       << " But got " << in_layout;
 
   const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIW);
-  CHECK(trans_kernel_layout.defined())
+  ICHECK(trans_kernel_layout.defined())
       << "Conv only support kernel layouts that are convertible from OIW."
       << " But got " << kernel_layout;
 
   Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout);
   const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNCW);
-  CHECK(trans_out_layout.defined())
+  ICHECK(trans_out_layout.defined())
       << "Conv only support output layouts that are convertible from NCW."
       << " But got " << out_layout;
 
@@ -822,8 +822,8 @@ bool Conv1DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
 
   // infer weight if the kernel_size and channels are defined
   if (param->kernel_size.defined() && param->channels.defined()) {
-    CHECK_EQ(param->kernel_size.size(), 1);
-    CHECK_EQ(param->dilation.size(), 1);
+    ICHECK_EQ(param->kernel_size.size(), 1);
+    ICHECK_EQ(param->dilation.size(), 1);
 
     Array<IndexExpr> wshape(
         {dshape_ncw[1], indexdiv(param->channels, param->groups), param->kernel_size[0]});
@@ -839,19 +839,19 @@ bool Conv1DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
     if (weight == nullptr) return false;
     auto wshape = trans_kernel_layout.ForwardShape(weight->shape);
     if (param->kernel_size.defined()) {
-      CHECK_EQ(param->kernel_size.size(), 1);
+      ICHECK_EQ(param->kernel_size.size(), 1);
       // check the size
-      CHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]))
+      ICHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]))
           << "Conv1D: shape of weight is inconsistent with kernel_size, "
           << " kernel_size=" << param->kernel_size << " wshape=" << Array<IndexExpr>(wshape);
     }
     if (param->channels.defined()) {
-      CHECK(reporter->AssertEQ(param->channels, wshape[1]))
+      ICHECK(reporter->AssertEQ(param->channels, wshape[1]))
           << "Conv1D: shape of weight is inconsistent with channels, "
           << " channels=" << param->channels << " wshape=" << Array<IndexExpr>(wshape);
     }
     if (!dshape_ncw[1].as<tir::AnyNode>() && !wshape[0].as<tir::AnyNode>()) {
-      CHECK(reporter->AssertEQ(indexdiv(dshape_ncw[1], param->groups), wshape[0]));
+      ICHECK(reporter->AssertEQ(indexdiv(dshape_ncw[1], param->groups), wshape[0]));
     }
     channels = wshape[1];
     dilated_ksize_x = 1 + (wshape[2] - 1) * param->dilation[0];
@@ -879,7 +879,7 @@ bool Conv1DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
 template <typename AttrType>
 bool Conv3DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                         const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   const auto* weight = types[1].as<TensorTypeNode>();
   if (data == nullptr) return false;
@@ -888,23 +888,23 @@ bool Conv3DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
   static const Layout kOIDHW("OIDHW");
 
   const Conv3DTransposeAttrs* param = attrs.as<AttrType>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   const Layout in_layout(param->data_layout);
   const Layout kernel_layout(param->kernel_layout);
 
   const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCDHW);
-  CHECK(trans_in_layout.defined())
+  ICHECK(trans_in_layout.defined())
       << "Conv3d_transpose only support input layouts that are convertible from NCDHW."
       << " But got " << in_layout;
 
   const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIDHW);
-  CHECK(trans_kernel_layout.defined())
+  ICHECK(trans_kernel_layout.defined())
       << "Conv3d_transpose only support kernel layouts that are convertible from OIDHW."
       << " But got " << kernel_layout;
 
   Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout);
   const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNCDHW);
-  CHECK(trans_out_layout.defined())
+  ICHECK(trans_out_layout.defined())
       << "Conv3d_transpose only support output layouts that are convertible from NCDHW."
       << " But got " << out_layout;
 
@@ -914,8 +914,8 @@ bool Conv3DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
 
   // infer weight if the kernel_size and channels are defined
   if (param->kernel_size.defined() && param->channels.defined()) {
-    CHECK_EQ(param->kernel_size.size(), 3);
-    CHECK_EQ(param->dilation.size(), 3);
+    ICHECK_EQ(param->kernel_size.size(), 3);
+    ICHECK_EQ(param->dilation.size(), 3);
 
     Array<IndexExpr> wshape({dshape_ncdhw[1], indexdiv(param->channels, param->groups),
                              param->kernel_size[0], param->kernel_size[1], param->kernel_size[2]});
@@ -933,21 +933,21 @@ bool Conv3DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
     if (weight == nullptr) return false;
     auto wshape = trans_kernel_layout.ForwardShape(weight->shape);
     if (param->kernel_size.defined()) {
-      CHECK_EQ(param->kernel_size.size(), 3);
+      ICHECK_EQ(param->kernel_size.size(), 3);
       // check the size
-      CHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) &&
-            reporter->AssertEQ(param->kernel_size[1], wshape[3]) &&
-            reporter->AssertEQ(param->kernel_size[2], wshape[4]))
+      ICHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) &&
+             reporter->AssertEQ(param->kernel_size[1], wshape[3]) &&
+             reporter->AssertEQ(param->kernel_size[2], wshape[4]))
           << "Conv3D: shape of weight is inconsistent with kernel_size, "
           << " kernel_size=" << param->kernel_size << " wshape=" << Array<IndexExpr>(wshape);
     }
     if (param->channels.defined()) {
-      CHECK(reporter->AssertEQ(param->channels, wshape[1]))
+      ICHECK(reporter->AssertEQ(param->channels, wshape[1]))
           << "Conv3D: shape of weight is inconsistent with channels, "
           << " channels=" << param->channels << " wshape=" << Array<IndexExpr>(wshape);
     }
     if (!dshape_ncdhw[1].as<tir::AnyNode>() && !wshape[0].as<tir::AnyNode>()) {
-      CHECK(reporter->AssertEQ(indexdiv(dshape_ncdhw[1], param->groups), wshape[0]));
+      ICHECK(reporter->AssertEQ(indexdiv(dshape_ncdhw[1], param->groups), wshape[0]));
     }
     channels = wshape[1];
     dilated_ksize_d = 1 + (wshape[2] - 1) * param->dilation[0];
@@ -991,7 +991,7 @@ bool Conv3DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
 template <typename AttrType>
 bool Conv2DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                         const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   const auto* weight = types[1].as<TensorTypeNode>();
   if (data == nullptr) return false;
@@ -1000,23 +1000,23 @@ bool Conv2DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
   static const Layout kOIHW("OIHW");
 
   const Conv2DTransposeAttrs* param = attrs.as<AttrType>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   const Layout in_layout(param->data_layout);
   const Layout kernel_layout(param->kernel_layout);
 
   const auto trans_in_layout = tir::BijectiveLayout(in_layout, kNCHW);
-  CHECK(trans_in_layout.defined())
+  ICHECK(trans_in_layout.defined())
       << "Conv only support input layouts that are convertible from NCHW."
       << " But got " << in_layout;
 
   const auto trans_kernel_layout = tir::BijectiveLayout(kernel_layout, kOIHW);
-  CHECK(trans_kernel_layout.defined())
+  ICHECK(trans_kernel_layout.defined())
       << "Conv only support kernel layouts that are convertible from OIHW."
       << " But got " << kernel_layout;
 
   Layout out_layout(param->out_layout == "" ? param->data_layout : param->out_layout);
   const auto trans_out_layout = tir::BijectiveLayout(out_layout, kNCHW);
-  CHECK(trans_out_layout.defined())
+  ICHECK(trans_out_layout.defined())
       << "Conv only support output layouts that are convertible from NCHW."
       << " But got " << out_layout;
 
@@ -1026,8 +1026,8 @@ bool Conv2DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
 
   // infer weight if the kernel_size and channels are defined
   if (param->kernel_size.defined() && param->channels.defined()) {
-    CHECK_EQ(param->kernel_size.size(), 2);
-    CHECK_EQ(param->dilation.size(), 2);
+    ICHECK_EQ(param->kernel_size.size(), 2);
+    ICHECK_EQ(param->dilation.size(), 2);
 
     Array<IndexExpr> wshape({dshape_nchw[1], indexdiv(param->channels, param->groups),
                              param->kernel_size[0], param->kernel_size[1]});
@@ -1044,20 +1044,20 @@ bool Conv2DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
     if (weight == nullptr) return false;
     auto wshape = trans_kernel_layout.ForwardShape(weight->shape);
     if (param->kernel_size.defined()) {
-      CHECK_EQ(param->kernel_size.size(), 2);
+      ICHECK_EQ(param->kernel_size.size(), 2);
       // check the size
-      CHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) &&
-            reporter->AssertEQ(param->kernel_size[1], wshape[3]))
+      ICHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) &&
+             reporter->AssertEQ(param->kernel_size[1], wshape[3]))
           << "Conv2D: shape of weight is inconsistent with kernel_size, "
           << " kernel_size=" << param->kernel_size << " wshape=" << Array<IndexExpr>(wshape);
     }
     if (param->channels.defined()) {
-      CHECK(reporter->AssertEQ(param->channels, wshape[1]))
+      ICHECK(reporter->AssertEQ(param->channels, wshape[1]))
           << "Conv2D: shape of weight is inconsistent with channels, "
           << " channels=" << param->channels << " wshape=" << Array<IndexExpr>(wshape);
     }
     if (!dshape_nchw[1].as<tir::AnyNode>() && !wshape[0].as<tir::AnyNode>()) {
-      CHECK(reporter->AssertEQ(indexdiv(dshape_nchw[1], param->groups), wshape[0]));
+      ICHECK(reporter->AssertEQ(indexdiv(dshape_nchw[1], param->groups), wshape[0]));
     }
     channels = wshape[1];
     dilated_ksize_y = 1 + (wshape[2] - 1) * param->dilation[0];
@@ -1093,21 +1093,21 @@ bool Conv2DTransposeRel(const Array<Type>& types, int num_inputs, const Attrs& a
 template <typename AttrType>
 bool DeformableConv2DRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                          const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 4);
+  ICHECK_EQ(types.size(), 4);
   const auto* data = types[0].as<TensorTypeNode>();
   const auto* weight = types[2].as<TensorTypeNode>();
 
-  CHECK(data);
+  ICHECK(data);
   auto* param = attrs.as<AttrType>();
-  CHECK_EQ(param->data_layout, "NCHW") << "data layout not supported.";
-  CHECK_EQ(param->kernel_layout, "OIHW") << "kernel_layout not supported.";
+  ICHECK_EQ(param->data_layout, "NCHW") << "data layout not supported.";
+  ICHECK_EQ(param->kernel_layout, "OIHW") << "kernel_layout not supported.";
 
   IndexExpr channels, dilated_ksize_y, dilated_ksize_x, ksize_y, ksize_x;
 
   // infer weight shape if kernel_size and channels are defined
   if (param->kernel_size.defined() && param->channels.defined()) {
-    CHECK_EQ(param->kernel_size.size(), 2);
-    CHECK_EQ(param->dilation.size(), 2);
+    ICHECK_EQ(param->kernel_size.size(), 2);
+    ICHECK_EQ(param->dilation.size(), 2);
     Array<IndexExpr> wshape({param->channels, indexdiv(data->shape[1], param->groups),
                              param->kernel_size[0], param->kernel_size[1]});
     channels = param->channels;
@@ -1122,20 +1122,20 @@ bool DeformableConv2DRel(const Array<Type>& types, int num_inputs, const Attrs&
     if (weight == nullptr) return false;
     auto wshape = weight->shape;
     if (param->kernel_size.defined()) {
-      CHECK_EQ(param->kernel_size.size(), 2);
+      ICHECK_EQ(param->kernel_size.size(), 2);
       // check the size
-      CHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) &&
-            reporter->AssertEQ(param->kernel_size[1], wshape[3]))
+      ICHECK(reporter->AssertEQ(param->kernel_size[0], wshape[2]) &&
+             reporter->AssertEQ(param->kernel_size[1], wshape[3]))
           << "DeformableConv2D: shape of weight is inconsistent with kernel_size, "
           << " kernel_size=" << param->kernel_size << " wshape=" << wshape;
     }
     if (param->channels.defined()) {
-      CHECK(reporter->AssertEQ(param->channels, wshape[0]))
+      ICHECK(reporter->AssertEQ(param->channels, wshape[0]))
           << "DeformableConv2D: shape of weight is inconsistent with channels, "
           << " channels=" << param->channels << " wshape=" << wshape;
     }
     if (!data->shape[1].as<tir::AnyNode>() && !wshape[1].as<tir::AnyNode>()) {
-      CHECK(reporter->AssertEQ(indexdiv(data->shape[1], param->groups), wshape[1]));
+      ICHECK(reporter->AssertEQ(indexdiv(data->shape[1], param->groups), wshape[1]));
     }
     channels = wshape[0];
     ksize_y = wshape[2];
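
The dilated_ksize expressions recurring through convolution.h feed the
standard convolution output-extent formula; the hunks shown elide the part
that computes oshape, but per spatial dimension it reduces to the following
(worked with plain integers here; the real code carries symbolic IndexExpr
through the same arithmetic):

    #include <cstdint>

    // Output extent of one spatial dimension of a direct convolution.
    int64_t ConvOutDim(int64_t in, int64_t kernel, int64_t stride,
                       int64_t pad_total, int64_t dilation) {
      int64_t dilated_k = 1 + (kernel - 1) * dilation;  // as in the hunks above
      return (in + pad_total - dilated_k) / stride + 1;
    }

    // e.g. in=224, kernel=3, stride=1, pad_total=2, dilation=1 -> 224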
diff --git a/src/relay/op/nn/correlation.cc b/src/relay/op/nn/correlation.cc
index 5970cc7..0c2f481 100644
--- a/src/relay/op/nn/correlation.cc
+++ b/src/relay/op/nn/correlation.cc
@@ -64,14 +64,14 @@ Expr MakeCorrelation(Expr data1, Expr data2, int kernel_size, int max_displaceme
 
 bool CorrelationRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                     const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data1 = types[0].as<TensorTypeNode>();
   const auto* data2 = types[1].as<TensorTypeNode>();
   if (data1 == nullptr || data2 == nullptr) return false;
 
   const CorrelationAttrs* param = attrs.as<CorrelationAttrs>();
-  CHECK(param != nullptr);
-  CHECK_EQ(param->layout, "NCHW") << "layout not supported.";
+  ICHECK(param != nullptr);
+  ICHECK_EQ(param->layout, "NCHW") << "layout not supported.";
   IndexExpr pad_h, pad_w;
   GetPaddingHeightWidth(param->padding, &pad_h, &pad_w);
   IndexExpr padded_height = data1->shape[2] + pad_h;
diff --git a/src/relay/op/nn/nn.cc b/src/relay/op/nn/nn.cc
index 58dfab2..ea25c1a 100644
--- a/src/relay/op/nn/nn.cc
+++ b/src/relay/op/nn/nn.cc
@@ -50,17 +50,17 @@ TVM_REGISTER_NODE_TYPE(BiasAddAttrs);
 
 bool BiasAddRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                 const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
 
   const BiasAddAttrs* param = attrs.as<BiasAddAttrs>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
   int axis = param->axis;
   if (axis < 0) {
     axis = data->shape.size() + axis;
   }
-  CHECK_LE(axis, static_cast<int>(data->shape.size()))
+  ICHECK_LE(axis, static_cast<int>(data->shape.size()))
       << "axis " << param->axis << " is out of range";
 
   // assign output type
@@ -107,15 +107,15 @@ Expr MakeFIFOBuffer(Expr input, Expr buffer, int axis) {
 
 bool FIFOBufferRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                    const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* input = types[0].as<TensorTypeNode>();
   const auto* buffer = types[1].as<TensorTypeNode>();
   const FIFOBufferAttrs* param = attrs.as<FIFOBufferAttrs>();
   if (input == nullptr || buffer == nullptr) {
     return false;
   }
-  CHECK(param != nullptr);
-  CHECK_EQ(input->shape.size(), buffer->shape.size());
+  ICHECK(param != nullptr);
+  ICHECK_EQ(input->shape.size(), buffer->shape.size());
 
   const size_t buffer_axis = static_cast<size_t>(
       param->axis < 0 ? static_cast<int>(buffer->shape.size()) + param->axis : param->axis);
@@ -221,14 +221,14 @@ TVM_REGISTER_NODE_TYPE(PReluAttrs);
 
 bool PReluRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
               const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
 
   const PReluAttrs* param = attrs.as<PReluAttrs>();
-  CHECK(param != nullptr);
+  ICHECK(param != nullptr);
 
-  CHECK(param->axis < static_cast<int>(data->shape.size()))
+  ICHECK(param->axis < static_cast<int>(data->shape.size()))
       << "Wrong axis (" << param->axis << ")value.";
 
   // assign alpha type
@@ -245,11 +245,11 @@ Array<Array<Layout>> PReluInferCorrectLayout(const Attrs& attrs,
                                              const Array<Layout>& new_in_layouts,
                                              const Array<Layout>& old_in_layouts,
                                              const Array<tvm::relay::Type>& old_in_types) {
-  CHECK_EQ(old_in_layouts.size(), 2U);
-  CHECK_EQ(old_in_types.size(), 2U);
+  ICHECK_EQ(old_in_layouts.size(), 2U);
+  ICHECK_EQ(old_in_types.size(), 2U);
   Layout data_layout = old_in_layouts[0];
   if (new_in_layouts.defined()) {
-    CHECK_EQ(new_in_layouts.size(), 2U);
+    ICHECK_EQ(new_in_layouts.size(), 2U);
   }
   return Array<Array<Layout>>{{data_layout, Layout("C")}, {data_layout}};
 }
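The value returned by an InferCorrectLayout function such as PReluInferCorrectLayout is a pair of lists: the desired layout for each input, then for each output. A plain-C++ stand-in for PRelu's rule (data and output share a layout, alpha is one-dimensional along the channel axis), using simplified types instead of the tvm::relay ones:

    #include <string>
    #include <utility>
    #include <vector>

    using Layouts = std::vector<std::string>;

    // Returns {input layouts, output layouts} for PRelu.
    std::pair<Layouts, Layouts> PReluLayouts(const std::string& data_layout) {
      return {{data_layout, "C"}, {data_layout}};
    }

PReluLayouts("NHWC") gives inputs {"NHWC", "C"} and outputs {"NHWC"}, matching the Array<Array<Layout>> constructed above.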
@@ -335,8 +335,8 @@ RELAY_REGISTER_OP("nn.log_softmax")
     .set_attr<FTVMCompute>("FTVMCompute", [](const Attrs& attrs, const Array<te::Tensor>& inputs,
                                              const Type& out_type) {
       const auto* param = attrs.as<SoftmaxAttrs>();
-      CHECK(param != nullptr);
-      CHECK(param->axis == -1 || param->axis == static_cast<int32_t>(inputs[0].ndim()) - 1)
+      ICHECK(param != nullptr);
+      ICHECK(param->axis == -1 || param->axis == static_cast<int32_t>(inputs[0].ndim()) - 1)
           << "log_softmax currently only works on last dimension";
       return Array<te::Tensor>{topi::nn::log_softmax(inputs[0])};
     });
@@ -344,7 +344,7 @@ RELAY_REGISTER_OP("nn.log_softmax")
 // relay.nn.batch_flatten
 bool BatchFlattenRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                      const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 2);
+  ICHECK_EQ(types.size(), 2);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
   if (data->shape.size() == 0) return false;
@@ -499,7 +499,7 @@ TVM_REGISTER_NODE_TYPE(DropoutAttrs);
 
 bool DropoutRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                 const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 2);
+  ICHECK_EQ(types.size(), 2);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
 
@@ -544,7 +544,7 @@ Array<Array<Layout>> BatchNormInferCorrectLayout(const Attrs& attrs,
 
   Array<Array<IndexExpr>> old_in_shapes;
   for (auto old_in_t : old_in_types) {
-    CHECK(old_in_t.as<TensorTypeNode>());
+    ICHECK(old_in_t.as<TensorTypeNode>());
     old_in_shapes.push_back(old_in_t.as<TensorTypeNode>()->shape);
   }
 
@@ -572,14 +572,14 @@ Array<Array<Layout>> BatchNormInferCorrectLayout(const Attrs& attrs,
 
 bool BatchNormRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                   const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 6);
+  ICHECK_EQ(types.size(), 6);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
 
   const BatchNormAttrs* param = attrs.as<BatchNormAttrs>();
 
   // axis of -1 means use the last dimension
-  CHECK(param->axis >= -1 && param->axis < (int)data->shape.size());
+  ICHECK(param->axis >= -1 && param->axis < (int)data->shape.size());
   int axis = (param->axis != -1) ? param->axis : data->shape.size() - 1;
   auto axis_size = data->shape[axis];
 
@@ -666,12 +666,12 @@ TVM_REGISTER_NODE_TYPE(InstanceNormAttrs);
 
 bool InstanceNormRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                      const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 4);
+  ICHECK_EQ(types.size(), 4);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
   const InstanceNormAttrs* param = attrs.as<InstanceNormAttrs>();
   int axis = param->axis >= 0 ? param->axis : param->axis + data->shape.size();
-  CHECK(axis >= 0 && axis < (int)data->shape.size());
+  ICHECK(axis >= 0 && axis < (int)data->shape.size());
   reporter->Assign(types[1], TensorType({data->shape[axis]}, data->dtype));
   reporter->Assign(types[2], TensorType({data->shape[axis]}, data->dtype));
   reporter->Assign(types[3], TensorType(data->shape, data->dtype));
@@ -733,12 +733,12 @@ TVM_REGISTER_NODE_TYPE(LayerNormAttrs);
 
 bool LayerNormRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                   const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 4);
+  ICHECK_EQ(types.size(), 4);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
   const LayerNormAttrs* param = attrs.as<LayerNormAttrs>();
   int axis = param->axis >= 0 ? param->axis : param->axis + data->shape.size();
-  CHECK(axis >= 0 && axis < (int)data->shape.size());
+  ICHECK(axis >= 0 && axis < (int)data->shape.size());
   reporter->Assign(types[1], TensorType({data->shape[axis]}, data->dtype));
   reporter->Assign(types[2], TensorType({data->shape[axis]}, data->dtype));
   reporter->Assign(types[3], TensorType(data->shape, data->dtype));
@@ -778,12 +778,12 @@ TVM_REGISTER_NODE_TYPE(GroupNormAttrs);
 
 bool GroupNormRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                   const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 4);
+  ICHECK_EQ(types.size(), 4);
   const auto* data = types[0].as<TensorTypeNode>();
   if (data == nullptr) return false;
   const GroupNormAttrs* param = attrs.as<GroupNormAttrs>();
   int axis = param->axis >= 0 ? param->axis : param->axis + data->shape.size();
-  CHECK(axis >= 0 && axis < (int)data->shape.size());
+  ICHECK(axis >= 0 && axis < (int)data->shape.size());
   reporter->Assign(types[1], TensorType({data->shape[axis]}, data->dtype));
   reporter->Assign(types[2], TensorType({data->shape[axis]}, data->dtype));
   reporter->Assign(types[3], TensorType(data->shape, data->dtype));
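InstanceNormRel, LayerNormRel, and GroupNormRel share an identical typing rule: gamma and beta (types[1] and types[2]) are one-dimensional with length data->shape[axis], and the output (types[3]) keeps the input's shape and dtype. A hypothetical shared helper expressing that rule with simplified stand-in types:

    #include <array>
    #include <string>
    #include <vector>

    struct SimpleTensorType {
      std::vector<long> shape;
      std::string dtype;
    };

    // Returns {gamma type, beta type, output type} for a normalization
    // relation over `axis`, mirroring the three relations above.
    std::array<SimpleTensorType, 3> NormResultTypes(const SimpleTensorType& data,
                                                    int axis) {
      SimpleTensorType scale{{data.shape[axis]}, data.dtype};
      return {scale, scale, data};
    }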
@@ -847,11 +847,11 @@ If the input has size k on axis 1, then both gamma and beta have shape (k,).
 // relay.nn.batch_matmul
 bool BatchMatmulRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                     const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* x = types[0].as<TensorTypeNode>();
   const auto* y = types[1].as<TensorTypeNode>();
   if (x == nullptr || y == nullptr) return false;
-  CHECK(x->shape.size() == 3 && y->shape.size() == 3);
+  ICHECK(x->shape.size() == 3 && y->shape.size() == 3);
   bool is_dyn = false;
   Array<tvm::PrimExpr> oshape;
   for (size_t i = 0; i < 3; ++i) {
@@ -867,11 +867,11 @@ bool BatchMatmulRel(const Array<Type>& types, int num_inputs, const Attrs& attrs
     }
   }
   if (!is_dyn) {
-    CHECK(reporter->AssertEQ(x->shape[0], y->shape[0]) || reporter->AssertEQ(x->shape[0], 1) ||
-          reporter->AssertEQ(y->shape[0], 1))
+    ICHECK(reporter->AssertEQ(x->shape[0], y->shape[0]) || reporter->AssertEQ(x->shape[0], 1) ||
+           reporter->AssertEQ(y->shape[0], 1))
         << "BatchDot: batch dimensions don't match, "
         << " x shape=" << x->shape << ", y shape=" << y->shape;
-    CHECK(reporter->AssertEQ(x->shape[2], y->shape[2]))
+    ICHECK(reporter->AssertEQ(x->shape[2], y->shape[2]))
         << "BatchDot: shapes of x and y is inconsistent, "
         << " x shape=" << x->shape << ", y shape=" << y->shape;
 
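The checks in BatchMatmulRel encode the batch_matmul shape rule: x of shape (B, M, K) and y of shape (B', N, K) produce (max(B, B'), M, N), with the batch dimension broadcast when either side is 1 and the trailing K dimensions required to match. A static sketch of that rule, as a stand-in for the dynamic-shape-aware relation above:

    #include <algorithm>
    #include <array>
    #include <stdexcept>

    // x: (B, M, K), y: (B', N, K) -> (broadcast batch, M, N).
    std::array<long, 3> BatchMatmulShape(const std::array<long, 3>& x,
                                         const std::array<long, 3>& y) {
      if (x[2] != y[2]) throw std::invalid_argument("reduction dims differ");
      if (x[0] != y[0] && x[0] != 1 && y[0] != 1)
        throw std::invalid_argument("batch dims don't broadcast");
      return {std::max(x[0], y[0]), x[1], y[1]};
    }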
@@ -913,19 +913,19 @@ are data in batch.
 // relay.nn.cross_entropy
 bool CrossEntropyRel(const Array<Type>& types, int num_inputs, const Attrs& attrs,
                      const TypeReporter& reporter) {
-  CHECK_EQ(types.size(), 3);
+  ICHECK_EQ(types.size(), 3);
   const auto* x = types[0].as<TensorTypeNode>();
   const auto* y = types[1].as<TensorTypeNode>();
   if (x == nullptr || y == nullptr) return false;
-  CHECK(x->shape.size() == 2 && y->shape.size() == 2)
+  ICHECK(x->shape.size() == 2 && y->shape.size() == 2)
       << "CrossEntropy: shapes of x and y is inconsistent, "
       << "x shape = " << x->shape << ", "
       << "y shape = " << y->shape;
... 20292 lines suppressed ...