Posted to commits@tvm.apache.org by GitBox <gi...@apache.org> on 2022/09/09 07:52:43 UTC

[GitHub] [tvm] elvin-n commented on a diff in pull request #12545: [AutoTVM] Introducing multi_filter into ConfigSpace autotvm.

elvin-n commented on code in PR #12545:
URL: https://github.com/apache/tvm/pull/12545#discussion_r964733626


##########
python/tvm/autotvm/task/space.py:
##########
@@ -822,6 +830,155 @@ def valid(self):
         """
         return not bool(self.errors)
 
+    def is_index_filtered(self, i):
+        """checks if the index satisfies the multi_filter"""
+        if self._shared_filter is None:
+            return True
+
+        if self._shared_filter_cash is None:
+            self._make_shared_filter_cash()
+
+        return self._shared_filter_cash[i]
+
+    def multi_filter(self, **kwargs):
+        "Keeps arg named 'filter'as function as a multi_filter"
+        if self._collect:
+            self._shared_filter_cash = None

Review Comment:
   Don't we need to call clear_shared_filter_cash() instead?
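
To make the suggestion concrete, here is a minimal sketch of such a helper (only the method name comes from the comment above; the body and the call site are assumptions, not the PR's actual code):

    def clear_shared_filter_cash(self):
        """Drop the cached multi_filter results; they are rebuilt lazily."""
        self._shared_filter_cash = None

    def multi_filter(self, **kwargs):
        "Keeps the arg named 'filter' as a function to be used as a multi_filter"
        if self._collect:
            self._shared_filter = kwargs.get("filter")
            # Routing every invalidation through one helper keeps all code
            # paths that mutate the space consistent.
            self.clear_shared_filter_cash()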



##########
python/tvm/autotvm/tuner/ga_tuner.py:
##########
@@ -49,41 +48,29 @@ def __init__(self, task, pop_size=100, elite_num=3, mutation_prob=0.1):
 
         assert elite_num <= pop_size, "The number of elites must be less than population size"
 
+        # random initialization
+        self.pop_size = min(self.pop_size, task.config_space.filtered_length)
+        self.elite_num = min(self.pop_size, self.elite_num)
+
         # space info
         self.space = task.config_space
-        self.dim_keys = []
-        self.dims = []
-        for k, v in self.space.space_map.items():
-            self.dim_keys.append(k)
-            self.dims.append(len(v))
-
-        self.visited = set([])
+        self.visited = set(self.space.sample_ints(self.pop_size))
 
         # current generation
-        self.genes = []
+        self.genes = [self.space.point2knob(idx) for idx in self.visited]
         self.scores = []
         self.elites = []
         self.elite_scores = []
         self.trial_pt = 0
 
-        # random initialization
-        self.pop_size = min(self.pop_size, len(self.space))
-        self.elite_num = min(self.pop_size, self.elite_num)
-        for _ in range(self.pop_size):
-            tmp_gene = point2knob(np.random.randint(len(self.space)), self.dims)
-            while knob2point(tmp_gene, self.dims) in self.visited:
-                tmp_gene = point2knob(np.random.randint(len(self.space)), self.dims)
-
-            self.genes.append(tmp_gene)
-            self.visited.add(knob2point(tmp_gene, self.dims))
-
     def next_batch(self, batch_size):
         ret = []
-        for _ in range(batch_size):
+        while len(ret) < batch_size and self.trial_pt < self.space.total_length:

Review Comment:
   Shouldn't this be filtered_length? As I understand it, the genes should already contain only filtered points.
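
For context, a standalone toy (not TVM code; the knob values and the filter are made up) showing why the two bounds differ:

    import itertools

    knobs = {"tile_x": [1, 2, 4, 8], "tile_y": [1, 2, 4, 8]}
    points = list(itertools.product(*knobs.values()))
    total_length = len(points)  # 16: raw Cartesian product of the knobs

    # A multi_filter is a joint constraint across knobs:
    def passes(tx, ty):
        return tx * ty <= 8

    filtered_length = sum(1 for p in points if passes(*p))  # 10 of the 16

    # A loop bounded by total_length keeps revisiting the 6 rejected points;
    # bounding it by filtered_length walks only valid configurations.
    print(total_length, filtered_length)  # 16 10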



##########
python/tvm/autotvm/task/space.py:
##########
@@ -664,14 +668,18 @@ def __init__(self):
         # private dict to provide sugar
         self.space_map = OrderedDict()  # name -> space
         self._collect = True
-        self._length = None
+        self._total_length = None
+        self._filtered_length = None
+        self._dims = None
         self._entity_map = OrderedDict()  # name -> entity
         self._constraints = []
         self.errors = []
         self.code_hash = None
         self.flop = 0
         self.cost = None
         self.is_fallback = False
+        self._shared_filter = None
+        self._shared_filter_cash = None

Review Comment:
   I see that the cache is only initialized once, when it is not yet defined.
   At the same time, if the cache has already been initialized by some API call that populates `_shared_filter_cash`, and we then add one more filter, the search space will no longer correspond to the cached data. We need to invalidate `_shared_filter_cash` not only when a multi_filter is added, but whenever any filter is added.
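
To make the invalidation concern concrete, a standalone sketch (names mirror the PR's identifiers; this is not the PR's code, and add_filter is a hypothetical per-knob hook):

    class SpaceSketch:
        def __init__(self):
            self._shared_filter = None
            self._shared_filter_cash = None  # lazily built pass/fail flags
            self._knob_filters = []

        def multi_filter(self, filter):  # parameter name mirrors the PR's kwarg
            self._shared_filter = filter
            self._shared_filter_cash = None  # semantics changed: drop cache

        def add_filter(self, filter):  # hypothetical per-knob filter hook
            # The point above: any filter reshapes the space, so the shared
            # cache must be invalidated here too, not only in multi_filter().
            self._knob_filters.append(filter)
            self._shared_filter_cash = None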



##########
python/tvm/autotvm/task/space.py:
##########
@@ -838,22 +995,26 @@ def _add_new_transform(self, space_class, name, axes, policy, **kwargs):
             return [Axis(space, i) for i in range(space.num_output)]
         return [Axis(None, i) for i in range(space_class.get_num_output(axes, policy, **kwargs))]
 
-    def __len__(self):

Review Comment:
   There are pros and cons to removing __len__. The number of scripts that had to be modified shows that len(task.space) has become a fairly public API :( so it will be quite painful for end users if we remove this function.
   
   Let's have it return filtered_length, and add clear comments to the declarations of filtered_length and total_length explaining when each should be used.
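
A sketch of that compromise (the docstrings are suggested wording, not the PR's; the class is a stand-in, not ConfigSpace itself):

    import functools
    import operator

    class ConfigSpaceSketch:
        def __init__(self, dims, multi_filter=None):
            self._dims = dims  # length of each knob's value list
            self._shared_filter = multi_filter  # predicate over a flat index

        @property
        def total_length(self):
            """Raw size: the product of all knob lengths, ignoring any
            multi_filter. Use it when indexing raw points, e.g. in samplers."""
            return functools.reduce(operator.mul, self._dims, 1)

        @property
        def filtered_length(self):
            """Number of points that pass the multi_filter. Use it wherever
            the count of valid configurations matters, e.g. tuner budgets."""
            if self._shared_filter is None:
                return self.total_length
            return sum(
                1 for i in range(self.total_length) if self._shared_filter(i)
            )

        def __len__(self):
            # Keeps the widely used len(task.config_space) working, now
            # reporting only valid configurations.
            return self.filtered_length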



##########
python/tvm/topi/adreno/conv2d_nchw.py:
##########
@@ -268,7 +268,11 @@ def schedule_conv2d_NCHWc_KCRSk(cfg, s, output):
     cfg.define_split("tile_rx", rx, num_outputs=2)
     cfg.define_knob("auto_unroll_max_step", [0, 512, 1500])
     cfg.define_knob("unroll_explicit", [0, 1])
-
+    cfg.multi_filter(
+        filter=lambda entity: 32

Review Comment:
   We need to add the same multi_filter for the Adreno direct NHWC convolution as well.
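
For reference, a hypothetical multi_filter call of the shape used here (the real predicate is truncated in the diff above; the knob names and bounds below are illustrative assumptions only):

    # Hypothetical sketch: constrain the joint work-group size of the splits.
    cfg.multi_filter(
        filter=lambda entity: 32
        <= entity["tile_fc"].size[1]
        * entity["tile_y"].size[1]
        * entity["tile_x"].size[1]
        < 1024
    )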



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscribe@tvm.apache.org

For queries about this service, please contact Infrastructure at:
users@infra.apache.org