Posted to commits@mxnet.apache.org by GitBox <gi...@apache.org> on 2018/05/15 23:06:33 UTC

[GitHub] anirudh2290 commented on a change in pull request #10780: [MXNET-375] Lp Pooling and Global Lp Pooling

anirudh2290 commented on a change in pull request #10780: [MXNET-375] Lp Pooling and Global Lp Pooling
URL: https://github.com/apache/incubator-mxnet/pull/10780#discussion_r188455008
 
 

 ##########
 File path: tests/python/gpu/test_operator_gpu.py
 ##########
 @@ -769,140 +769,161 @@ def test_pooling_versions_helper(pool_op_list, data, kernel, pool_type, pad, str
             ctx_list.append({'ctx': mx.cpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
             if not global_pool:
                 sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                               pooling_convention=pooling_convention, name='pool'))
+                                               pooling_convention=pooling_convention, name='pool', p_value=p_value))
             else:
-                sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool'))
+                sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, name='pool', p_value=p_value))
         # Pooling gpu
         if 'pool_gpu' in pool_op_list:
             ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
             if not global_pool:
                 sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                               pooling_convention=pooling_convention, cudnn_off=True, name='pool'))
+                                               pooling_convention=pooling_convention, cudnn_off=True, name='pool', p_value=p_value))
             else:
                 sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, cudnn_off=True,
-                                               name='pool'))
+                                               name='pool', p_value=p_value))
         # CuDNNPooling
         if 'pool_cudnn' in pool_op_list:
             ctx_list.append({'ctx': mx.gpu(0), 'pool_data': data, 'type_dict': {'pool_data': np.float32}})
             if not global_pool:
                 sym_list.append(mx.sym.Pooling(kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                               pooling_convention=pooling_convention, cudnn_off=False, name='pool'))
+                                               pooling_convention=pooling_convention, p_value=p_value, cudnn_off=False, name='pool'))
             else:
-                sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, cudnn_off=False,
-                                               name='pool'))
+                sym_list.append(mx.sym.Pooling(kernel=kernel, pool_type=pool_type, global_pool=True, p_value=p_value,
+                                               cudnn_off=False, name='pool'))
         check_consistency(sym_list, ctx_list)
 
-    def test_1d_pooling(pool_type):
+    def test_1d_pooling(pool_type, p_value=2):
         data = (2, 3, 20)
         kernel = (4,)
         pad = (0,)
         stride = (1,)
         test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
                                      data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                     pooling_convention='valid', global_pool=False)
+                                     pooling_convention='valid', global_pool=False, p_value=p_value)
 
         pad = (2,)
         stride = (2,)
         test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
                                      data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                     pooling_convention='valid', global_pool=False)
+                                     pooling_convention='valid', global_pool=False, p_value=p_value)
 
         pad = (0,)
         stride = (1,)
         test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
                                      data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                     pooling_convention='full', global_pool=False)
+                                     pooling_convention='full', global_pool=False, p_value=p_value)
 
         pad = (2,)
         stride = (2,)
         test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
                                      data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                     pooling_convention='full', global_pool=False)
+                                     pooling_convention='full', global_pool=False, p_value=p_value)
 
         test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
                                      data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                     global_pool=True)
+                                     global_pool=True, p_value=p_value)
 
-    def test_2d_pooling(pool_type):
+    def test_2d_pooling(pool_type, p_value=2):
         data = (2, 3, 20, 20)
         kernel = (4, 5)
         pad = (0, 0)
         stride = (1, 1)
-        test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
-                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                     pooling_convention='valid', global_pool=False)
+        if pool_type == 'lp':
+            test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu'],
+                                         data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
+                                         pooling_convention='valid', global_pool=False, p_value=p_value)
+        else:
+            test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
+                                         data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
+                                         pooling_convention='valid', global_pool=False)
 
         # pool_v1 has bugs when pad is not 0, do not test PoolingV1 here
         pad = (2, 3)
         stride = (2, 3)
         test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
                                      data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                     pooling_convention='valid', global_pool=False)
+                                     pooling_convention='valid', global_pool=False, p_value=p_value)
 
         pad = (0, 0)
         stride = (1, 1)
-        test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
-                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                     pooling_convention='full', global_pool=False)
+        if pool_type == 'lp':
+            test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
+                                         data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
+                                         pooling_convention='full', global_pool=False, p_value=p_value)
+        else:
+            test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
+                                         data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
+                                         pooling_convention='full', global_pool=False)
 
         # pool_v1 has bugs when pad is not 0, do not test PoolingV1 here
         pad = (2, 3)
         stride = (2, 3)
         test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
                                      data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                     pooling_convention='full', global_pool=False)
-
-        test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
-                                     data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                     global_pool=True)
-
-    def test_3d_pooling(pool_type):
+                                     pooling_convention='full', global_pool=False, p_value=p_value)
+
+        if pool_type == 'lp':
+            test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
+                                         data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
+                                         global_pool=True, p_value=p_value)
+        else:
+            test_pooling_versions_helper(pool_op_list=['pool_v1_cpu', 'pool_v1_gpu', 'pool_cpu', 'pool_gpu', 'pool_cudnn'],
+                                         data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
+                                         global_pool=True)
+
+    def test_3d_pooling(pool_type, p_value=2):
         data = (2, 3, 20, 20, 20)
         kernel = (4, 5, 3)
         pad = (0, 0, 0)
         stride = (1, 1, 1)
         test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
                                      data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                     pooling_convention='valid', global_pool=False)
+                                     pooling_convention='valid', global_pool=False, p_value=p_value)
 
         pad = (2, 3, 3)
         stride = (2, 3, 1)
         test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
                                      data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                     pooling_convention='valid', global_pool=False)
+                                     pooling_convention='valid', global_pool=False, p_value=p_value)
 
         pad = (0, 0, 0)
         stride = (1, 1, 1)
         test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
                                      data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                     pooling_convention='full', global_pool=False)
+                                     pooling_convention='full', global_pool=False, p_value=p_value)
 
         pad = (2, 3, 3)
         stride = (2, 3, 1)
         test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
                                      data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                     pooling_convention='full', global_pool=False)
+                                     pooling_convention='full', global_pool=False, p_value=p_value)
 
         test_pooling_versions_helper(pool_op_list=['pool_cpu', 'pool_gpu', 'pool_cudnn'],
                                      data=data, kernel=kernel, pad=pad, stride=stride, pool_type=pool_type,
-                                     global_pool=True)
+                                     global_pool=True, p_value=p_value)
 
     test_1d_pooling('max')
     test_1d_pooling('avg')
     test_1d_pooling('sum')
+    test_1d_pooling('lp', p_value=1)
+    test_1d_pooling('lp', p_value=2)
 
     test_2d_pooling('max')
     test_2d_pooling('avg')
     test_2d_pooling('sum')
+    test_2d_pooling('lp', p_value=1)
+    test_2d_pooling('lp', p_value=2)
 
     test_3d_pooling('max')
     test_3d_pooling('avg')
     test_3d_pooling('sum')
+    test_3d_pooling('lp', p_value=1)
+    test_3d_pooling('lp', p_value=2)
 
 Review comment:
   Should we also add a test for p_value=3 at least once? I don't see it being tested anywhere.
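   A minimal sketch of the extra coverage being suggested, assuming the helper
   signatures in the diff above (test_1d_pooling, test_2d_pooling and
   test_3d_pooling, each taking a p_value keyword). The NumPy snippet is only
   an illustration, under the assumption that Lp pooling computes
   (sum |x|^p)^(1/p) over each window, showing that p=3 yields a result
   distinct from the p=1 and p=2 cases already covered:

       # Hypothetical additions inside test_pooling_versions, next to the
       # existing p_value=1 and p_value=2 calls (sketch only, not part of the PR):
       #     test_1d_pooling('lp', p_value=3)
       #     test_2d_pooling('lp', p_value=3)
       #     test_3d_pooling('lp', p_value=3)

       # Standalone check that p=3 is numerically distinct, assuming
       # Lp pooling output = (sum |x|**p) ** (1/p) over a pooling window.
       import numpy as np

       window = np.array([1.0, 2.0, 3.0, 4.0])
       for p in (1, 2, 3):
           print(p, (np.abs(window) ** p).sum() ** (1.0 / p))
       # p=1 -> 10.0, p=2 -> ~5.477, p=3 -> ~4.642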

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services