Posted to commits@madlib.apache.org by nk...@apache.org on 2020/12/09 00:39:15 UTC

[madlib] 06/07: Fix predict test cases

This is an automated email from the ASF dual-hosted git repository.

nkak pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/madlib.git

commit a6ae9b096f4ae1d7be9f0a57f6d97d1268107fc0
Author: Nikhil Kak <nk...@vmware.com>
AuthorDate: Wed Nov 11 17:53:47 2020 -0800

    Fix predict test cases
    
    JIRA: MADLIB-1438
    
    Removed the mocking of keras sessions in predict, since those mocks
    globally replaced the set_session and clear_session functions and made
    our fit/eval transition test cases fail.
    
    This was not an issue before because fit/eval were also mocking these
    functions, but now we do not need to mock them anywhere.
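    
    To make the leak concrete, here is a minimal, self-contained sketch (not
    MADlib code; "subject", "set_session" and "clear_keras_session" are
    stand-ins for the shared module attributes the unit tests patch): a Mock
    assigned to a module attribute in one test's setUp stays visible to every
    other test class that uses the same module unless something restores it,
    whereas a scoped patch undoes itself.
    
        # Illustrative sketch only -- the names are stand-ins, not MADlib code.
        import types
        from unittest.mock import Mock, patch

        # Stand-in for the module whose keras-session helpers all the tests share.
        subject = types.SimpleNamespace(
            set_session=lambda sess: "real set_session",
            clear_keras_session=lambda: "real clear_keras_session",
        )

        # What the removed setUp lines effectively did: a bare assignment that is
        # never undone, so every later test still sees the Mock.
        subject.set_session = Mock(name="leaked")
        print(subject.set_session("sess"))     # a Mock; the real function is gone

        # A scoped alternative, had the mock still been needed: patch.object
        # restores the original attribute when the with-block exits.
        with patch.object(subject, "clear_keras_session", Mock(name="scoped")):
            subject.clear_keras_session()      # mocked only inside this block
        print(subject.clear_keras_session())   # "real clear_keras_session" again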
    
    Also fixed test_predict_error_should_clear_sd by making it fail after SD
    is populated; previously it failed before SD was set. Setting
    normalizing_const to 0 no longer triggers a failure, so we set
    current_seg_id to -1 to inject the error.
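    
    A minimal sketch of why the new injection is reliable (illustrative only;
    the values of all_seg_ids and the variable names are stand-ins, not the
    MADlib implementation, which presumably fails on a list index lookup):
    dividing a numpy array by a normalizing constant of 0 yields inf rather
    than raising, while looking up a current_seg_id of -1 in the segment id
    list raises a ValueError, and only after the per-segment dictionary SD
    would already have been populated.
    
        # Illustrative sketch only -- stand-in values, not the MADlib code path.
        import numpy as np

        all_seg_ids = [0, 1, 2]
        independent_var = np.array([[240.0]])

        # A normalizing constant of 0 no longer raises: numpy turns the division
        # into inf (with a RuntimeWarning), so the old injection is silent.
        print(independent_var / 0)              # [[inf]], no exception

        # A segment id that is not in the list raises immediately.
        current_seg_id = -1
        try:
            all_seg_ids.index(current_seg_id)
        except ValueError as exc:
            print(repr(exc))                    # ValueError('-1 is not in list')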
    
    Co-authored-by: Ekta Khanna <ek...@vmware.com>
---
 .../test/unit_tests/test_madlib_keras.py_in        | 36 +++++++---------------
 1 file changed, 11 insertions(+), 25 deletions(-)

diff --git a/src/ports/postgres/modules/deep_learning/test/unit_tests/test_madlib_keras.py_in b/src/ports/postgres/modules/deep_learning/test/unit_tests/test_madlib_keras.py_in
index 7cdd83c..8d67c09 100644
--- a/src/ports/postgres/modules/deep_learning/test/unit_tests/test_madlib_keras.py_in
+++ b/src/ports/postgres/modules/deep_learning/test/unit_tests/test_madlib_keras.py_in
@@ -776,9 +776,6 @@ class InternalKerasPredictTestCase(unittest.TestCase):
 
         self.independent_var = [[[240]]]
         self.total_images_per_seg = [3,3,4]
-        self.subject.K.set_session = Mock()
-        self.subject.clear_keras_session = Mock()
-
 
     def tearDown(self):
         self.module_patcher.stop()
@@ -789,7 +786,6 @@ class InternalKerasPredictTestCase(unittest.TestCase):
         serialized_weights = np.array(model_weights, dtype=np.float32).tostring()
 
         k = {'SD': {}}
-        is_response = True
         result = self.subject.internal_keras_predict(
             self.independent_var, self.model.to_json(),
             serialized_weights, 255, 0, self.all_seg_ids,
@@ -803,7 +799,6 @@ class InternalKerasPredictTestCase(unittest.TestCase):
 
         k = {'SD': { 'row_count': 1}}
         k['SD']['segment_model_predict'] = self.model
-        is_response = True
         result = self.subject.internal_keras_predict(
             self.independent_var, None, None, 255, 0,
             self.all_seg_ids, self.total_images_per_seg, False, 0, 4, **k)
@@ -817,17 +812,6 @@ class InternalKerasPredictTestCase(unittest.TestCase):
 
         k = {'SD': { 'row_count': 2}}
         k['SD']['segment_model_predict'] = self.model
-        is_response = True
-        result = self.subject.internal_keras_predict(
-            self.independent_var, None, None, 255, 0,
-            self.all_seg_ids, self.total_images_per_seg, False, 0, 4, **k)
-        self.assertEqual(3, len(result))
-        self.assertEqual(False, 'row_count' in k['SD'])
-        self.assertEqual(False, 'segment_model_predict' in k['SD'])
-
-        k = {'SD': { 'row_count': 2}}
-        k['SD']['segment_model_predict'] = self.model
-        is_response = False
         result = self.subject.internal_keras_predict(
             self.independent_var, None, None, 255, 0,
             self.all_seg_ids, self.total_images_per_seg, False, 0, 4, **k)
@@ -838,20 +822,21 @@ class InternalKerasPredictTestCase(unittest.TestCase):
         self.assertEqual(False, 'row_count' in k['SD'])
         self.assertEqual(False, 'segment_model_predict' in k['SD'])
 
-
     def test_predict_error_should_clear_sd(self):
         self.subject.is_platform_pg = Mock(return_value = False)
-        self.model.add(Dense(3))
+        # self.model.add(Dense(3))
+        model_weights = [1, 2, 3, 4]
+        serialized_weights = np.array(model_weights, dtype=np.float32).tostring()
 
-        # inject error by passing 0 as the normalizing const so that we get a
-        # divide by zero error
-        normalizing_const = 0
+        # inject error by passing current_seg_id as -1
+        current_seg_id = -1
         k = {'SD':{}}
-        is_response = True
-        with self.assertRaises(plpy.PLPYException):
+        with self.assertRaises(plpy.PLPYException) as error:
             self.subject.internal_keras_predict(
-                self.independent_var, None, None, normalizing_const,
-                0, self.all_seg_ids, self.total_images_per_seg, False, 0, 4, **k)
+                self.independent_var, self.model.to_json(), serialized_weights,
+                255, current_seg_id, self.all_seg_ids,
+                self.total_images_per_seg, False, 0, 4, **k)
+        self.assertEqual("ValueError('-1 is not in list',)", str(error.exception))
         self.assertEqual(False, 'row_count' in k['SD'])
         self.assertEqual(False, 'segment_model_predict' in k['SD'])
 
@@ -1447,6 +1432,7 @@ class InputValidatorTestCase(unittest.TestCase):
             obj = self.subject._validate_gpu_config(self.module_name, 'foo', [1,0,0,1])
         self.assertIn('does not have gpu', str(error.exception).lower())
 
+
 class MadlibSerializerTestCase(unittest.TestCase):
     def setUp(self):
         self.plpy_mock = Mock(spec='error')