This is an automated email from the ASF dual-hosted git repository.

okislal pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/madlib.git


The following commit(s) were added to refs/heads/master by this push:
     new 00b08ac  DL: Improve parameter parsing and add unit tests
00b08ac is described below

commit 00b08acdb9a21fdaad106649c5b8bc7640b7a7c6
Author: Orhan Kislal <[email protected]>
AuthorDate: Tue May 14 14:32:14 2019 -0700

    DL: Improve parameter parsing and add unit tests
    
    JIRA: MADLIB-1336
    
    This commit enforces that optimizer and loss parameters are required.
    We also improve the parsing logic and the unit tests.
    
    This commit also fixes the fit params parsing to allow NULL (i.e., no
    user-defined fit params).
    
    Closes #385
    
    Co-authored-by: Nikhil Kak <[email protected]>
---
 .../modules/deep_learning/madlib_keras.py_in       |   1 +
 .../deep_learning/madlib_keras_wrapper.py_in       |  46 ++---
 .../modules/deep_learning/test/madlib_keras.sql_in |  19 +-
 .../test/unit_tests/test_madlib_keras.py_in        | 204 +++++++++++++++------
 4 files changed, 194 insertions(+), 76 deletions(-)
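
For context, the contract these changes establish can be sketched as follows.
This is a minimal, illustrative sketch only: it uses a naive key=value splitter
and plain AssertionError in place of the real parser and plpy error reporting,
so the helper names below are stand-ins, not the MADlib implementation (see the
diff below for the actual code).

    # Illustrative sketch, not MADlib code: 'optimizer' and 'loss' are required
    # compile parameters, and a NULL/empty fit params string is accepted as
    # "no user-defined fit params". The real parser also handles nested lists
    # and parentheses, which this naive splitter does not.

    def parse_compile_params(param_str):
        pairs = [kv.split('=', 1) for kv in param_str.split(',')] if param_str else []
        params = {k.strip(): v.strip() for k, v in pairs}
        assert 'optimizer' in params, "optimizer is a required parameter for compile"
        assert 'loss' in params, "loss is a required parameter for compile"
        return params

    def parse_fit_params(param_str):
        if not param_str:
            return {}  # NULL / empty string => no user-defined fit params
        pairs = [kv.split('=', 1) for kv in param_str.split(',')]
        return {k.strip(): v.strip() for k, v in pairs}

    print(parse_fit_params(None))                      # {}
    print(parse_fit_params("batch_size=2, epochs=1"))  # {'batch_size': '2', 'epochs': '1'}
    try:
        parse_compile_params("loss='categorical_crossentropy'")
    except AssertionError as err:
        print(err)  # optimizer is a required parameter for compile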

diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras.py_in b/src/ports/postgres/modules/deep_learning/madlib_keras.py_in
index e9b3654..d0dde1d 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras.py_in
@@ -61,6 +61,7 @@ def fit(schema_madlib, source_table, model,model_arch_table,
     dependent_varname = MINIBATCH_OUTPUT_DEPENDENT_COLNAME_DL
     independent_varname = MINIBATCH_OUTPUT_INDEPENDENT_COLNAME_DL
     model_arch_table = quote_ident(model_arch_table)
+    fit_params = "" if not fit_params else fit_params
 
     fit_validator = FitInputValidator(
         source_table, validation_table, model, model_arch_table,
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras_wrapper.py_in b/src/ports/postgres/modules/deep_learning/madlib_keras_wrapper.py_in
index 7e5b10a..e30c9d8 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_wrapper.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_wrapper.py_in
@@ -161,17 +161,19 @@ def parse_and_validate_compile_params(str_of_args):
         compile_dict:           Dictionary of arguments for keras.compile
     """
 
-    compile_dict = convert_string_of_args_to_dict(str_of_args)
-
-    opt_name,opt_args = parse_optimizer(compile_dict)
-    compile_dict['loss'] = parse_loss(compile_dict)
     literal_eval_compile_params = ['metrics', 'loss_weights',
                                    'weighted_metrics', 'sample_weight_mode']
     accepted_compile_params = literal_eval_compile_params + ['optimizer', 'loss']
 
+    compile_dict = convert_string_of_args_to_dict(str_of_args)
     compile_dict = validate_and_literal_eval_keys(compile_dict,
                                                   literal_eval_compile_params,
                                                   accepted_compile_params)
+
+    _assert('optimizer' in compile_dict, "optimizer is a required parameter for compile")
+    opt_name, opt_args = parse_optimizer(compile_dict)
+
+    _assert('loss' in compile_dict, "loss is a required parameter for compile")
     validate_compile_param_types(compile_dict)
     return (opt_name,opt_args,compile_dict)
 
@@ -213,27 +215,28 @@ def parse_optimizer(compile_dict):
                 final_args[key] = float(value)
     return (opt_name,final_args)
 
-# Parse the loss function.
-def parse_loss(compile_dict):
-    loss_split = compile_dict['loss'].split('.')
-    if (len(loss_split) == 2 and
-        loss_split[0] == 'losses' and
-        loss_split[1] in dir(losses)):
-        return eval('losses.'+loss_split[1])
-    return compile_dict['loss']
 
 # Parse the fit parameters into a dictionary.
 def parse_and_validate_fit_params(fit_param_str):
-    fit_params_dict = convert_string_of_args_to_dict(fit_param_str)
 
-    literal_eval_fit_params = ['batch_size','epochs','verbose','shuffle',
-                           'class_weight','initial_epoch','steps_per_epoch']
-    accepted_fit_params = literal_eval_fit_params + ['shuffle']
+    if fit_param_str:
+        fit_params_dict = convert_string_of_args_to_dict(fit_param_str)
+
+        literal_eval_fit_params = ['batch_size','epochs','verbose',
+                               'class_weight','initial_epoch','steps_per_epoch']
+        accepted_fit_params = literal_eval_fit_params + ['shuffle']
 
-    fit_params_dict = validate_and_literal_eval_keys(fit_params_dict,
-                                                     literal_eval_fit_params,
-                                                     accepted_fit_params)
-    return fit_params_dict
+        fit_params_dict = validate_and_literal_eval_keys(fit_params_dict,
+                                                         literal_eval_fit_params,
+                                                         accepted_fit_params)
+        if 'shuffle' in fit_params_dict:
+            shuffle_value = fit_params_dict['shuffle']
+            if shuffle_value == 'True' or shuffle_value == 'False':
+                fit_params_dict['shuffle'] = bool(shuffle_value)
+
+        return fit_params_dict
+    else:
+        return {}
 
 # Validate the keys of the given dictionary and run literal_eval on the
 # user-defined subset
@@ -275,7 +278,8 @@ def compile_model(model, compile_params):
 
 def validate_compile_param_types(compile_dict):
 
-    _assert(compile_dict['metrics'] is None or
+    _assert('metrics' not in compile_dict.keys() or
+            compile_dict['metrics'] is None or
             type(compile_dict['metrics']) is list,
             "wrong input type for compile parameter metrics: multi-output 
model"
             "and user defined metrics are not supported yet, please pass a 
list")
diff --git a/src/ports/postgres/modules/deep_learning/test/madlib_keras.sql_in b/src/ports/postgres/modules/deep_learning/test/madlib_keras.sql_in
index a1968b0..bc0789b 100644
--- a/src/ports/postgres/modules/deep_learning/test/madlib_keras.sql_in
+++ b/src/ports/postgres/modules/deep_learning/test/madlib_keras.sql_in
@@ -282,7 +282,7 @@ SELECT madlib_keras_fit(
     'keras_out',
     'model_arch',
     1,
-    $$ optimizer='SGD', loss=losses.categorical_crossentropy, metrics=['accuracy']$$::text,
+    $$ optimizer='SGD', loss='categorical_crossentropy', metrics=['accuracy']$$::text,
     $$ batch_size=2, epochs=1, verbose=0 $$::text,
     1,
     NULL,
@@ -295,7 +295,7 @@ SELECT madlib_keras_fit(
     'keras_out',
     'model_arch',
     1,
-    $$ optimizer='Adam()', loss=losses.categorical_crossentropy, metrics=['accuracy']$$::text,
+    $$ optimizer='Adam()', loss='categorical_crossentropy', metrics=['accuracy']$$::text,
     $$ batch_size=2, epochs=1, verbose=0 $$::text,
     1,
     NULL,
@@ -308,7 +308,7 @@ SELECT madlib_keras_fit(
     'keras_out',
     'model_arch',
     1,
-    $$ optimizer=Adam(epsilon=None), loss=losses.categorical_crossentropy, metrics=['accuracy']$$::text,
+    $$ optimizer=Adam(epsilon=None), loss='categorical_crossentropy', metrics=['accuracy']$$::text,
     $$ batch_size=2, epochs=1, verbose=0 $$::text,
 
     1,
@@ -329,6 +329,19 @@ SELECT madlib_keras_fit(
     NULL,
     'model name', 'model desc');
 
+DROP TABLE IF EXISTS keras_out, keras_out_summary;
+SELECT madlib_keras_fit(
+    'cifar_10_sample_batched',
+    'keras_out',
+    'model_arch',
+    1,
+    $$ optimizer=SGD(lr=0.01, decay=1e-6, nesterov=True), metrics=['accuracy'], loss_weights=[2], sample_weight_mode=None, loss='categorical_crossentropy' $$::text,
+    NULL,
+    1,
+    NULL,
+    NULL,
+    'model name', 'model desc');
+
 -- -- negative test case for passing non numeric y to fit
 -- induce failure by passing a non numeric column
 create table cifar_10_sample_val_failure as select * from cifar_10_sample_val;
diff --git a/src/ports/postgres/modules/deep_learning/test/unit_tests/test_madlib_keras.py_in b/src/ports/postgres/modules/deep_learning/test/unit_tests/test_madlib_keras.py_in
index 8e65662..61f7f0a 100644
--- a/src/ports/postgres/modules/deep_learning/test/unit_tests/test_madlib_keras.py_in
+++ b/src/ports/postgres/modules/deep_learning/test/unit_tests/test_madlib_keras.py_in
@@ -414,6 +414,28 @@ class MadlibKerasWrapperTestCase(unittest.TestCase):
     def tearDown(self):
         self.module_patcher.stop()
 
+    def test_get_gpu_memory_fraction(self):
+
+        gpus_per_host = 4
+        segments_per_host = 4
+        result = self.subject.get_gpu_memory_fraction(gpus_per_host, segments_per_host)
+        self.assertEqual(result, 0.9)
+
+        gpus_per_host = 10
+        segments_per_host = 4
+        result = self.subject.get_gpu_memory_fraction(gpus_per_host, segments_per_host)
+        self.assertEqual(result, 0.9)
+
+        gpus_per_host = 2
+        segments_per_host = 6
+        result = self.subject.get_gpu_memory_fraction(gpus_per_host, segments_per_host)
+        self.assertEqual(result, 0.3)
+
+        gpus_per_host = 1
+        segments_per_host = 4
+        result = self.subject.get_gpu_memory_fraction(gpus_per_host, segments_per_host)
+        self.assertEqual(result, 0.225)
+
     def test_get_device_name_and_set_cuda_env_postgres(self):
         self.subject.is_platform_pg = Mock(return_value = True)
 
@@ -447,19 +469,7 @@ class MadlibKerasWrapperTestCase(unittest.TestCase):
     def test_split_and_strip(self):
         self.assertEqual(('a','b'), self.subject.split_and_strip(' a = b '))
 
-    def test_parse_and_validate_fit_params(self):
-        result = {'batch_size':2, 'epochs':1, 'verbose':0}
-        self.assertDictEqual(result, self.subject.parse_and_validate_fit_params('batch_size=2, epochs=1, verbose=0'))
-
     def test_parse_optimizer(self):
-
-        import keras.losses as losses
-
-        loss_func = losses.categorical_crossentropy
-        compile_dict = {'loss':'losses.categorical_crossentropy'}
-        self.assertEqual(self.subject.parse_loss(compile_dict), loss_func)
-
-    def test_parse_loss(self):
         opt_name = 'SGD'
         final_args = {'lr':0.01, 'decay':1e-6, 'nesterov':True}
         compile_dict = {}
@@ -469,22 +479,7 @@ class MadlibKerasWrapperTestCase(unittest.TestCase):
         self.assertEqual(result_name, opt_name)
         self.assertDictEqual(result_params, final_args)
 
-    def test_parse_and_validate_compile_params(self):
-
-        test_str = "optimizer=SGD(lr=0.01, decay=1e-6, nesterov=True), loss='categorical_crossentropy', metrics=['accuracy']"
-        compile_dict = {'optimizer':'SGD(lr=0.01, decay=1e-6, nesterov=True)', 'metrics':['accuracy'], 'loss':'categorical_crossentropy'}
-        opt_name,opt_args,result_params = self.subject.parse_and_validate_compile_params(test_str)
-        self.assertDictEqual(result_params, compile_dict)
-
-    def test_parse_and_validate_fit_params(self):
-
-        test_str = "batch_size=2, epochs=1, verbose=0"
-        fit_dict = {'batch_size':2, 'epochs':1, 'verbose':0}
-        result_params = self.subject.parse_and_validate_fit_params(test_str)
-        self.assertDictEqual(result_params, fit_dict)
-
     def test_validate_and_literal_eval_keys(self):
-
         test_dict = {'batch_size':'2', 'epochs':'1', 'verbose':'0'}
         target_dict = {'batch_size':2, 'epochs':1, 'verbose':0}
         literal_eval_fit_params = ['batch_size','epochs','verbose','shuffle',
@@ -496,58 +491,129 @@ class MadlibKerasWrapperTestCase(unittest.TestCase):
                             accepted_fit_params)
         self.assertDictEqual(result_params, target_dict)
 
-    def test_get_gpu_memory_fraction(self):
+    def test_parse_and_validate_fit_params(self):
+        result = {'batch_size':2, 'epochs':1, 'verbose':0}
+        self.assertDictEqual(result, self.subject.parse_and_validate_fit_params('batch_size=2, epochs=1, verbose=0'))
 
-        gpus_per_host = 4
-        segments_per_host = 4
-        result = self.subject.get_gpu_memory_fraction(gpus_per_host, segments_per_host)
-        self.assertEqual(result, 0.9)
+    def test_parse_and_validate_fit_params(self):
+        test_str = "batch_size=2, epochs=1, verbose=0"
+        fit_dict = {'batch_size':2, 'epochs':1, 'verbose':0}
+        result_params = self.subject.parse_and_validate_fit_params(test_str)
+        self.assertDictEqual(result_params, fit_dict)
 
-        gpus_per_host = 10
-        segments_per_host = 4
-        result = self.subject.get_gpu_memory_fraction(gpus_per_host, segments_per_host)
-        self.assertEqual(result, 0.9)
+        test_str = "batch_size=2, epochs=1, verbose=0, shuffle = 'batch'"
+        fit_dict = {'batch_size':2, 'epochs':1, 'verbose':0, 'shuffle':'batch'}
+        result_params = self.subject.parse_and_validate_fit_params(test_str)
+        self.assertDictEqual(result_params, fit_dict)
 
-        gpus_per_host = 2
-        segments_per_host = 6
-        result = self.subject.get_gpu_memory_fraction(gpus_per_host, segments_per_host)
-        self.assertEqual(result, 0.3)
+        test_str = "batch_size=2, epochs=1, verbose=0, shuffle = True"
+        fit_dict = {'batch_size':2, 'epochs':1, 'verbose':0, 'shuffle':True}
+        result_params = self.subject.parse_and_validate_fit_params(test_str)
+        self.assertDictEqual(result_params, fit_dict)
 
-        gpus_per_host = 1
-        segments_per_host = 4
-        result = self.subject.get_gpu_memory_fraction(gpus_per_host, segments_per_host)
-        self.assertEqual(result, 0.225)
+        test_str = ""
+        fit_dict = {}
+        result_params = self.subject.parse_and_validate_fit_params(test_str)
+        self.assertDictEqual(result_params, fit_dict)
+
+    def test_parse_and_validate_fit_params_duplicate_params(self):
+        test_str = "batch_size=1, batch_size=10, verbose=0"
+        fit_dict = {'batch_size':10, 'verbose':0}
+        result_params = self.subject.parse_and_validate_fit_params(test_str)
+        self.assertDictEqual(result_params, fit_dict)
+
+    def test_parse_and_validate_fit_params_invalid_fit_param_fail(self):
+        test_str = "does_not_exist=2, epochs=1, verbose=0"
+        with self.assertRaises(plpy.PLPYException) as error:
+            self.subject.parse_and_validate_fit_params(test_str)
+
+        self.assertIn('not accepted', str(error.exception))
+
+        test_str = "batch_size=not_lit_eval(1), epochs=1, verbose=0"
+        with self.assertRaises(plpy.PLPYException) as error:
+            self.subject.parse_and_validate_fit_params(test_str)
 
+        self.assertIn('invalid input value', str(error.exception))
+
+    def test_parse_and_validate_compile_params(self):
+        test_str = "optimizer=SGD(lr=0.01, decay=1e-6, nesterov=True), " \
+                   "loss='categorical_crossentropy', metrics=['accuracy']"
+        compile_dict = {'optimizer':'SGD(lr=0.01, decay=1e-6, nesterov=True)',
+                        'metrics':['accuracy'], 'loss':'categorical_crossentropy'}
+        opt_name,opt_args,result_params = self.subject.parse_and_validate_compile_params(test_str)
+        self.assertDictEqual(result_params, compile_dict)
+
+        # Test without the metrics
+        test_str = "optimizer=SGD(lr=0.01, decay=1e-6, nesterov=True), " \
+                   "loss='categorical_crossentropy'"
+        compile_dict = {'optimizer':'SGD(lr=0.01, decay=1e-6, nesterov=True)',
+                        'loss':'categorical_crossentropy'}
+        opt_name,opt_args,result_params = self.subject.parse_and_validate_compile_params(test_str)
+        self.assertDictEqual(result_params, compile_dict)
+
+    def test_parse_and_validate_compile_params_default_optimizer_pass(self):
+        test_str = "optimizer='SGD', loss='categorical_crossentropy'"
+        _,_,result_dict = self.subject.parse_and_validate_compile_params(test_str)
+        self.assertEqual('SGD', result_dict['optimizer'])
+
+        test_str = "optimizer='sgd()', loss='categorical_crossentropy'"
+        _,_,result_dict = self.subject.parse_and_validate_compile_params(test_str)
+        self.assertEqual('sgd()', result_dict['optimizer'])
+
+    def test_parse_and_validate_compile_params_duplicate_loss(self):
+        test_str = "optimizer='SGD', loss='loss1', metrics=['accuracy'], loss='loss2'"
+        _,_,result_dict = self.subject.parse_and_validate_compile_params(test_str)
+        self.assertEqual('loss2', result_dict['loss'])
+
+    def test_parse_and_validate_compile_params_no_optimizer_fail(self):
+        test_str = "loss='categorical_crossentropy', metrics=['accuracy']"
+
+        with self.assertRaises(plpy.PLPYException):
+            self.subject.parse_and_validate_compile_params(test_str)
+
+    def test_parse_and_validate_compile_params_no_loss_fail(self):
+        test_str = "optimizer=SGD(lr=0.01, decay=1e-6, nesterov=True), " \
+                   "metrics=['accuracy']"
+
+        with self.assertRaises(plpy.PLPYException):
+            self.subject.parse_and_validate_compile_params(test_str)
 
-    ## Negative Tests
     def test_parse_and_validate_compile_params_dict_metrics_fail(self):
-        test_str = "optimizer=SGD(lr=0.01, decay=1e-6, nesterov=True), loss='categorical_crossentropy', metrics={'0':'accuracy'}"
+        test_str = "optimizer=SGD(lr=0.01, decay=1e-6, nesterov=True), " \
+                   "loss='categorical_crossentropy', metrics={'0':'accuracy'}"
 
         with self.assertRaises(plpy.PLPYException):
             self.subject.parse_and_validate_compile_params(test_str)
 
     def test_parse_and_validate_compile_params_tensor_loss_weights_fail(self):
 
-        test_str = "optimizer=SGD(lr=0.01, decay=1e-6, nesterov=True), loss='categorical_crossentropy', metrics=['accuracy'], loss_weights = keras.layers.Input(shape=(32,))"
+        test_str = "optimizer=SGD(lr=0.01, decay=1e-6, nesterov=True)," \
+                   " loss='categorical_crossentropy', metrics=['accuracy']," \
+                   " loss_weights = keras.layers.Input(shape=(32,))"
 
         with self.assertRaises(plpy.PLPYException):
             self.subject.parse_and_validate_compile_params(test_str)
 
-
     def test_parse_and_validate_compile_params_list_dict_sample_weight_mode_fail(self):
-        test_str = "optimizer=SGD(lr=0.01, decay=1e-6, nesterov=True), loss='categorical_crossentropy', metrics=['accuracy'], sample_weight_mode = [0,1]"
+        test_str = "optimizer=SGD(lr=0.01, decay=1e-6, nesterov=True)," \
+                   " loss='categorical_crossentropy', metrics=['accuracy']," \
+                   " sample_weight_mode = [0,1]"
 
         with self.assertRaises(plpy.PLPYException):
             self.subject.parse_and_validate_compile_params(test_str)
 
-        test_str = "optimizer=SGD(lr=0.01, decay=1e-6, nesterov=True), loss='categorical_crossentropy', metrics=['accuracy'], sample_weight_mode = {'0':'1'}"
+        test_str = "optimizer=SGD(lr=0.01, decay=1e-6, nesterov=True)," \
+                   " loss='categorical_crossentropy', metrics=['accuracy']," \
+                   " sample_weight_mode = {'0':'1'}"
 
         with self.assertRaises(plpy.PLPYException):
             self.subject.parse_and_validate_compile_params(test_str)
 
 
     def test_parse_and_validate_compile_params_target_tensors_fail(self):
-        test_str = "optimizer=SGD(lr=0.01, decay=1e-6, nesterov=True), loss='categorical_crossentropy', metrics=['accuracy'], target_tensors = keras.layers.Input(shape=(32,))"
+        test_str = "optimizer=SGD(lr=0.01, decay=1e-6, nesterov=True)," \
+                   " loss='categorical_crossentropy', metrics=['accuracy']," \
+                   " target_tensors = keras.layers.Input(shape=(32,))"
 
         with self.assertRaises(plpy.PLPYException):
             self.subject.parse_and_validate_compile_params(test_str)
@@ -583,7 +649,6 @@ class MadlibKerasWrapperTestCase(unittest.TestCase):
             self.subject.parse_and_validate_fit_params(test_str)
 
     def test_parse_and_validate_fit_params_validation_freq_fail(self):
-
         test_str = "batch_size=2, epochs=1, verbose=0, validation_freq=1"
         with self.assertRaises(plpy.PLPYException):
             self.subject.parse_and_validate_fit_params(test_str)
@@ -592,6 +657,41 @@ class MadlibKerasWrapperTestCase(unittest.TestCase):
         with self.assertRaises(plpy.PLPYException):
             self.subject.parse_and_validate_fit_params(test_str)
 
+    def test_parse_and_validate_compile_params_syntax_error_fail(self):
+        #missing end parentheses
+        test_str = "optimizer=SGD(lr=0.01, decay=1e-6, nesterov=True"
+        with self.assertRaises(ValueError) as error:
+            self.subject.parse_and_validate_compile_params(test_str)
+        self.assertIn('could not convert string to float', str(error.exception))
+
+        #missing beginning parentheses
+        test_str = "optimizer=SGDlr=0.01, decay=1e-6, nesterov=True)," \
+                   " loss='categorical_crossentropy'"
+        with self.assertRaises(plpy.PLPYException) as error:
+            self.subject.parse_and_validate_compile_params(test_str)
+        self.assertIn('not accepted', str(error.exception))
+
+        #missing comma
+        test_str = "optimizer=SGD(lr=0.01 decay=1e-6, nesterov=True)," \
+                   " loss='categorical_crossentropy'"
+        with self.assertRaises(ValueError) as error:
+            self.subject.parse_and_validate_compile_params(test_str)
+        self.assertIn('invalid literal for float', str(error.exception))
+
+        test_str = ""
+        with self.assertRaises(plpy.PLPYException) as error:
+            self.subject.parse_and_validate_compile_params(test_str)
+        self.assertIn('not accepted', str(error.exception))
+
+    def test_parse_and_validate_fit_params_invalid_optimizer_fail(self):
+        test_str = "optimizer='SGD1', loss='categorical_crossentropy'"
+        with self.assertRaises(plpy.PLPYException) as error:
+            self.subject.parse_and_validate_compile_params(test_str)
+        self.assertIn('invalid optimizer', str(error.exception))
+
+
+
+
 class MadlibKerasValidatorTestCase(unittest.TestCase):
     def setUp(self):
         self.plpy_mock = Mock(spec='error')
