This is an automated email from the ASF dual-hosted git repository.
okislal pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/madlib.git
The following commit(s) were added to refs/heads/master by this push:
new e44b6cc DL:Suppress unnecessary notices and misc cleanup
e44b6cc is described below
commit e44b6cc51105c61005f39b277288ba99dc4069bb
Author: Ekta Khanna <[email protected]>
AuthorDate: Mon Jun 10 16:17:44 2019 -0700
DL:Suppress unnecessary notices and misc cleanup
This commit suppresses unnecessary notices in `load_keras_model()` and
`delete_keras_model()`. It also rewords a few error/warning messages and
updates the unit tests.
Closes #411
---
.../modules/deep_learning/input_data_preprocessor.py_in | 2 +-
.../modules/deep_learning/keras_model_arch_table.py_in | 10 ++++++----
.../postgres/modules/deep_learning/madlib_keras.py_in | 17 ++++-------------
.../modules/deep_learning/madlib_keras_wrapper.py_in | 9 ++++-----
.../modules/deep_learning/test/madlib_keras.sql_in | 2 +-
.../test/unit_tests/test_madlib_keras.py_in | 4 ++--
6 files changed, 18 insertions(+), 26 deletions(-)
diff --git
a/src/ports/postgres/modules/deep_learning/input_data_preprocessor.py_in
b/src/ports/postgres/modules/deep_learning/input_data_preprocessor.py_in
index 2b78a3b..71e5feb 100644
--- a/src/ports/postgres/modules/deep_learning/input_data_preprocessor.py_in
+++ b/src/ports/postgres/modules/deep_learning/input_data_preprocessor.py_in
@@ -273,7 +273,7 @@ class InputDataPreprocessorDL(object):
"as input.")
_assert((is_valid_psql_type(self.dependent_vartype, NUMERIC | TEXT |
BOOLEAN) or
is_valid_psql_type(self.dependent_vartype, NUMERIC |
ONLY_ARRAY)),
- """Invalid dependent variable type, should be one of the type
in this list:
+ """Invalid dependent variable type, should be one of the types
in this list:
numeric, text, boolean, or numeric array""")
def get_distinct_dependent_levels(self, table, dependent_varname,
diff --git
a/src/ports/postgres/modules/deep_learning/keras_model_arch_table.py_in
b/src/ports/postgres/modules/deep_learning/keras_model_arch_table.py_in
index 7d91540..0d6fc7c 100644
--- a/src/ports/postgres/modules/deep_learning/keras_model_arch_table.py_in
+++ b/src/ports/postgres/modules/deep_learning/keras_model_arch_table.py_in
@@ -59,6 +59,7 @@ class ModelArchSchema:
(MODEL_ID, MODEL_ARCH, MODEL_WEIGHTS, NAME, DESCRIPTION,
__INTERNAL_MADLIB_ID__) = col_names
+@MinWarning("error")
def load_keras_model(keras_model_arch_table, model_arch, model_weights,
name, description, **kwargs):
model_arch_table = quote_ident(keras_model_arch_table)
@@ -70,13 +71,13 @@ def load_keras_model(keras_model_arch_table, model_arch,
model_weights,
.format(**locals())
plpy.execute(sql, 0)
- plpy.info("Keras Model Arch: Created new keras model arch table {0}." \
+ plpy.info("Keras Model Arch: Created new keras model architecture
table {0}." \
.format(model_arch_table))
else:
missing_cols = columns_missing_from_table(model_arch_table,
ModelArchSchema.col_names)
if len(missing_cols) > 0:
- plpy.error("Keras Model Arch: Invalid keras model arch table {0},"
+ plpy.error("Keras Model Arch: Invalid keras model architecture
table {0},"
" missing columns: {1}".format(model_arch_table,
missing_cols))
@@ -99,13 +100,14 @@ def load_keras_model(keras_model_arch_table, model_arch,
model_weights,
plpy.info("Keras Model Arch: Added model id {0} to {1} table".
format(select_res[0][ModelArchSchema.MODEL_ID],
model_arch_table))
+@MinWarning("error")
def delete_keras_model(keras_model_arch_table, model_id, **kwargs):
model_arch_table = quote_ident(keras_model_arch_table)
input_tbl_valid(model_arch_table, "Keras Model Arch")
missing_cols = columns_missing_from_table(model_arch_table,
ModelArchSchema.col_names)
if len(missing_cols) > 0:
- plpy.error("Keras Model Arch: Invalid keras model arch table {0},"
+ plpy.error("Keras Model Arch: Invalid keras model architecture table
{0},"
" missing columns: {1}".format(model_arch_table,
missing_cols))
sql = """
@@ -123,7 +125,7 @@ def delete_keras_model(keras_model_arch_table, model_id,
**kwargs):
sql = "SELECT {0} FROM {1}".format(ModelArchSchema.MODEL_ID,
model_arch_table)
res = plpy.execute(sql, 0)
if not res:
- plpy.info("Keras Model Arch: Dropping empty keras model arch "\
+ plpy.info("Keras Model Arch: Dropping empty keras model architecture "\
"table
{model_arch_table}".format(model_arch_table=model_arch_table))
sql = "DROP TABLE {0}".format(model_arch_table)
plpy.execute(sql, 0)
diff --git a/src/ports/postgres/modules/deep_learning/madlib_keras.py_in
b/src/ports/postgres/modules/deep_learning/madlib_keras.py_in
index a85e1f3..9d00619 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras.py_in
@@ -498,9 +498,10 @@ def get_segments_and_gpus(gpus_per_host):
segments_per_host = get_segments_per_host()
if 0 < gpus_per_host < segments_per_host:
- plpy.warning('The number of gpus per host is less than the number of '
- 'segments per host. The support for this case is '
- 'experimental and it may fail.')
+ plpy.warning('The number of GPUs per segment host is less than the
number of '
+ 'segments per segment host. When different segments share
the same GPU, '
+ 'this may fail in some scenarios. The current recommended
configuration '
+ 'is to have 1 GPU available per segment.')
return segments_per_host, gpus_per_host
@@ -528,16 +529,6 @@ def evaluate(schema_madlib, model_table, test_table,
output_table, gpus_per_host
seg_ids, images_per_seg =
get_image_count_per_seg_for_minibatched_data_from_db(test_table)
- res = plpy.execute("""
- SELECT {dependent_varname_col}, {independent_varname_col}
- FROM {test_summary_table}
- """.format(dependent_varname_col=DEPENDENT_VARNAME_COLNAME,
- independent_varname_col=INDEPENDENT_VARNAME_COLNAME,
- test_summary_table=test_summary_table))
-
- dependent_varname = res[0][DEPENDENT_VARNAME_COLNAME]
- independent_varname = res[0][INDEPENDENT_VARNAME_COLNAME]
-
loss, metric =\
get_loss_metric_from_keras_eval(schema_madlib, test_table,
compile_params, model_arch,
model_data, gpus_per_host,
segments_per_host,
diff --git
a/src/ports/postgres/modules/deep_learning/madlib_keras_wrapper.py_in
b/src/ports/postgres/modules/deep_learning/madlib_keras_wrapper.py_in
index dcdf8a0..c61bc2f 100644
--- a/src/ports/postgres/modules/deep_learning/madlib_keras_wrapper.py_in
+++ b/src/ports/postgres/modules/deep_learning/madlib_keras_wrapper.py_in
@@ -207,7 +207,7 @@ def _validate_metrics(compile_dict):
'sparse_categorical_crossentropy', 'top_k_categorical_accuracy',
'sparse_top_k_categorical_accuracy']
_assert(len(compile_dict['metrics']) == 1,
- "Only at most one metric is supported.")
+ "Only one metric at a time is supported.")
_assert(compile_dict['metrics'][0] not in unsupported_metrics_list,
"Metric {0} is not supported.".format(compile_dict['metrics'][0]))
@@ -277,8 +277,7 @@ def parse_and_validate_fit_params(fit_param_str):
def validate_and_literal_eval_keys(keys_dict, literal_eval_list,
accepted_list):
for ckey in keys_dict.keys():
_assert(ckey in accepted_list,
- "{0} is not accepted as a parameter yet. "
- "Please review the user docs".format(ckey))
+ "{0} is not currently accepted as a parameter. ".format(ckey))
if ckey in literal_eval_list:
try:
keys_dict[ckey] = ast.literal_eval(keys_dict[ckey])
@@ -315,13 +314,13 @@ def validate_compile_param_types(compile_dict):
type(compile_dict['loss_weights']) is list or
type(compile_dict['loss_weights']) is dict,
"wrong input type for compile parameter loss_weights: only list "
- "and dictionary are supported for now")
+ "and dictionary are supported.")
_assert('weighted_metrics' not in compile_dict.keys() or
compile_dict['weighted_metrics'] is None or
type(compile_dict['weighted_metrics']) is list,
"wrong input type for compile parameter weighted_metrics: only
list "
- "is supported for now")
+ "is supported.")
_assert('sample_weight_mode' not in compile_dict.keys() or
compile_dict['sample_weight_mode'] is None or
diff --git a/src/ports/postgres/modules/deep_learning/test/madlib_keras.sql_in
b/src/ports/postgres/modules/deep_learning/test/madlib_keras.sql_in
index 57c8fcd..abd2b54 100644
--- a/src/ports/postgres/modules/deep_learning/test/madlib_keras.sql_in
+++ b/src/ports/postgres/modules/deep_learning/test/madlib_keras.sql_in
@@ -235,7 +235,7 @@ SELECT assert(trap_error($TRAP$madlib_keras_fit(
-- that don't have GPUs. Since Jenkins builds are run on docker containers
-- that don't have GPUs, these queries must error out.
--- IMPRORTANT: The following test must be run when we have a valid
+-- IMPORTANT: The following test must be run when we have a valid
-- keras_saved_out model table. Otherwise, it will fail because of a
-- non-existent model table, while we want to trap failure due to
-- gpus_per_host=2
diff --git
a/src/ports/postgres/modules/deep_learning/test/unit_tests/test_madlib_keras.py_in
b/src/ports/postgres/modules/deep_learning/test/unit_tests/test_madlib_keras.py_in
index b3e3fdd..2a1c39e 100644
---
a/src/ports/postgres/modules/deep_learning/test/unit_tests/test_madlib_keras.py_in
+++
b/src/ports/postgres/modules/deep_learning/test/unit_tests/test_madlib_keras.py_in
@@ -538,7 +538,7 @@ class MadlibKerasWrapperTestCase(unittest.TestCase):
with self.assertRaises(plpy.PLPYException) as error:
self.subject.parse_and_validate_fit_params(test_str)
- self.assertIn('not accepted', str(error.exception))
+ self.assertIn('not currently accepted', str(error.exception))
test_str = "batch_size=not_lit_eval(1), epochs=1, verbose=0"
with self.assertRaises(plpy.PLPYException) as error:
@@ -714,7 +714,7 @@ class MadlibKerasWrapperTestCase(unittest.TestCase):
" loss='categorical_crossentropy'"
with self.assertRaises(plpy.PLPYException) as error:
self.subject.parse_and_validate_compile_params(test_str)
- self.assertIn('not accepted', str(error.exception))
+ self.assertIn('not currently accepted', str(error.exception))
#missing comma
test_str = "optimizer=SGD(lr=0.01 decay=1e-6, nesterov=True)," \