This is an automated email from the ASF dual-hosted git repository.
njayaram pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/madlib.git
The following commit(s) were added to refs/heads/master by this push:
new c4f645c DL: Reduce dev check assert threshold while comparing loss
c4f645c is described below
commit c4f645c01a4baeadbbbc589f619eae0908617461
Author: Nandish Jayaram <[email protected]>
AuthorDate: Tue Jun 4 14:46:23 2019 -0700
DL: Reduce dev check assert threshold while comparing loss
Update the threshold used when comparing the loss for the transfer learning
and warm start tests to 1e-6 (from 1e-10). These tests were flaky with the
stricter 1e-10 threshold on GPDB 6 (but not on other DBs).
Co-authored-by: Ekta Khanna <[email protected]>
---
.../modules/deep_learning/test/madlib_keras.sql_in | 42 +++++++++++-----------
1 file changed, 21 insertions(+), 21 deletions(-)
diff --git a/src/ports/postgres/modules/deep_learning/test/madlib_keras.sql_in
b/src/ports/postgres/modules/deep_learning/test/madlib_keras.sql_in
index df3935a..57c8fcd 100644
--- a/src/ports/postgres/modules/deep_learning/test/madlib_keras.sql_in
+++ b/src/ports/postgres/modules/deep_learning/test/madlib_keras.sql_in
@@ -1129,15 +1129,15 @@ UPDATE iris_model_arch set model_weights = (select
model_data from iris_model)
-- Warm start test
SELECT madlib_keras_fit('iris_data_packed', -- source table
- 'iris_model', -- model output table
- 'iris_model_arch', -- model arch table
- 2, -- model arch id
- $$ loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'] $$, -- compile_params
- $$ batch_size=5, epochs=3 $$, -- fit_params
- 2, -- num_iterations,
- NULL, NULL, 1,
- true -- warm start
- );
+ 'iris_model', -- model output table
+ 'iris_model_arch', -- model arch table
+ 2, -- model arch id
+ $$ loss='categorical_crossentropy', optimizer='adam',
metrics=['accuracy'] $$, -- compile_params
+ $$ batch_size=5, epochs=3 $$, -- fit_params
+ 2, -- num_iterations,
+ NULL, NULL, 1,
+ true -- warm start
+ );
SELECT assert(
array_upper(training_loss, 1) = 2 AND
@@ -1146,8 +1146,8 @@ SELECT assert(
FROM iris_model_summary;
SELECT assert(
- abs(first.training_loss_final-second.training_loss[1]) < 1e-10 AND
- abs(first.training_loss_final-second.training_loss[2]) < 1e-10 AND
+ abs(first.training_loss_final-second.training_loss[1]) < 1e-6 AND
+ abs(first.training_loss_final-second.training_loss[2]) < 1e-6 AND
abs(first.training_metrics_final-second.training_metrics[1]) < 1e-10 AND
abs(first.training_metrics_final-second.training_metrics[2]) < 1e-10,
'warm start test failed because training loss and metrics don''t match the
expected value from the previous run of keras fit.')
@@ -1156,14 +1156,14 @@ FROM iris_model_first_run AS first, iris_model_summary
AS second;
-- Transfer learning test
DROP TABLE IF EXISTS iris_model_transfer, iris_model_transfer_summary;
SELECT madlib_keras_fit('iris_data_packed', -- source table
- 'iris_model_transfer', -- model output
table
- 'iris_model_arch', -- model arch table
- 2, -- model arch id
- $$ loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'] $$, -- compile_params
- $$ batch_size=5, epochs=3 $$, -- fit_params
- 2,
- NULL, NULL, 1
- );
+ 'iris_model_transfer', -- model output table
+ 'iris_model_arch', -- model arch table
+ 2, -- model arch id
+ $$ loss='categorical_crossentropy', optimizer='adam',
metrics=['accuracy'] $$, -- compile_params
+ $$ batch_size=5, epochs=3 $$, -- fit_params
+ 2,
+ NULL, NULL, 1
+ );
SELECT assert(
array_upper(training_loss, 1) = 2 AND
@@ -1172,8 +1172,8 @@ SELECT assert(
FROM iris_model_transfer_summary;
SELECT assert(
- abs(first.training_loss_final-second.training_loss[1]) < 1e-10 AND
- abs(first.training_loss_final-second.training_loss[2]) < 1e-10 AND
+ abs(first.training_loss_final-second.training_loss[1]) < 1e-6 AND
+ abs(first.training_loss_final-second.training_loss[2]) < 1e-6 AND
abs(first.training_metrics_final-second.training_metrics[1]) < 1e-10 AND
abs(first.training_metrics_final-second.training_metrics[2]) < 1e-10,
'Transfer learning test failed because training loss and metrics don''t
match the expected value.')