Author: rhbutani
Date: Tue Dec 17 00:39:01 2013
New Revision: 1551420
URL: http://svn.apache.org/r1551420
Log:
HIVE-5973 SMB joins produce incorrect results with multiple partitions and
buckets (Vikram Dixit via Harish Butani)
Added:
hive/trunk/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q
hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_16.q.out
Modified:
hive/trunk/itests/qtest/pom.xml
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
Modified: hive/trunk/itests/qtest/pom.xml
URL:
http://svn.apache.org/viewvc/hive/trunk/itests/qtest/pom.xml?rev=1551420&r1=1551419&r2=1551420&view=diff
==============================================================================
--- hive/trunk/itests/qtest/pom.xml (original)
+++ hive/trunk/itests/qtest/pom.xml Tue Dec 17 00:39:01 2013
@@ -36,7 +36,7 @@
<run_disabled>false</run_disabled>
<clustermode></clustermode>
<execute.beeline.tests>false</execute.beeline.tests>
-
<minimr.query.files>stats_counter_partitioned.q,list_bucket_dml_10.q,input16_cc.q,scriptfile1.q,scriptfile1_win.q,bucket4.q,bucketmapjoin6.q,disable_merge_for_bucketing.q,reduce_deduplicate.q,smb_mapjoin_8.q,join1.q,groupby2.q,bucketizedhiveinputformat.q,bucketmapjoin7.q,optrstat_groupby.q,bucket_num_reducers.q,bucket5.q,load_fs2.q,bucket_num_reducers2.q,infer_bucket_sort_merge.q,infer_bucket_sort_reducers_power_two.q,infer_bucket_sort_dyn_part.q,infer_bucket_sort_bucketed_table.q,infer_bucket_sort_map_operators.q,infer_bucket_sort_num_buckets.q,leftsemijoin_mr.q,schemeAuthority.q,schemeAuthority2.q,truncate_column_buckets.q,remote_script.q,,load_hdfs_file_with_space_in_the_name.q,parallel_orderby.q,import_exported_table.q,stats_counter.q</minimr.query.files>
+
<minimr.query.files>stats_counter_partitioned.q,list_bucket_dml_10.q,input16_cc.q,scriptfile1.q,scriptfile1_win.q,bucket4.q,bucketmapjoin6.q,disable_merge_for_bucketing.q,reduce_deduplicate.q,smb_mapjoin_8.q,join1.q,groupby2.q,bucketizedhiveinputformat.q,bucketmapjoin7.q,optrstat_groupby.q,bucket_num_reducers.q,bucket5.q,load_fs2.q,bucket_num_reducers2.q,infer_bucket_sort_merge.q,infer_bucket_sort_reducers_power_two.q,infer_bucket_sort_dyn_part.q,infer_bucket_sort_bucketed_table.q,infer_bucket_sort_map_operators.q,infer_bucket_sort_num_buckets.q,leftsemijoin_mr.q,schemeAuthority.q,schemeAuthority2.q,truncate_column_buckets.q,remote_script.q,,load_hdfs_file_with_space_in_the_name.q,parallel_orderby.q,import_exported_table.q,stats_counter.q,auto_sortmerge_join_16.q</minimr.query.files>
<minimr.query.negative.files>cluster_tasklog_retrieval.q,minimr_broken_pipe.q,mapreduce_stack_trace.q,mapreduce_stack_trace_turnoff.q,mapreduce_stack_trace_hadoop20.q,mapreduce_stack_trace_turnoff_hadoop20.q</minimr.query.negative.files>
<beeline.positive.exclude>add_part_exist.q,alter1.q,alter2.q,alter4.q,alter5.q,alter_rename_partition.q,alter_rename_partition_authorization.q,archive.q,archive_corrupt.q,archive_multi.q,archive_mr_1806.q,archive_multi_mr_1806.q,authorization_1.q,authorization_2.q,authorization_4.q,authorization_5.q,authorization_6.q,authorization_7.q,ba_table1.q,ba_table2.q,ba_table3.q,ba_table_udfs.q,binary_table_bincolserde.q,binary_table_colserde.q,cluster.q,columnarserde_create_shortcut.q,combine2.q,constant_prop.q,create_nested_type.q,create_or_replace_view.q,create_struct_table.q,create_union_table.q,database.q,database_location.q,database_properties.q,ddltime.q,describe_database_json.q,drop_database_removes_partition_dirs.q,escape1.q,escape2.q,exim_00_nonpart_empty.q,exim_01_nonpart.q,exim_02_00_part_empty.q,exim_02_part.q,exim_03_nonpart_over_compat.q,exim_04_all_part.q,exim_04_evolved_parts.q,exim_05_some_part.q,exim_06_one_part.q,exim_07_all_part_over_nonoverlap.q,exim_08_nonpart_rena
me.q,exim_09_part_spec_nonoverlap.q,exim_10_external_managed.q,exim_11_managed_external.q,exim_12_external_location.q,exim_13_managed_location.q,exim_14_managed_location_over_existing.q,exim_15_external_part.q,exim_16_part_external.q,exim_17_part_managed.q,exim_18_part_external.q,exim_19_00_part_external_location.q,exim_19_part_external_location.q,exim_20_part_managed_location.q,exim_21_export_authsuccess.q,exim_22_import_exist_authsuccess.q,exim_23_import_part_authsuccess.q,exim_24_import_nonexist_authsuccess.q,global_limit.q,groupby_complex_types.q,groupby_complex_types_multi_single_reducer.q,index_auth.q,index_auto.q,index_auto_empty.q,index_bitmap.q,index_bitmap1.q,index_bitmap2.q,index_bitmap3.q,index_bitmap_auto.q,index_bitmap_rc.q,index_compact.q,index_compact_1.q,index_compact_2.q,index_compact_3.q,index_stale_partitioned.q,init_file.q,input16.q,input16_cc.q,input46.q,input_columnarserde.q,input_dynamicserde.q,input_lazyserde.q,input_testxpath3.q,input_testxpath4.q,insert2_o
verwrite_partitions.q,insertexternal1.q,join_thrift.q,lateral_view.q,load_binary_data.q,load_exist_part_authsuccess.q,load_nonpart_authsuccess.q,load_part_authsuccess.q,loadpart_err.q,lock1.q,lock2.q,lock3.q,lock4.q,merge_dynamic_partition.q,multi_insert.q,multi_insert_move_tasks_share_dependencies.q,null_column.q,ppd_clusterby.q,query_with_semi.q,rename_column.q,sample6.q,sample_islocalmode_hook.q,set_processor_namespaces.q,show_tables.q,source.q,split_sample.q,str_to_map.q,transform1.q,udaf_collect_set.q,udaf_context_ngrams.q,udaf_histogram_numeric.q,udaf_ngrams.q,udaf_percentile_approx.q,udf_array.q,udf_bitmap_and.q,udf_bitmap_or.q,udf_explode.q,udf_format_number.q,udf_map.q,udf_map_keys.q,udf_map_values.q,udf_max.q,udf_min.q,udf_named_struct.q,udf_percentile.q,udf_printf.q,udf_sentences.q,udf_sort_array.q,udf_split.q,udf_struct.q,udf_substr.q,udf_translate.q,udf_union.q,udf_xpath.q,udtf_stack.q,view.q,virtual_column.q</beeline.positive.exclude>
</properties>
Modified:
hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java
URL:
http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java?rev=1551420&r1=1551419&r2=1551420&view=diff
==============================================================================
--- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java (original)
+++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DummyStoreOperator.java Tue Dec 17 00:39:01 2013
@@ -25,6 +25,8 @@ import org.apache.hadoop.hive.ql.metadat
import org.apache.hadoop.hive.ql.plan.DummyStoreDesc;
import org.apache.hadoop.hive.ql.plan.api.OperatorType;
import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption;
/**
* For SortMerge joins, this is a dummy operator, which stores the row for the
@@ -71,15 +73,28 @@ public class DummyStoreOperator extends
@Override
protected void initializeOp(Configuration hconf) throws HiveException {
- outputObjInspector = inputObjInspectors[0];
+    /*
+     * The conversion to standard object inspector was necessitated by HIVE-5973. The issue
+     * happens when a select operator precedes this operator as in the case of a subquery. The
+     * select operator does not allocate a new object to hold the deserialized row. This affects
+     * the operation of the SMB join which puts the object in a priority queue. Since all elements
+     * of the priority queue point to the same object, the join was resulting in incorrect
+     * results.
+     *
+     * So the fix is to make a copy of the object as done in the processOp phase below. This
+     * however necessitates a change in the object inspector that can be used in processing the
+     * row downstream.
+     */
+    outputObjInspector = ObjectInspectorUtils.getStandardObjectInspector(inputObjInspectors[0]);
result = new InspectableObject(null, outputObjInspector);
initializeChildren(hconf);
}
@Override
public void processOp(Object row, int tag) throws HiveException {
- // Store the row
- result.o = row;
+ // Store the row. See comments above for why we need a new copy of the row.
+    result.o = ObjectInspectorUtils.copyToStandardObject(row, inputObjInspectors[0],
+        ObjectInspectorCopyOption.WRITABLE);
}
@Override
Added: hive/trunk/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q
URL:
http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q?rev=1551420&view=auto
==============================================================================
--- hive/trunk/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q (added)
+++ hive/trunk/ql/src/test/queries/clientpositive/auto_sortmerge_join_16.q Tue Dec 17 00:39:01 2013
@@ -0,0 +1,92 @@
+set hive.auto.convert.join=true;
+
+set hive.exec.dynamic.partition.mode=nonstrict;
+set hive.enforce.bucketing=true;
+set hive.enforce.sorting=true;
+
+set hive.auto.convert.sortmerge.join=true;
+set hive.optimize.bucketmapjoin = true;
+set hive.optimize.bucketmapjoin.sortedmerge = true;
+
+CREATE TABLE stage_bucket_big
+(
+key BIGINT,
+value STRING
+)
+PARTITIONED BY (file_tag STRING);
+
+CREATE TABLE bucket_big
+(
+key BIGINT,
+value STRING
+)
+PARTITIONED BY (day STRING, pri bigint)
+clustered by (key) sorted by (key) into 12 buckets
+stored as RCFile;
+
+CREATE TABLE stage_bucket_small
+(
+key BIGINT,
+value string
+)
+PARTITIONED BY (file_tag STRING);
+
+CREATE TABLE bucket_small
+(
+key BIGINT,
+value string
+)
+PARTITIONED BY (pri bigint)
+clustered by (key) sorted by (key) into 12 buckets
+stored as RCFile;
+
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' overwrite into table stage_bucket_small partition (file_tag='1');
+load data local inpath '../../data/files/srcsortbucket1outof4.txt' overwrite into table stage_bucket_small partition (file_tag='2');
+
+insert overwrite table bucket_small partition(pri)
+select
+key,
+value,
+file_tag as pri
+from
+stage_bucket_small
+where file_tag between 1 and 2;
+
+load data local inpath '../../data/files/smallsrcsortbucket1outof4.txt' overwrite into table stage_bucket_big partition (file_tag='1');
+
+insert overwrite table bucket_big partition(day,pri)
+select
+key,
+value,
+'day1' as day,
+1 as pri
+from
+stage_bucket_big
+where
+file_tag='1';
+
+select
+a.key ,
+a.value ,
+b.value ,
+'day1' as day,
+1 as pri
+from
+(
+select
+key,
+value
+from bucket_big where day='day1'
+) a
+left outer join
+(
+select
+key,
+value
+from bucket_small
+where pri between 1 and 2
+) b
+on
+(a.key = b.key)
+;
+
Added:
hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_16.q.out
URL:
http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_16.q.out?rev=1551420&view=auto
==============================================================================
--- hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_16.q.out
(added)
+++ hive/trunk/ql/src/test/results/clientpositive/auto_sortmerge_join_16.q.out
Tue Dec 17 00:39:01 2013
@@ -0,0 +1,248 @@
+PREHOOK: query: CREATE TABLE stage_bucket_big
+(
+key BIGINT,
+value STRING
+)
+PARTITIONED BY (file_tag STRING)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE stage_bucket_big
+(
+key BIGINT,
+value STRING
+)
+PARTITIONED BY (file_tag STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@stage_bucket_big
+PREHOOK: query: CREATE TABLE bucket_big
+(
+key BIGINT,
+value STRING
+)
+PARTITIONED BY (day STRING, pri bigint)
+clustered by (key) sorted by (key) into 12 buckets
+stored as RCFile
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE bucket_big
+(
+key BIGINT,
+value STRING
+)
+PARTITIONED BY (day STRING, pri bigint)
+clustered by (key) sorted by (key) into 12 buckets
+stored as RCFile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@bucket_big
+PREHOOK: query: CREATE TABLE stage_bucket_small
+(
+key BIGINT,
+value string
+)
+PARTITIONED BY (file_tag STRING)
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE stage_bucket_small
+(
+key BIGINT,
+value string
+)
+PARTITIONED BY (file_tag STRING)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@stage_bucket_small
+PREHOOK: query: CREATE TABLE bucket_small
+(
+key BIGINT,
+value string
+)
+PARTITIONED BY (pri bigint)
+clustered by (key) sorted by (key) into 12 buckets
+stored as RCFile
+PREHOOK: type: CREATETABLE
+POSTHOOK: query: CREATE TABLE bucket_small
+(
+key BIGINT,
+value string
+)
+PARTITIONED BY (pri bigint)
+clustered by (key) sorted by (key) into 12 buckets
+stored as RCFile
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: default@bucket_small
+PREHOOK: query: load data local inpath
'../../data/files/srcsortbucket1outof4.txt' overwrite into table
stage_bucket_small partition (file_tag='1')
+PREHOOK: type: LOAD
+PREHOOK: Output: default@stage_bucket_small
+POSTHOOK: query: load data local inpath
'../../data/files/srcsortbucket1outof4.txt' overwrite into table
stage_bucket_small partition (file_tag='1')
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@stage_bucket_small
+POSTHOOK: Output: default@stage_bucket_small@file_tag=1
+PREHOOK: query: load data local inpath
'../../data/files/srcsortbucket1outof4.txt' overwrite into table
stage_bucket_small partition (file_tag='2')
+PREHOOK: type: LOAD
+PREHOOK: Output: default@stage_bucket_small
+POSTHOOK: query: load data local inpath
'../../data/files/srcsortbucket1outof4.txt' overwrite into table
stage_bucket_small partition (file_tag='2')
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@stage_bucket_small
+POSTHOOK: Output: default@stage_bucket_small@file_tag=2
+PREHOOK: query: insert overwrite table bucket_small partition(pri)
+select
+key,
+value,
+file_tag as pri
+from
+stage_bucket_small
+where file_tag between 1 and 2
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stage_bucket_small
+PREHOOK: Input: default@stage_bucket_small@file_tag=1
+PREHOOK: Input: default@stage_bucket_small@file_tag=2
+PREHOOK: Output: default@bucket_small
+POSTHOOK: query: insert overwrite table bucket_small partition(pri)
+select
+key,
+value,
+file_tag as pri
+from
+stage_bucket_small
+where file_tag between 1 and 2
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stage_bucket_small
+POSTHOOK: Input: default@stage_bucket_small@file_tag=1
+POSTHOOK: Input: default@stage_bucket_small@file_tag=2
+POSTHOOK: Output: default@bucket_small@pri=1
+POSTHOOK: Output: default@bucket_small@pri=2
+POSTHOOK: Lineage: bucket_small PARTITION(pri=1).key SIMPLE
[(stage_bucket_small)stage_bucket_small.FieldSchema(name:key, type:bigint,
comment:null), ]
+POSTHOOK: Lineage: bucket_small PARTITION(pri=1).value SIMPLE
[(stage_bucket_small)stage_bucket_small.FieldSchema(name:value, type:string,
comment:null), ]
+POSTHOOK: Lineage: bucket_small PARTITION(pri=2).key SIMPLE
[(stage_bucket_small)stage_bucket_small.FieldSchema(name:key, type:bigint,
comment:null), ]
+POSTHOOK: Lineage: bucket_small PARTITION(pri=2).value SIMPLE
[(stage_bucket_small)stage_bucket_small.FieldSchema(name:value, type:string,
comment:null), ]
+PREHOOK: query: load data local inpath
'../../data/files/smallsrcsortbucket1outof4.txt' overwrite into table
stage_bucket_big partition (file_tag='1')
+PREHOOK: type: LOAD
+PREHOOK: Output: default@stage_bucket_big
+POSTHOOK: query: load data local inpath
'../../data/files/smallsrcsortbucket1outof4.txt' overwrite into table
stage_bucket_big partition (file_tag='1')
+POSTHOOK: type: LOAD
+POSTHOOK: Output: default@stage_bucket_big
+POSTHOOK: Output: default@stage_bucket_big@file_tag=1
+POSTHOOK: Lineage: bucket_small PARTITION(pri=1).key SIMPLE
[(stage_bucket_small)stage_bucket_small.FieldSchema(name:key, type:bigint,
comment:null), ]
+POSTHOOK: Lineage: bucket_small PARTITION(pri=1).value SIMPLE
[(stage_bucket_small)stage_bucket_small.FieldSchema(name:value, type:string,
comment:null), ]
+POSTHOOK: Lineage: bucket_small PARTITION(pri=2).key SIMPLE
[(stage_bucket_small)stage_bucket_small.FieldSchema(name:key, type:bigint,
comment:null), ]
+POSTHOOK: Lineage: bucket_small PARTITION(pri=2).value SIMPLE
[(stage_bucket_small)stage_bucket_small.FieldSchema(name:value, type:string,
comment:null), ]
+PREHOOK: query: insert overwrite table bucket_big partition(day,pri)
+select
+key,
+value,
+'day1' as day,
+1 as pri
+from
+stage_bucket_big
+where
+file_tag='1'
+PREHOOK: type: QUERY
+PREHOOK: Input: default@stage_bucket_big
+PREHOOK: Input: default@stage_bucket_big@file_tag=1
+PREHOOK: Output: default@bucket_big
+POSTHOOK: query: insert overwrite table bucket_big partition(day,pri)
+select
+key,
+value,
+'day1' as day,
+1 as pri
+from
+stage_bucket_big
+where
+file_tag='1'
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@stage_bucket_big
+POSTHOOK: Input: default@stage_bucket_big@file_tag=1
+POSTHOOK: Output: default@bucket_big@day=day1/pri=1
+POSTHOOK: Lineage: bucket_big PARTITION(day=day1,pri=1).key SIMPLE
[(stage_bucket_big)stage_bucket_big.FieldSchema(name:key, type:bigint,
comment:null), ]
+POSTHOOK: Lineage: bucket_big PARTITION(day=day1,pri=1).value SIMPLE
[(stage_bucket_big)stage_bucket_big.FieldSchema(name:value, type:string,
comment:null), ]
+POSTHOOK: Lineage: bucket_small PARTITION(pri=1).key SIMPLE
[(stage_bucket_small)stage_bucket_small.FieldSchema(name:key, type:bigint,
comment:null), ]
+POSTHOOK: Lineage: bucket_small PARTITION(pri=1).value SIMPLE
[(stage_bucket_small)stage_bucket_small.FieldSchema(name:value, type:string,
comment:null), ]
+POSTHOOK: Lineage: bucket_small PARTITION(pri=2).key SIMPLE
[(stage_bucket_small)stage_bucket_small.FieldSchema(name:key, type:bigint,
comment:null), ]
+POSTHOOK: Lineage: bucket_small PARTITION(pri=2).value SIMPLE
[(stage_bucket_small)stage_bucket_small.FieldSchema(name:value, type:string,
comment:null), ]
+PREHOOK: query: select
+a.key ,
+a.value ,
+b.value ,
+'day1' as day,
+1 as pri
+from
+(
+select
+key,
+value
+from bucket_big where day='day1'
+) a
+left outer join
+(
+select
+key,
+value
+from bucket_small
+where pri between 1 and 2
+) b
+on
+(a.key = b.key)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@bucket_big
+PREHOOK: Input: default@bucket_big@day=day1/pri=1
+PREHOOK: Input: default@bucket_small
+PREHOOK: Input: default@bucket_small@pri=1
+PREHOOK: Input: default@bucket_small@pri=2
+#### A masked pattern was here ####
+POSTHOOK: query: select
+a.key ,
+a.value ,
+b.value ,
+'day1' as day,
+1 as pri
+from
+(
+select
+key,
+value
+from bucket_big where day='day1'
+) a
+left outer join
+(
+select
+key,
+value
+from bucket_small
+where pri between 1 and 2
+) b
+on
+(a.key = b.key)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@bucket_big
+POSTHOOK: Input: default@bucket_big@day=day1/pri=1
+POSTHOOK: Input: default@bucket_small
+POSTHOOK: Input: default@bucket_small@pri=1
+POSTHOOK: Input: default@bucket_small@pri=2
+#### A masked pattern was here ####
+POSTHOOK: Lineage: bucket_big PARTITION(day=day1,pri=1).key SIMPLE
[(stage_bucket_big)stage_bucket_big.FieldSchema(name:key, type:bigint,
comment:null), ]
+POSTHOOK: Lineage: bucket_big PARTITION(day=day1,pri=1).value SIMPLE
[(stage_bucket_big)stage_bucket_big.FieldSchema(name:value, type:string,
comment:null), ]
+POSTHOOK: Lineage: bucket_small PARTITION(pri=1).key SIMPLE
[(stage_bucket_small)stage_bucket_small.FieldSchema(name:key, type:bigint,
comment:null), ]
+POSTHOOK: Lineage: bucket_small PARTITION(pri=1).value SIMPLE
[(stage_bucket_small)stage_bucket_small.FieldSchema(name:value, type:string,
comment:null), ]
+POSTHOOK: Lineage: bucket_small PARTITION(pri=2).key SIMPLE
[(stage_bucket_small)stage_bucket_small.FieldSchema(name:key, type:bigint,
comment:null), ]
+POSTHOOK: Lineage: bucket_small PARTITION(pri=2).value SIMPLE
[(stage_bucket_small)stage_bucket_small.FieldSchema(name:value, type:string,
comment:null), ]
+0 val_0 val_0 day1 1
+0 val_0 val_0 day1 1
+0 val_0 val_0 day1 1
+0 val_0 val_0 day1 1
+0 val_0 val_0 day1 1
+0 val_0 val_0 day1 1
+169 val_169 val_169 day1 1
+169 val_169 val_169 day1 1
+169 val_169 val_169 day1 1
+169 val_169 val_169 day1 1
+169 val_169 val_169 day1 1
+169 val_169 val_169 day1 1
+169 val_169 val_169 day1 1
+169 val_169 val_169 day1 1
+374 val_374 val_374 day1 1
+374 val_374 val_374 day1 1
+172 val_172 val_172 day1 1
+172 val_172 val_172 day1 1
+172 val_172 val_172 day1 1
+172 val_172 val_172 day1 1
+103 val_103 val_103 day1 1
+103 val_103 val_103 day1 1
+103 val_103 val_103 day1 1
+103 val_103 val_103 day1 1