This is an automated email from the ASF dual-hosted git repository.

mgergely pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new f49d257  HIVE-23547 Enforce testconfiguration.properties file format and alphabetical order (Miklos Gergely, reviewed by Laszlo Bodor)
f49d257 is described below

commit f49d257c560c81c38259e95023b20c544acb4d10
Author: miklosgergely <[email protected]>
AuthorDate: Mon May 25 14:00:13 2020 +0200

    HIVE-23547 Enforce testconfiguration.properties file format and alphabetical order (Miklos Gergely, reviewed by Laszlo Bodor)
---
 itests/bin/validateTestConfiguration.sh            |  60 ++++
 itests/pom.xml                                     |  25 ++
 .../test/resources/testconfiguration.properties    | 304 ++++++++++-----------
 .../clientpositive/{tez-tag.q => tez_tag.q}        |   0
 .../tez/{tez-tag.q.out => tez_tag.q.out}           |   0
 5 files changed, 237 insertions(+), 152 deletions(-)
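
For context, the format the new check enforces looks like this (a
hypothetical group name, illustrating the rules validated by the script
below: exactly one '=' in the group declaration, entries indented by two
spaces, '.q' suffixes, ',\' continuations on every entry but the last,
and alphabetical order under LC_ALL=C collation):

  example.query.files=\
    aaa_first.q,\
    mmm_middle.q,\
    zzz_last.q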

diff --git a/itests/bin/validateTestConfiguration.sh b/itests/bin/validateTestConfiguration.sh
new file mode 100755
index 0000000..6d57520
--- /dev/null
+++ b/itests/bin/validateTestConfiguration.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+echo "Validating testconfiguration.properties format"
+
+HIVE_ROOT=$1
+export LC_ALL=C
+
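+# Two-state scanner: "out" = between groups, expecting a group
+# declaration (blank lines and '#' comments are skipped); "in" = inside
+# a group, reading two-space-indented *.q entries until one without a
+# ',\' continuation ends the group.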
+state="out"
+row=0
+last_test_name=
+group=
+while IFS= read -r line; do
+  row=$((row+1))
+  if [ "$state" == "out" ]; then
+    [ -z "$line" ] && continue
+    [[ $line == \#* ]] && continue
+
+    parts=(${line//=/ })
+    if [[ ${#parts[@]} != 2 ]]; then
+      echo "group declaration should contain exactly one '=', but in row $row: 
'$line'"
+      exit 1
+    fi
+
+    group=${parts[0]}
+    last_test_name=
+    state="in"
+  else
+    if ! [[ "$line" =~ [[:space:]][[:space:]]* ]]; then
+      echo "lines within group should start with two spaces, but in row $row:  
'$line'"
+      exit 1
+    fi
+
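+    # Strip the two-space indent; a trailing ',\' means the group
+    # continues on the next line, otherwise this is its last entry.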
+    file=${line:2}
+    if [[ ${line: -2} == ",\\" ]]; then
+      file=${file%??}
+    else
+      state="out"
+    fi
+
+    if ! [[ ${file: -2} == ".q" ]]; then
+      echo "file name should end with '.q', but in row $row: '$line'"
+      exit 1
+    fi
+
+    test_name=${file%??}
+    if [[ "$test_name" = *[^a-zA-Z0-9_]* ]]; then
+      echo "test name should contain only letters, numbers and '_' characters, 
but in row $row: '$line'"
+      exit 1
+    fi
+
+    if [[ $last_test_name > $test_name ]]; then
+      echo "files should be in alphabetic order within group, but in group 
$group in row $row: $test_name < $last_test_name "
+      exit 1
+    fi
+
+    last_test_name=$test_name
+  fi
+done < "$HIVE_ROOT/itests/src/test/resources/testconfiguration.properties"
+
+echo "Validation of testconfiguration.properties finished successfully"
diff --git a/itests/pom.xml b/itests/pom.xml
index d4fb252..faadce3 100644
--- a/itests/pom.xml
+++ b/itests/pom.xml
@@ -482,4 +482,29 @@
     </profile>
   </profiles>
 
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>validate testconfiguration.properties</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <exec executable="bash" dir="${basedir}" failonerror="true">
+                  <arg line="${hive.path.to.root}/itests/bin/validateTestConfiguration.sh"/>
+                  <arg line="${hive.path.to.root}"/>
+                </exec>
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
 </project>
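
(Because the antrun execution is bound to the generate-sources phase,
the check should run at the start of every Maven build of the itests
module; a minimal sketch of triggering it directly, assuming a standard
Maven setup:)

    cd itests
    mvn generate-sources
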
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index 92ae8c2..1fd09eb 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -14,18 +14,18 @@ minitez.query.files.shared=\
 minitez.query.files=\
   acid_vectorization_original_tez.q,\
   delete_orig_table.q,\
-  explainuser_3.q,\
   explainanalyze_1.q,\
   explainanalyze_3.q,\
   explainanalyze_4.q,\
   explainanalyze_5.q,\
+  explainuser_3.q,\
   multi_count_distinct.q,\
   orc_merge12.q,\
   orc_vectorization_ppd.q,\
-  tez-tag.q,\
-  tez_union_with_udf.q,\
-  tez_union_udtf.q,\
   tez_complextype_with_null.q,\
+  tez_tag.q,\
+  tez_union_udtf.q,\
+  tez_union_with_udf.q,\
   update_orig_table.q,\
   vector_join_part_col_char.q,\
   vector_non_string_partition.q
@@ -37,89 +37,31 @@ minillap.query.files=\
   add_part_with_loc.q,\
   alter_table_location2.q,\
   alter_table_location3.q,\
+  autoColumnStats_6.q,\
+  autogen_colalias.q,\
+  binary_output_format.q,\
   bucket5.q,\
   bucket6.q,\
+  compressed_skip_header_footer_aggr.q,\
+  create_genericudaf.q,\
+  create_udaf.q,\
+  create_view.q,\
   cte_2.q,\
   cte_4.q,\
+  cttl.q,\
+  dynamic_partition_pruning_2.q,\
   dynamic_semijoin_user_level.q,\
+  dynpart_cast.q,\
+  empty_dir_in_table.q,\
   except_distinct.q,\
   explainuser_2.q,\
-  empty_dir_in_table.q,\
-  insert_into1.q,\
-  insert_into2.q,\
-  intersect_all.q,\
-  intersect_distinct.q,\
-  intersect_merge.q,\
-  llap_udf.q,\
-  llapdecider.q,\
-  mapreduce1.q,\
-  mapreduce2.q,\
-  mm_all.q,\
-  mm_cttas.q,\
-  mm_dp.q,\
-  orc_merge1.q,\
-  orc_merge10.q,\
-  orc_merge2.q,\
-  orc_merge3.q,\
-  orc_merge4.q,\
-  orc_merge_diff_fs.q,\
-  orc_struct_type_vectorization.q,\
-  parallel_colstats.q,\
-  parquet_types_vectorization.q,\
-  parquet_complex_types_vectorization.q,\
-  parquet_map_type_vectorization.q,\
-  parquet_struct_type_vectorization.q,\
-  reduce_deduplicate.q,\
-  reduce_deduplicate_distinct.q,\
-  remote_script.q,\
-  file_with_header_footer.q,\
-  skip_header_footer_aggr.q,\
-  skip_header_footer_proj.q,\
-  compressed_skip_header_footer_aggr.q,\
   external_table_purge.q,\
   external_table_with_space_in_location_path.q,\
-  import_exported_table.q,\
-  orc_llap_counters.q,\
-  orc_llap_counters1.q,\
-  load_hdfs_file_with_space_in_the_name.q,\
-  orc_ppd_basic.q,\
-  schemeAuthority.q,\
-  schemeAuthority2.q,\
-  temp_table_external.q,\
-  temp_table_add_partition_with_location.q,\
-  table_nonprintable.q,\
-  llap_nullscan.q,\
-  rcfile_merge2.q,\
-  rcfile_merge3.q,\
-  rcfile_merge4.q,\
-  rcfile_createas1.q,\
-  orc_ppd_schema_evol_3a.q,\
-  global_limit.q,\
-  dynamic_partition_pruning_2.q,\
-  dynpart_cast.q,\
-  results_cache_diff_fs.q,\
-  results_cache_with_auth.q,\
-  tez_union_dynamic_partition.q,\
-  tez_union_dynamic_partition_2.q,\
-  tez_acid_union_dynamic_partition.q,\
-  tez_acid_union_dynamic_partition_2.q,\
-  unionDistinct_1.q,\
-  whroot_external1.q,\
-  load_fs2.q,\
-  llap_stats.q,\
-  multi_count_distinct_null.q,\
-  cttl.q,\
-  vector_offset_limit.q,\
-  temp_table_add_part_with_loc.q,\
-  temp_table_drop_partitions_filter4.q,\
-  autoColumnStats_6.q,\
-  autogen_colalias.q,\
-  binary_output_format.q,\
-  create_genericudaf.q,\
-  create_udaf.q,\
-  create_view.q,\
+  file_with_header_footer.q,\
   gen_udf_example_add10.q,\
+  global_limit.q,\
   groupby_bigdata.q,\
+  import_exported_table.q,\
   input14.q,\
   input14_limit.q,\
   input17.q,\
@@ -131,34 +73,89 @@ minillap.query.files=\
   input36.q,\
   input38.q,\
   input5.q,\
+  insert_into1.q,\
+  insert_into2.q,\
   insert_into3.q,\
   insert_into4.q,\
   insert_into5.q,\
   insert_into6.q,\
+  intersect_all.q,\
+  intersect_distinct.q,\
+  intersect_merge.q,\
+  llap_nullscan.q,\
+  llap_stats.q,\
+  llap_udf.q,\
+  llapdecider.q,\
   load_binary_data.q,\
+  load_fs2.q,\
+  load_hdfs_file_with_space_in_the_name.q,\
   macro_1.q,\
   macro_duplicate.q,\
+  mapreduce1.q,\
+  mapreduce2.q,\
   mapreduce3.q,\
   mapreduce4.q,\
   mapreduce7.q,\
   mapreduce8.q,\
   merge_test_dummy_operator.q,\
+  mm_all.q,\
+  mm_cttas.q,\
+  mm_dp.q,\
+  multi_count_distinct_null.q,\
   newline.q,\
   nonreserved_keywords_insert_into1.q,\
   nullscript.q,\
   orc_createas1.q,\
+  orc_llap_counters.q,\
+  orc_llap_counters1.q,\
+  orc_merge1.q,\
+  orc_merge10.q,\
+  orc_merge2.q,\
+  orc_merge3.q,\
+  orc_merge4.q,\
+  orc_merge_diff_fs.q,\
+  orc_ppd_basic.q,\
+  orc_ppd_schema_evol_3a.q,\
+  orc_struct_type_vectorization.q,\
+  parallel_colstats.q,\
+  parquet_complex_types_vectorization.q,\
+  parquet_map_type_vectorization.q,\
+  parquet_struct_type_vectorization.q,\
+  parquet_types_vectorization.q,\
   partcols1.q,\
   ppd_transform.q,\
   query_with_semi.q,\
   rcfile_bigdata.q,\
+  rcfile_createas1.q,\
+  rcfile_merge2.q,\
+  rcfile_merge3.q,\
+  rcfile_merge4.q,\
+  reduce_deduplicate.q,\
+  reduce_deduplicate_distinct.q,\
   regexp_extract.q,\
+  remote_script.q,\
+  results_cache_diff_fs.q,\
+  results_cache_with_auth.q,\
+  schemeAuthority.q,\
+  schemeAuthority2.q,\
   script_env_var1.q,\
   script_env_var2.q,\
   script_pipe.q,\
   scriptfile1.q,\
   select_transform_hint.q,\
+  skip_header_footer_aggr.q,\
+  skip_header_footer_proj.q,\
   str_to_map.q,\
+  table_nonprintable.q,\
+  temp_table_add_part_with_loc.q,\
+  temp_table_add_partition_with_location.q,\
+  temp_table_drop_partitions_filter4.q,\
+  temp_table_external.q,\
   temp_table_partcols1.q,\
+  tez_acid_union_dynamic_partition.q,\
+  tez_acid_union_dynamic_partition_2.q,\
+  tez_union_dynamic_partition.q,\
+  tez_union_dynamic_partition_2.q,\
   transform1.q,\
   transform2.q,\
   transform3.q,\
@@ -167,9 +164,12 @@ minillap.query.files=\
   udaf_sum_list.q,\
   udf_printf.q,\
   union23.q,\
+  unionDistinct_1.q,\
   union_script.q,\
   vector_custom_udf_configure.q,\
-  vector_udf3.q
+  vector_offset_limit.q,\
+  vector_udf3.q,\
+  whroot_external1.q
 
 mr.query.files=\
   archive_excludeHadoop20.q,\
@@ -292,22 +292,22 @@ mr.query.files=\
   windowing_windowspec.q
 
 encrypted.query.files=\
-  encryption_join_unencrypted_tbl.q,\
-  encryption_insert_partition_static.q,\
+  encryption_auto_purge_tables.q,\
+  encryption_ctas.q,\
+  encryption_drop_partition.q,\
+  encryption_drop_table.q,\
+  encryption_drop_table_in_encrypted_db.q,\
+  encryption_drop_view.q,\
   encryption_insert_partition_dynamic.q,\
+  encryption_insert_partition_static.q,\
+  encryption_insert_values.q,\
+  encryption_join_unencrypted_tbl.q,\
   encryption_join_with_different_encryption_keys.q,\
-  encryption_select_read_only_encrypted_tbl.q,\
-  encryption_select_read_only_unencrypted_tbl.q,\
   encryption_load_data_to_encrypted_tables.q,\
-  encryption_unencrypted_nonhdfs_external_tables.q,\
   encryption_move_tbl.q,\
-  encryption_drop_table.q,\
-  encryption_insert_values.q,\
-  encryption_drop_view.q,\
-  encryption_drop_partition.q,\
-  encryption_ctas.q,\
-  encryption_auto_purge_tables.q,\
-  encryption_drop_table_in_encrypted_db.q
+  encryption_select_read_only_encrypted_tbl.q,\
+  encryption_select_read_only_unencrypted_tbl.q,\
+  encryption_unencrypted_nonhdfs_external_tables.q
 
 # Queries ran by both MiniLlapLocal and Beeline
 beeline.query.files.shared=\
@@ -318,6 +318,7 @@ beeline.query.files.shared=\
 
 beeline.positive.include=\
   colstats_all_nulls.q,\
+  desc_table_formatted.q,\
   drop_with_concurrency.q,\
   escape_comments.q,\
   explain_outputs.q,\
@@ -331,20 +332,19 @@ beeline.positive.include=\
   smb_mapjoin_2.q,\
   smb_mapjoin_3.q,\
   smb_mapjoin_7.q,\
-  udf_unix_timestamp.q,\
-  desc_table_formatted.q
+  udf_unix_timestamp.q
 
 minimr.query.negative.files=\
   cluster_tasklog_retrieval.q,\
+  ct_noperm_loc.q,\
+  ctas_noperm_loc.q,\
   file_with_header_footer_negative.q,\
   local_mapred_error_cache.q,\
   mapreduce_stack_trace.q,\
   mapreduce_stack_trace_turnoff.q,\
   minimr_broken_pipe.q,\
   table_nonprintable_negative.q,\
-  udf_local_resource.q,\
-  ct_noperm_loc.q,\
-  ctas_noperm_loc.q
+  udf_local_resource.q
 
 # tests are sorted use: perl -pe 's@\\\s*\n@ @g' testconfiguration.properties \
 # | awk -F= '/spark.query.files/{print $2}' | perl -pe '[email protected] *, *@\n@g' \
@@ -354,34 +354,6 @@ spark.query.files=\
   alter_merge_orc.q,\
   alter_merge_stats_orc.q,\
   annotate_stats_join.q,\
-  parquet_vectorization_0.q ,\
-  parquet_vectorization_10.q ,\
-  parquet_vectorization_11.q ,\
-  parquet_vectorization_12.q ,\
-  parquet_vectorization_13.q ,\
-  parquet_vectorization_14.q ,\
-  parquet_vectorization_15.q ,\
-  parquet_vectorization_16.q ,\
-  parquet_vectorization_17.q ,\
-  parquet_vectorization_1.q ,\
-  parquet_vectorization_2.q ,\
-  parquet_vectorization_3.q ,\
-  parquet_vectorization_4.q ,\
-  parquet_vectorization_5.q ,\
-  parquet_vectorization_6.q ,\
-  parquet_vectorization_7.q ,\
-  parquet_vectorization_8.q ,\
-  parquet_vectorization_9.q ,\
-  parquet_vectorization_decimal_date.q ,\
-  parquet_vectorization_div0.q ,\
-  parquet_vectorization_limit.q ,\
-  parquet_vectorization_nested_udf.q ,\
-  parquet_vectorization_not.q ,\
-  parquet_vectorization_offset_limit.q ,\
-  parquet_vectorization_part_project.q ,\
-  parquet_vectorization_part.q ,\
-  parquet_vectorization_part_varchar.q ,\
-  parquet_vectorization_pushdown.q ,\
   auto_join0.q,\
   auto_join1.q,\
   auto_join10.q,\
@@ -565,7 +537,6 @@ spark.query.files=\
   groupby_sort_1_23.q,\
   groupby_sort_skew_1.q,\
   groupby_sort_skew_1_23.q,\
-  qroupby_limit_extrastep.q,\
   having.q,\
   identity_project_remove_skip.q,\
   index_auto_self_join.q,\
@@ -718,6 +689,34 @@ spark.query.files=\
   parallel_join0.q,\
   parallel_join1.q,\
   parquet_join.q,\
+  parquet_vectorization_0.q,\
+  parquet_vectorization_1.q,\
+  parquet_vectorization_10.q,\
+  parquet_vectorization_11.q,\
+  parquet_vectorization_12.q,\
+  parquet_vectorization_13.q,\
+  parquet_vectorization_14.q,\
+  parquet_vectorization_15.q,\
+  parquet_vectorization_16.q,\
+  parquet_vectorization_17.q,\
+  parquet_vectorization_2.q,\
+  parquet_vectorization_3.q,\
+  parquet_vectorization_4.q,\
+  parquet_vectorization_5.q,\
+  parquet_vectorization_6.q,\
+  parquet_vectorization_7.q,\
+  parquet_vectorization_8.q,\
+  parquet_vectorization_9.q,\
+  parquet_vectorization_decimal_date.q,\
+  parquet_vectorization_div0.q,\
+  parquet_vectorization_limit.q,\
+  parquet_vectorization_nested_udf.q,\
+  parquet_vectorization_not.q,\
+  parquet_vectorization_offset_limit.q,\
+  parquet_vectorization_part.q,\
+  parquet_vectorization_part_project.q,\
+  parquet_vectorization_part_varchar.q,\
+  parquet_vectorization_pushdown.q,\
   pcr.q,\
   ppd_gby_join.q,\
   ppd_join.q,\
@@ -741,6 +740,7 @@ spark.query.files=\
   ptf_register_tblfn.q,\
   ptf_seqfile.q,\
   ptf_streaming.q,\
+  qroupby_limit_extrastep.q,\
   rcfile_bigdata.q,\
   reduce_deduplicate_exclude_join.q,\
   router_join_ppr.q,\
@@ -970,9 +970,9 @@ spark.query.files=\
   vectorization_input_format_excludes.q,\
   vectorization_nested_udf.q,\
   vectorization_not.q,\
+  vectorization_parquet_projection.q,\
   vectorization_part.q,\
   vectorization_part_project.q,\
-  vectorization_parquet_projection.q,\
   vectorization_pushdown.q,\
   vectorization_short_regress.q,\
   vectorized_case.q,\
@@ -989,13 +989,15 @@ spark.query.files=\
 # Unlike "spark.query.files" above, these tests only run
 # under Spark engine and only use TestSparkCliDriver.
 spark.only.query.files=\
-  spark_union_merge.q,\
-  spark_combine_equivalent_work_2.q
+  spark_combine_equivalent_work_2.q,\
+  spark_union_merge.q
 
 # Unlike "miniSparkOnYarn.query.files" below, these tests only run
 # under Spark engine and only use TestMiniSparkOnYarnCliDriver.
 miniSparkOnYarn.only.query.files=\
+  dynamic_rdd_cache.q,\
   spark_combine_equivalent_work.q,\
+  spark_constprog_dpp.q,\
   spark_dynamic_partition_pruning.q,\
   spark_dynamic_partition_pruning_2.q,\
   spark_dynamic_partition_pruning_3.q,\
@@ -1004,17 +1006,15 @@ miniSparkOnYarn.only.query.files=\
   spark_dynamic_partition_pruning_6.q,\
   spark_dynamic_partition_pruning_7.q,\
   spark_dynamic_partition_pruning_mapjoin_only.q,\
-  spark_constprog_dpp.q,\
   spark_dynamic_partition_pruning_recursive_mapjoin.q,\
-  dynamic_rdd_cache.q,\
-  spark_multi_insert_parallel_orderby.q,\
-  spark_explainuser_1.q,\
-  spark_vectorized_dynamic_partition_pruning.q,\
-  spark_use_ts_stats_for_mapjoin.q,\
-  spark_use_op_stats.q,\
   spark_explain_groupbyshuffle.q,\
+  spark_explainuser_1.q,\
+  spark_in_process_launcher.q,\
+  spark_multi_insert_parallel_orderby.q,\
   spark_opt_shuffle_serde.q,\
-  spark_in_process_launcher.q
+  spark_use_op_stats.q,\
+  spark_use_ts_stats_for_mapjoin.q,\
+  spark_vectorized_dynamic_partition_pruning.q
 
 miniSparkOnYarn.query.files=\
   auto_sortmerge_join_16.q,\
@@ -1088,20 +1088,17 @@ spark.query.negative.files=\
 spark.only.query.negative.files=\
   spark_job_max_tasks.q,\
   spark_stage_max_tasks.q,\
-  spark_task_failure.q,\
   spark_submit_negative_executor_cores.q,\
-  spark_submit_negative_executor_memory.q
+  spark_submit_negative_executor_memory.q,\
+  spark_task_failure.q
 
 tez.perf.disabled.query.files=\
   mv_query44.q,\
   mv_query67.q
 
 spark.perf.disabled.query.files=\
-  query1b.q,\
-  query14.q,\
-  query64.q,\
-  cbo_query1.q,\
   cbo_ext_query1.q,\
+  cbo_query1.q,\
   cbo_query10.q,\
   cbo_query11.q,\
   cbo_query12.q,\
@@ -1199,30 +1196,33 @@ spark.perf.disabled.query.files=\
   cbo_query98.q,\
   cbo_query99.q,\
   mv_query44.q,\
-  mv_query67.q
+  mv_query67.q,\
+  query14.q,\
+  query1b.q,\
+  query64.q
 
 druid.query.files=\
-  druidmini_test1.q,\
-  druidmini_test_ts.q,\
-  druidmini_joins.q,\
-  druidmini_test_insert.q,\
-  druidmini_mv.q,\
   druid_materialized_view_rewrite_ssb.q,\
+  druid_timeseries.q,\
   druid_timestamptz.q,\
   druid_timestamptz2.q,\
   druid_topn.q,\
-  druid_timeseries.q,\
   druidmini_dynamic_partition.q,\
   druidmini_expressions.q,\
   druidmini_extractTime.q,\
-  druidmini_test_alter.q,\
   druidmini_floorTime.q,\
+  druidmini_joins.q,\
   druidmini_masking.q,\
-  druidmini_semijoin_reduction_all_types.q
+  druidmini_mv.q,\
+  druidmini_semijoin_reduction_all_types.q,\
+  druidmini_test1.q,\
+  druidmini_test_alter.q,\
+  druidmini_test_insert.q,\
+  druidmini_test_ts.q
 
 druid.kafka.query.files=\
-  druidkafkamini_basic.q,\
   druidkafkamini_avro.q,\
+  druidkafkamini_basic.q,\
   druidkafkamini_csv.q,\
   druidkafkamini_delimited.q
 
@@ -1235,5 +1235,5 @@ druid.llap.local.query.files=\
 # tests to be run only by TestErasureCodingHDFSCliDriver
 erasurecoding.only.query.files=\
   erasure_commands.q,\
-  erasure_simple.q,\
-  erasure_explain.q
+  erasure_explain.q,\
+  erasure_simple.q
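
(A sketch for fixing an out-of-order group by hand, along the lines of
the recipe commented in the file above; 'some.group.query.files' is a
placeholder, and LC_ALL=C matches the collation the validator uses:)

    perl -pe 's@\\\s*\n@ @g' testconfiguration.properties \
      | awk -F= '/some.group.query.files/{print $2}' \
      | perl -pe 's@ *, *@\n@g' | LC_ALL=C sort
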
diff --git a/ql/src/test/queries/clientpositive/tez-tag.q b/ql/src/test/queries/clientpositive/tez_tag.q
similarity index 100%
rename from ql/src/test/queries/clientpositive/tez-tag.q
rename to ql/src/test/queries/clientpositive/tez_tag.q
diff --git a/ql/src/test/results/clientpositive/tez/tez-tag.q.out b/ql/src/test/results/clientpositive/tez/tez_tag.q.out
similarity index 100%
rename from ql/src/test/results/clientpositive/tez/tez-tag.q.out
rename to ql/src/test/results/clientpositive/tez/tez_tag.q.out
