kasakrisz commented on code in PR #6089:
URL: https://github.com/apache/hive/pull/6089#discussion_r2560044635


##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java:
##########
@@ -2254,6 +2292,69 @@ private List<ColumnStatisticsObj> aggrStatsUseDB(String 
catName, String dbName,
     }
   }
 
+  private ColumnStatisticsObj 
columnStatisticsObjWithAdjustedNDV(List<Object[]> list, int i,
+                                                                 boolean 
useDensityFunctionForNDVEstimation, double ndvTuner)
+          throws MetaException {
+    if (list.isEmpty()) {
+      return null;
+    }
+    ColumnStatisticsData data = new ColumnStatisticsData();
+    int j = i;
+    Object[] row = list.getFirst();
+    String colName = (String) row[j++];
+    String colType = (String) row[j++];
+    ColumnStatisticsObj cso = new ColumnStatisticsObj(colName, colType, data);
+    Object llow = row[j++];
+    Object lhigh = row[j++];
+    Object dlow = row[j++];
+    Object dhigh = row[j++];
+    Object declow = row[j++];
+    Object dechigh = row[j++];
+    Object nulls = row[j++];
+    Object dist = row[j++];
+    Object avglen = row[j++];
+    Object maxlen = row[j++];
+    Object trues = row[j++];
+    Object falses = row[j++];
+    Object sumLong = row[j++];
+    Object countLong = row[j++];
+    Object sumDouble = row[j++];
+    Object countDouble = row[j++];
+    Object sumDecimal = row[j++];
+    Object countDecimal = row[j++];
+    Object sumDist = row[j];
+    for (int k = 1; k < list.size(); k++) {

Review Comment:
   Can this be replaced with
   ```
   for (Object[] row : list)
   ```
   ?



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java:
##########
@@ -1942,6 +1937,43 @@ private List<ColumnStatisticsObj> 
columnStatisticsObjForPartitionsBatch(String c
     }
   }
 
+  private Batchable<String, Object[]> jobsBatching(final String queryText0, 
final String catName, final String dbName,

Review Comment:
   Can this method be used for anything other than loading per-partition column 
statistics? If not, could you please rename it to a more specific name?



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java:
##########
@@ -2254,6 +2292,69 @@ private List<ColumnStatisticsObj> aggrStatsUseDB(String 
catName, String dbName,
     }
   }
 
+  private ColumnStatisticsObj 
columnStatisticsObjWithAdjustedNDV(List<Object[]> list, int i,
+                                                                 boolean 
useDensityFunctionForNDVEstimation, double ndvTuner)
+          throws MetaException {
+    if (list.isEmpty()) {
+      return null;
+    }
+    ColumnStatisticsData data = new ColumnStatisticsData();
+    int j = i;
+    Object[] row = list.getFirst();
+    String colName = (String) row[j++];
+    String colType = (String) row[j++];
+    ColumnStatisticsObj cso = new ColumnStatisticsObj(colName, colType, data);
+    Object llow = row[j++];
+    Object lhigh = row[j++];
+    Object dlow = row[j++];
+    Object dhigh = row[j++];
+    Object declow = row[j++];
+    Object dechigh = row[j++];
+    Object nulls = row[j++];
+    Object dist = row[j++];
+    Object avglen = row[j++];
+    Object maxlen = row[j++];
+    Object trues = row[j++];
+    Object falses = row[j++];
+    Object sumLong = row[j++];
+    Object countLong = row[j++];
+    Object sumDouble = row[j++];
+    Object countDouble = row[j++];
+    Object sumDecimal = row[j++];
+    Object countDecimal = row[j++];
+    Object sumDist = row[j];
+    for (int k = 1; k < list.size(); k++) {
+      j = i + 2;
+      row = list.get(k);
+      llow = MetastoreDirectSqlUtils.min(llow, row[j++]);
+      lhigh = MetastoreDirectSqlUtils.max(lhigh, row[j++]);
+      dlow = MetastoreDirectSqlUtils.min(dlow, row[j++]);
+      dhigh = MetastoreDirectSqlUtils.max(dhigh, row[j++]);
+      declow = MetastoreDirectSqlUtils.min(declow, row[j++]);
+      dechigh = MetastoreDirectSqlUtils.max(dechigh, row[j++]);
+      nulls = MetastoreDirectSqlUtils.sum(nulls, row[j++]);
+      dist = MetastoreDirectSqlUtils.max(dist, row[j++]);
+      avglen = MetastoreDirectSqlUtils.max(avglen, row[j++]);
+      maxlen = MetastoreDirectSqlUtils.max(maxlen, row[j++]);
+      trues = MetastoreDirectSqlUtils.sum(trues, row[j++]);
+      falses = MetastoreDirectSqlUtils.sum(falses, row[j++]);
+      sumLong = MetastoreDirectSqlUtils.sum(sumLong, row[j++]);
+      countLong = MetastoreDirectSqlUtils.sum(countLong, row[j++]);
+      sumDouble = MetastoreDirectSqlUtils.sum(sumDouble, row[j++]);
+      countDouble = MetastoreDirectSqlUtils.sum(countDouble, row[j++]);
+      sumDecimal = MetastoreDirectSqlUtils.sum(sumDecimal, row[j++]);
+      countDecimal = MetastoreDirectSqlUtils.sum(countDecimal, row[j++]);
+      sumDist = MetastoreDirectSqlUtils.sum(sumDist, row[j]);

Review Comment:
   Please use constant indices instead of `j++`.



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java:
##########
@@ -2098,29 +2121,39 @@ private List<ColumnStatisticsObj> aggrStatsUseDB(String 
catName, String dbName,
         }
         // get sum for all columns to reduce the number of queries
         Map<String, Map<Integer, Object>> sumMap = new HashMap<String, 
Map<Integer, Object>>();
-        queryText = "select \"COLUMN_NAME\", sum(\"NUM_NULLS\"), 
sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\"), sum(\"NUM_DISTINCTS\")"
-            + " from " + PART_COL_STATS
-            + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
-            + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + 
TBLS + ".\"TBL_ID\""
-            + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + 
".\"DB_ID\""
-            + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + ".\"NAME\" = 
? and " + TBLS + ".\"TBL_NAME\" = ? "
-            + " and " + PART_COL_STATS + ".\"COLUMN_NAME\" in (" + 
makeParams(extraColumnNameTypeParts.size()) + ")"
-            + " and " + PARTITIONS + ".\"PART_NAME\" in (" + 
makeParams(partNames.size()) + ")"
-            + " and " + PART_COL_STATS + ".\"ENGINE\" = ? "
-            + " group by " + PART_COL_STATS + ".\"COLUMN_NAME\"";
-        start = doTrace ? System.nanoTime() : 0;
-        try (QueryWrapper query = new 
QueryWrapper(pm.newQuery("javax.jdo.query.SQL", queryText))) {
+        queryText =
+            "select \"COLUMN_NAME\", sum(\"NUM_NULLS\"), sum(\"NUM_TRUES\"), 
sum(\"NUM_FALSES\"), sum(\"NUM_DISTINCTS\")"
+                + " from " + PART_COL_STATS + " inner join " + PARTITIONS + " 
on " + PART_COL_STATS + ".\"PART_ID\" = "
+                + PARTITIONS + ".\"PART_ID\"" + " inner join " + TBLS + " on " 
+ PARTITIONS + ".\"TBL_ID\" = " + TBLS
+                + ".\"TBL_ID\"" + " inner join " + DBS + " on " + TBLS + 
".\"DB_ID\" = " + DBS + ".\"DB_ID\""
+                + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + 
".\"NAME\" = ? and " + TBLS + ".\"TBL_NAME\" = ? "
+                + " and " + PART_COL_STATS + ".\"COLUMN_NAME\" in (" + 
makeParams(extraColumnNameTypeParts.size()) + ")"
+                + " and " + PARTITIONS + ".\"PART_NAME\" in (" + 
makeParams(partNames.size()) + ")" + " and "
+                + PART_COL_STATS + ".\"ENGINE\" = ? " + " group by " + 
PART_COL_STATS + ".\"COLUMN_NAME\"";

Review Comment:
   Please restore the original formatting of the query.



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java:
##########
@@ -2174,76 +2240,48 @@ private List<ColumnStatisticsObj> aggrStatsUseDB(String 
catName, String dbName,
                 || IExtrapolatePartStatus.aggrTypes[colStatIndex] == 
IExtrapolatePartStatus.AggrType.Max) {
               // if the aggregation type is min/max, we extrapolate from the
               // left/right borders
-              if (!decimal) {
-                queryText = "select \"" + colStatName + "\",\"PART_NAME\" from 
" + PART_COL_STATS
-                    + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
-                    + " inner join " + TBLS + " on " + PARTITIONS + 
".\"TBL_ID\" = " + TBLS + ".\"TBL_ID\""
-                    + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + 
DBS + ".\"DB_ID\""
-                    + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + 
".\"NAME\" = ? and " + TBLS + ".\"TBL_NAME\" = ? "
-                    + " and " + PART_COL_STATS + ".\"COLUMN_NAME\" = ? "
-                    + " and " + PARTITIONS + ".\"PART_NAME\" in (" + 
makeParams(partNames.size()) + ")"
-                    + " and " + PART_COL_STATS + ".\"ENGINE\" = ? "
-                    + " order by \"" + colStatName + "\"";
-              } else {
-                queryText = "select \"" + colStatName + "\",\"PART_NAME\" from 
" + PART_COL_STATS
-                    + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
-                    + " inner join " + TBLS + " on " + PARTITIONS + 
".\"TBL_ID\" = " + TBLS + ".\"TBL_ID\""
-                    + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + 
DBS + ".\"DB_ID\""
-                    + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + 
".\"NAME\" = ? and " + TBLS + ".\"TBL_NAME\" = ? "
-                    + " and " + PART_COL_STATS + ".\"COLUMN_NAME\" = ? "
-                    + " and " + PARTITIONS + ".\"PART_NAME\" in (" + 
makeParams(partNames.size()) + ")"
-                    + " and " + PART_COL_STATS + ".\"ENGINE\" = ? "
-                    + " order by cast(\"" + colStatName + "\" as decimal)";
-              }
-              start = doTrace ? System.nanoTime() : 0;
-              try (QueryWrapper query = new 
QueryWrapper(pm.newQuery("javax.jdo.query.SQL", queryText))) {
-                Object qResult = executeWithArray(query.getInnerQuery(),
-                    prepareParams(catName, dbName, tableName, partNames, 
Arrays.asList(colName), engine), queryText);
-                if (qResult == null) {
-                  return Collections.emptyList();
+              String orderByExpr = decimal ? "cast(\"" + colStatName + "\" as 
decimal)" : "\"" + colStatName + "\"";
+
+              queryText =
+                  "select \"" + colStatName + "\",\"PART_NAME\" from " + 
PART_COL_STATS + " inner join " + PARTITIONS
+                      + " on " + PART_COL_STATS + ".\"PART_ID\" = " + 
PARTITIONS + ".\"PART_ID\"" + " inner join "
+                      + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + TBLS + 
".\"TBL_ID\"" + " inner join " + DBS
+                      + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\"" + 
" where " + DBS
+                      + ".\"CTLG_NAME\" = ? and " + DBS + ".\"NAME\" = ? and " 
+ TBLS + ".\"TBL_NAME\" = ? " + " and "
+                      + PART_COL_STATS + ".\"COLUMN_NAME\" in (%1$s)" + " and 
" + PARTITIONS
+                      + ".\"PART_NAME\" in (%2$s)" + " and " + PART_COL_STATS 
+ ".\"ENGINE\" = ? " + " order by "
+                      + orderByExpr;

Review Comment:
   Please format the query
   ```
   select
   from
   inner join
   ...
   where
   ```



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java:
##########
@@ -2098,29 +2121,39 @@ private List<ColumnStatisticsObj> aggrStatsUseDB(String 
catName, String dbName,
         }
         // get sum for all columns to reduce the number of queries
         Map<String, Map<Integer, Object>> sumMap = new HashMap<String, 
Map<Integer, Object>>();
-        queryText = "select \"COLUMN_NAME\", sum(\"NUM_NULLS\"), 
sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\"), sum(\"NUM_DISTINCTS\")"
-            + " from " + PART_COL_STATS
-            + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
-            + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + 
TBLS + ".\"TBL_ID\""
-            + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + 
".\"DB_ID\""
-            + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + ".\"NAME\" = 
? and " + TBLS + ".\"TBL_NAME\" = ? "
-            + " and " + PART_COL_STATS + ".\"COLUMN_NAME\" in (" + 
makeParams(extraColumnNameTypeParts.size()) + ")"
-            + " and " + PARTITIONS + ".\"PART_NAME\" in (" + 
makeParams(partNames.size()) + ")"
-            + " and " + PART_COL_STATS + ".\"ENGINE\" = ? "
-            + " group by " + PART_COL_STATS + ".\"COLUMN_NAME\"";
-        start = doTrace ? System.nanoTime() : 0;
-        try (QueryWrapper query = new 
QueryWrapper(pm.newQuery("javax.jdo.query.SQL", queryText))) {
+        queryText =
+            "select \"COLUMN_NAME\", sum(\"NUM_NULLS\"), sum(\"NUM_TRUES\"), 
sum(\"NUM_FALSES\"), sum(\"NUM_DISTINCTS\")"
+                + " from " + PART_COL_STATS + " inner join " + PARTITIONS + " 
on " + PART_COL_STATS + ".\"PART_ID\" = "
+                + PARTITIONS + ".\"PART_ID\"" + " inner join " + TBLS + " on " 
+ PARTITIONS + ".\"TBL_ID\" = " + TBLS
+                + ".\"TBL_ID\"" + " inner join " + DBS + " on " + TBLS + 
".\"DB_ID\" = " + DBS + ".\"DB_ID\""
+                + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + 
".\"NAME\" = ? and " + TBLS + ".\"TBL_NAME\" = ? "
+                + " and " + PART_COL_STATS + ".\"COLUMN_NAME\" in (" + 
makeParams(extraColumnNameTypeParts.size()) + ")"
+                + " and " + PARTITIONS + ".\"PART_NAME\" in (" + 
makeParams(partNames.size()) + ")" + " and "
+                + PART_COL_STATS + ".\"ENGINE\" = ? " + " group by " + 
PART_COL_STATS + ".\"COLUMN_NAME\"";
+
+        b = jobsBatching(queryText, catName, dbName, tableName, partNames, 
engine, doTrace);
+        try {
           List<String> extraColumnNames = new ArrayList<String>();
           extraColumnNames.addAll(extraColumnNameTypeParts.keySet());
-          Object qResult = executeWithArray(query.getInnerQuery(),
-              prepareParams(catName, dbName, tableName, partNames,
-                  extraColumnNames, engine), queryText);
-          if (qResult == null) {
-            return Collections.emptyList();
+          List<Object[]> unmergedList = Batchable.runBatched(batchSize, 
extraColumnNames, b);
+          Map<String, List<Object[]>> colSubList = 
columnWiseSubList(unmergedList);
+          List<Object[]> mergedList = new ArrayList<>();
+          for (Map.Entry<String, List<Object[]>> entry : 
colSubList.entrySet()) {

Review Comment:
   Can the list-to-map conversion be avoided?



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java:
##########
@@ -1942,6 +1937,43 @@ private List<ColumnStatisticsObj> 
columnStatisticsObjForPartitionsBatch(String c
     }
   }
 
+  private Batchable<String, Object[]> jobsBatching(final String queryText0, 
final String catName, final String dbName,
+      final String tableName, final List<String> partNames, final String 
engine, final boolean doTrace) {
+    return new Batchable<String, Object[]>() {
+      @Override
+      public List<Object[]> run(final List<String> inputColNames)
+          throws MetaException {
+        Batchable<String, Object[]> b2 = new Batchable<String, Object[]>() {

Review Comment:
   Instead of `b2`, please use a more specific name. How about 
`partitionNameBatches`? That way the variable name reflects that we are 
batching on the partition names.



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java:
##########
@@ -1977,116 +2020,96 @@ private List<ColumnStatisticsObj> 
aggrStatsUseDB(String catName, String dbName,
         // And, we also guarantee that the estimation makes sense by comparing 
it to the
         // UpperBound (calculated by "sum(\"NUM_DISTINCTS\")")
         // and LowerBound (calculated by "max(\"NUM_DISTINCTS\")")
-        + "avg((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" 
as decimal)),"
-        + 
"avg((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
-        + "avg((cast(\"BIG_DECIMAL_HIGH_VALUE\" as 
decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\"),"
-        + "sum(\"NUM_DISTINCTS\")" + " from " + PART_COL_STATS + ""
-        + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
-        + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + 
TBLS + ".\"TBL_ID\""
-        + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + 
".\"DB_ID\""
-        + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + ".\"NAME\" = ? 
and " + TBLS + ".\"TBL_NAME\" = ? ";
+        + "sum((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" 
as decimal)),"
+        + 
"count((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" as 
decimal)),"
+        + 
"sum((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
+        + 
"count((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
+        + "sum((cast(\"BIG_DECIMAL_HIGH_VALUE\" as 
decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\"),"
+        + "count((cast(\"BIG_DECIMAL_HIGH_VALUE\" as 
decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\"),"
+        + "sum(\"NUM_DISTINCTS\")" + " from " + PART_COL_STATS + "" + " inner 
join " + PARTITIONS + " on "
+        + PART_COL_STATS + ".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\"" + " 
inner join " + TBLS + " on " + PARTITIONS
+        + ".\"TBL_ID\" = " + TBLS + ".\"TBL_ID\"" + " inner join " + DBS + " 
on " + TBLS + ".\"DB_ID\" = " + DBS
+        + ".\"DB_ID\"" + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + 
".\"NAME\" = ? and " + TBLS
+        + ".\"TBL_NAME\" = ? ";

Review Comment:
   Why was the formatting of the query changed? I think the original was easier 
to read.
   
   ```
   select
   from
   inner join
   ...
   where
   ```



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java:
##########
@@ -1977,116 +2020,96 @@ private List<ColumnStatisticsObj> 
aggrStatsUseDB(String catName, String dbName,
         // And, we also guarantee that the estimation makes sense by comparing 
it to the
         // UpperBound (calculated by "sum(\"NUM_DISTINCTS\")")
         // and LowerBound (calculated by "max(\"NUM_DISTINCTS\")")
-        + "avg((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" 
as decimal)),"
-        + 
"avg((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
-        + "avg((cast(\"BIG_DECIMAL_HIGH_VALUE\" as 
decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\"),"
-        + "sum(\"NUM_DISTINCTS\")" + " from " + PART_COL_STATS + ""
-        + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
-        + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + 
TBLS + ".\"TBL_ID\""
-        + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + 
".\"DB_ID\""
-        + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + ".\"NAME\" = ? 
and " + TBLS + ".\"TBL_NAME\" = ? ";
+        + "sum((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" 
as decimal)),"
+        + 
"count((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" as 
decimal)),"
+        + 
"sum((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
+        + 
"count((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
+        + "sum((cast(\"BIG_DECIMAL_HIGH_VALUE\" as 
decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\"),"
+        + "count((cast(\"BIG_DECIMAL_HIGH_VALUE\" as 
decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\"),"
+        + "sum(\"NUM_DISTINCTS\")" + " from " + PART_COL_STATS + "" + " inner 
join " + PARTITIONS + " on "
+        + PART_COL_STATS + ".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\"" + " 
inner join " + TBLS + " on " + PARTITIONS
+        + ".\"TBL_ID\" = " + TBLS + ".\"TBL_ID\"" + " inner join " + DBS + " 
on " + TBLS + ".\"DB_ID\" = " + DBS
+        + ".\"DB_ID\"" + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + 
".\"NAME\" = ? and " + TBLS
+        + ".\"TBL_NAME\" = ? ";
     String queryText = null;
-    long start = 0;
-    long end = 0;
 
     boolean doTrace = LOG.isDebugEnabled();
     ForwardQueryResult<?> fqr = null;
     // Check if the status of all the columns of all the partitions exists
     // Extrapolation is not needed.
     if (areAllPartsFound) {
-      queryText = commonPrefix + " and \"COLUMN_NAME\" in (" + 
makeParams(colNames.size()) + ")"
-          + " and " + PARTITIONS + ".\"PART_NAME\" in (" + 
makeParams(partNames.size()) + ")"
-          + " and \"ENGINE\" = ? "
-          + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
-      start = doTrace ? System.nanoTime() : 0;
-      try (QueryWrapper query = new 
QueryWrapper(pm.newQuery("javax.jdo.query.SQL", queryText))) {
-        Object qResult = executeWithArray(query.getInnerQuery(),
-            prepareParams(catName, dbName, tableName, partNames, colNames,
-                engine), queryText);
-        if (qResult == null) {
-          return Collections.emptyList();
-        }
-        end = doTrace ? System.nanoTime() : 0;
-        MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, end);
-        List<Object[]> list = MetastoreDirectSqlUtils.ensureList(qResult);
-        List<ColumnStatisticsObj> colStats =
-            new ArrayList<ColumnStatisticsObj>(list.size());
-        for (Object[] row : list) {
-          colStats.add(prepareCSObjWithAdjustedNDV(row, 0,
-              useDensityFunctionForNDVEstimation, ndvTuner));
+      queryText = commonPrefix + " and \"COLUMN_NAME\" in (%1$s)" + " and " + 
PARTITIONS + ".\"PART_NAME\" in (%2$s)"
+          + " and \"ENGINE\" = ? " + " group by \"COLUMN_NAME\", 
\"COLUMN_TYPE\"";
+      Batchable<String, Object[]> b = jobsBatching(queryText, catName, dbName, 
tableName, partNames, engine, doTrace);
+      List<ColumnStatisticsObj> colStats = new ArrayList<>(colNames.size());
+      try {
+        List<Object[]> list = Batchable.runBatched(batchSize, colNames, b);
+        Map<String, List<Object[]>> colSubList = columnWiseSubList(list);
+        for (Map.Entry<String, List<Object[]>> entry : colSubList.entrySet()) {
+          colStats.add(columnStatisticsObjWithAdjustedNDV(entry.getValue(), 0, 
useDensityFunctionForNDVEstimation, ndvTuner));
           Deadline.checkTimeout();
         }
-        return colStats;
+      } finally {
+        b.closeAllQueries();
       }
+      return colStats;
     } else {
       // Extrapolation is needed for some columns.
       // In this case, at least a column status for a partition is missing.
       // We need to extrapolate this partition based on the other partitions
       List<ColumnStatisticsObj> colStats = new 
ArrayList<ColumnStatisticsObj>(colNames.size());
-      queryText = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", 
count(\"PART_COL_STATS\".\"PART_ID\") "
-          + " from " + PART_COL_STATS
-          + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
-          + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + 
TBLS + ".\"TBL_ID\""
-          + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + 
".\"DB_ID\""
-          + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + ".\"NAME\" = ? 
and " + TBLS + ".\"TBL_NAME\" = ? "
-          + " and " + PART_COL_STATS + ".\"COLUMN_NAME\" in (" + 
makeParams(colNames.size()) + ")"
-          + " and " + PARTITIONS + ".\"PART_NAME\" in (" + 
makeParams(partNames.size()) + ")"
-          + " and " + PART_COL_STATS + ".\"ENGINE\" = ? "
-          + " group by " + PART_COL_STATS + ".\"COLUMN_NAME\", " + 
PART_COL_STATS + ".\"COLUMN_TYPE\"";
-      start = doTrace ? System.nanoTime() : 0;
+      queryText =
+          "select \"COLUMN_NAME\", \"COLUMN_TYPE\", 
count(\"PART_COL_STATS\".\"PART_ID\") " + " from " + PART_COL_STATS
+              + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
+              + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " 
+ TBLS + ".\"TBL_ID\"" + " inner join "
+              + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\"" + " 
where " + DBS + ".\"CTLG_NAME\" = ? and "
+              + DBS + ".\"NAME\" = ? and " + TBLS + ".\"TBL_NAME\" = ? " + " 
and " + PART_COL_STATS
+              + ".\"COLUMN_NAME\" in (%1$s)" + " and " + PARTITIONS + 
".\"PART_NAME\" in (%2$s)" + " and "
+              + PART_COL_STATS + ".\"ENGINE\" = ? " + " group by " + 
PART_COL_STATS + ".\"COLUMN_NAME\", "
+              + PART_COL_STATS + ".\"COLUMN_TYPE\"";

Review Comment:
   Why was the formatting of the query changed? I think the original was easier 
to read.



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java:
##########
@@ -1977,116 +2020,96 @@ private List<ColumnStatisticsObj> 
aggrStatsUseDB(String catName, String dbName,
         // And, we also guarantee that the estimation makes sense by comparing 
it to the
         // UpperBound (calculated by "sum(\"NUM_DISTINCTS\")")
         // and LowerBound (calculated by "max(\"NUM_DISTINCTS\")")
-        + "avg((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" 
as decimal)),"
-        + 
"avg((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
-        + "avg((cast(\"BIG_DECIMAL_HIGH_VALUE\" as 
decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\"),"
-        + "sum(\"NUM_DISTINCTS\")" + " from " + PART_COL_STATS + ""
-        + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
-        + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + 
TBLS + ".\"TBL_ID\""
-        + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + 
".\"DB_ID\""
-        + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + ".\"NAME\" = ? 
and " + TBLS + ".\"TBL_NAME\" = ? ";
+        + "sum((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" 
as decimal)),"
+        + 
"count((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" as 
decimal)),"
+        + 
"sum((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
+        + 
"count((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
+        + "sum((cast(\"BIG_DECIMAL_HIGH_VALUE\" as 
decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\"),"
+        + "count((cast(\"BIG_DECIMAL_HIGH_VALUE\" as 
decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\"),"
+        + "sum(\"NUM_DISTINCTS\")" + " from " + PART_COL_STATS + "" + " inner 
join " + PARTITIONS + " on "
+        + PART_COL_STATS + ".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\"" + " 
inner join " + TBLS + " on " + PARTITIONS
+        + ".\"TBL_ID\" = " + TBLS + ".\"TBL_ID\"" + " inner join " + DBS + " 
on " + TBLS + ".\"DB_ID\" = " + DBS
+        + ".\"DB_ID\"" + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + 
".\"NAME\" = ? and " + TBLS
+        + ".\"TBL_NAME\" = ? ";
     String queryText = null;
-    long start = 0;
-    long end = 0;
 
     boolean doTrace = LOG.isDebugEnabled();
     ForwardQueryResult<?> fqr = null;
     // Check if the status of all the columns of all the partitions exists
     // Extrapolation is not needed.
     if (areAllPartsFound) {
-      queryText = commonPrefix + " and \"COLUMN_NAME\" in (" + 
makeParams(colNames.size()) + ")"
-          + " and " + PARTITIONS + ".\"PART_NAME\" in (" + 
makeParams(partNames.size()) + ")"
-          + " and \"ENGINE\" = ? "
-          + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
-      start = doTrace ? System.nanoTime() : 0;
-      try (QueryWrapper query = new 
QueryWrapper(pm.newQuery("javax.jdo.query.SQL", queryText))) {
-        Object qResult = executeWithArray(query.getInnerQuery(),
-            prepareParams(catName, dbName, tableName, partNames, colNames,
-                engine), queryText);
-        if (qResult == null) {
-          return Collections.emptyList();
-        }
-        end = doTrace ? System.nanoTime() : 0;
-        MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, end);
-        List<Object[]> list = MetastoreDirectSqlUtils.ensureList(qResult);
-        List<ColumnStatisticsObj> colStats =
-            new ArrayList<ColumnStatisticsObj>(list.size());
-        for (Object[] row : list) {
-          colStats.add(prepareCSObjWithAdjustedNDV(row, 0,
-              useDensityFunctionForNDVEstimation, ndvTuner));
+      queryText = commonPrefix + " and \"COLUMN_NAME\" in (%1$s)" + " and " + 
PARTITIONS + ".\"PART_NAME\" in (%2$s)"
+          + " and \"ENGINE\" = ? " + " group by \"COLUMN_NAME\", 
\"COLUMN_TYPE\"";
+      Batchable<String, Object[]> b = jobsBatching(queryText, catName, dbName, 
tableName, partNames, engine, doTrace);
+      List<ColumnStatisticsObj> colStats = new ArrayList<>(colNames.size());
+      try {
+        List<Object[]> list = Batchable.runBatched(batchSize, colNames, b);
+        Map<String, List<Object[]>> colSubList = columnWiseSubList(list);
+        for (Map.Entry<String, List<Object[]>> entry : colSubList.entrySet()) {

Review Comment:
   Could this list to map conversion be avoided? We could save memory in case 
of heavily partitioned tables with lots of columns.
   
   IIUC a column can appear in multiple batches, so the stats have to be 
merged. This is what `columnStatisticsObjWithAdjustedNDV` does. Can it be 
called more than once per column?
   
   



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java:
##########
@@ -2254,6 +2292,69 @@ private List<ColumnStatisticsObj> aggrStatsUseDB(String 
catName, String dbName,
     }
   }
 
+  private ColumnStatisticsObj 
columnStatisticsObjWithAdjustedNDV(List<Object[]> list, int i,
+                                                                 boolean 
useDensityFunctionForNDVEstimation, double ndvTuner)
+          throws MetaException {
+    if (list.isEmpty()) {
+      return null;
+    }
+    ColumnStatisticsData data = new ColumnStatisticsData();
+    int j = i;
+    Object[] row = list.getFirst();
+    String colName = (String) row[j++];
+    String colType = (String) row[j++];
+    ColumnStatisticsObj cso = new ColumnStatisticsObj(colName, colType, data);
+    Object llow = row[j++];
+    Object lhigh = row[j++];
+    Object dlow = row[j++];
+    Object dhigh = row[j++];
+    Object declow = row[j++];
+    Object dechigh = row[j++];
+    Object nulls = row[j++];
+    Object dist = row[j++];
+    Object avglen = row[j++];
+    Object maxlen = row[j++];
+    Object trues = row[j++];
+    Object falses = row[j++];
+    Object sumLong = row[j++];
+    Object countLong = row[j++];
+    Object sumDouble = row[j++];
+    Object countDouble = row[j++];
+    Object sumDecimal = row[j++];
+    Object countDecimal = row[j++];

Review Comment:
   Please use constant indices instead of `j++`.



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java:
##########
@@ -2254,6 +2292,69 @@ private List<ColumnStatisticsObj> aggrStatsUseDB(String 
catName, String dbName,
     }
   }
 
+  private ColumnStatisticsObj 
columnStatisticsObjWithAdjustedNDV(List<Object[]> list, int i,

Review Comment:
   Please use meaningful parameter names instead of `list` and `i`.
   The actual parameter value of `i` is always 0. Can it be removed?
   
   When is this method called? I attached a debugger, but it never hit.
   ```
   mvn test -pl itests/qtest -Pitests -Dtest=TestTezTPCDS30TBPerfCliDriver  
-Dqfile=query16.q -Dhive.metastore.direct.sql.batch.size=1000 -Drat.skip 
-Dmaven.surefire.debug
   ```
   



##########
standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java:
##########
@@ -1977,116 +2020,96 @@ private List<ColumnStatisticsObj> 
aggrStatsUseDB(String catName, String dbName,
         // And, we also guarantee that the estimation makes sense by comparing 
it to the
         // UpperBound (calculated by "sum(\"NUM_DISTINCTS\")")
         // and LowerBound (calculated by "max(\"NUM_DISTINCTS\")")
-        + "avg((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" 
as decimal)),"
-        + 
"avg((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
-        + "avg((cast(\"BIG_DECIMAL_HIGH_VALUE\" as 
decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\"),"
-        + "sum(\"NUM_DISTINCTS\")" + " from " + PART_COL_STATS + ""
-        + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
-        + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + 
TBLS + ".\"TBL_ID\""
-        + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + 
".\"DB_ID\""
-        + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + ".\"NAME\" = ? 
and " + TBLS + ".\"TBL_NAME\" = ? ";
+        + "sum((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" 
as decimal)),"
+        + 
"count((\"LONG_HIGH_VALUE\"-\"LONG_LOW_VALUE\")/cast(\"NUM_DISTINCTS\" as 
decimal)),"
+        + 
"sum((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
+        + 
"count((\"DOUBLE_HIGH_VALUE\"-\"DOUBLE_LOW_VALUE\")/\"NUM_DISTINCTS\"),"
+        + "sum((cast(\"BIG_DECIMAL_HIGH_VALUE\" as 
decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\"),"
+        + "count((cast(\"BIG_DECIMAL_HIGH_VALUE\" as 
decimal)-cast(\"BIG_DECIMAL_LOW_VALUE\" as decimal))/\"NUM_DISTINCTS\"),"
+        + "sum(\"NUM_DISTINCTS\")" + " from " + PART_COL_STATS + "" + " inner 
join " + PARTITIONS + " on "
+        + PART_COL_STATS + ".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\"" + " 
inner join " + TBLS + " on " + PARTITIONS
+        + ".\"TBL_ID\" = " + TBLS + ".\"TBL_ID\"" + " inner join " + DBS + " 
on " + TBLS + ".\"DB_ID\" = " + DBS
+        + ".\"DB_ID\"" + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + 
".\"NAME\" = ? and " + TBLS
+        + ".\"TBL_NAME\" = ? ";
     String queryText = null;
-    long start = 0;
-    long end = 0;
 
     boolean doTrace = LOG.isDebugEnabled();
     ForwardQueryResult<?> fqr = null;
     // Check if the status of all the columns of all the partitions exists
     // Extrapolation is not needed.
     if (areAllPartsFound) {
-      queryText = commonPrefix + " and \"COLUMN_NAME\" in (" + 
makeParams(colNames.size()) + ")"
-          + " and " + PARTITIONS + ".\"PART_NAME\" in (" + 
makeParams(partNames.size()) + ")"
-          + " and \"ENGINE\" = ? "
-          + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\"";
-      start = doTrace ? System.nanoTime() : 0;
-      try (QueryWrapper query = new 
QueryWrapper(pm.newQuery("javax.jdo.query.SQL", queryText))) {
-        Object qResult = executeWithArray(query.getInnerQuery(),
-            prepareParams(catName, dbName, tableName, partNames, colNames,
-                engine), queryText);
-        if (qResult == null) {
-          return Collections.emptyList();
-        }
-        end = doTrace ? System.nanoTime() : 0;
-        MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, end);
-        List<Object[]> list = MetastoreDirectSqlUtils.ensureList(qResult);
-        List<ColumnStatisticsObj> colStats =
-            new ArrayList<ColumnStatisticsObj>(list.size());
-        for (Object[] row : list) {
-          colStats.add(prepareCSObjWithAdjustedNDV(row, 0,
-              useDensityFunctionForNDVEstimation, ndvTuner));
+      queryText = commonPrefix + " and \"COLUMN_NAME\" in (%1$s)" + " and " + 
PARTITIONS + ".\"PART_NAME\" in (%2$s)"
+          + " and \"ENGINE\" = ? " + " group by \"COLUMN_NAME\", 
\"COLUMN_TYPE\"";
+      Batchable<String, Object[]> b = jobsBatching(queryText, catName, dbName, 
tableName, partNames, engine, doTrace);
+      List<ColumnStatisticsObj> colStats = new ArrayList<>(colNames.size());
+      try {
+        List<Object[]> list = Batchable.runBatched(batchSize, colNames, b);
+        Map<String, List<Object[]>> colSubList = columnWiseSubList(list);
+        for (Map.Entry<String, List<Object[]>> entry : colSubList.entrySet()) {
+          colStats.add(columnStatisticsObjWithAdjustedNDV(entry.getValue(), 0, 
useDensityFunctionForNDVEstimation, ndvTuner));
           Deadline.checkTimeout();
         }
-        return colStats;
+      } finally {
+        b.closeAllQueries();
       }
+      return colStats;
     } else {
       // Extrapolation is needed for some columns.
       // In this case, at least a column status for a partition is missing.
       // We need to extrapolate this partition based on the other partitions
       List<ColumnStatisticsObj> colStats = new 
ArrayList<ColumnStatisticsObj>(colNames.size());
-      queryText = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", 
count(\"PART_COL_STATS\".\"PART_ID\") "
-          + " from " + PART_COL_STATS
-          + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
-          + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + 
TBLS + ".\"TBL_ID\""
-          + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + 
".\"DB_ID\""
-          + " where " + DBS + ".\"CTLG_NAME\" = ? and " + DBS + ".\"NAME\" = ? 
and " + TBLS + ".\"TBL_NAME\" = ? "
-          + " and " + PART_COL_STATS + ".\"COLUMN_NAME\" in (" + 
makeParams(colNames.size()) + ")"
-          + " and " + PARTITIONS + ".\"PART_NAME\" in (" + 
makeParams(partNames.size()) + ")"
-          + " and " + PART_COL_STATS + ".\"ENGINE\" = ? "
-          + " group by " + PART_COL_STATS + ".\"COLUMN_NAME\", " + 
PART_COL_STATS + ".\"COLUMN_TYPE\"";
-      start = doTrace ? System.nanoTime() : 0;
+      queryText =
+          "select \"COLUMN_NAME\", \"COLUMN_TYPE\", 
count(\"PART_COL_STATS\".\"PART_ID\") " + " from " + PART_COL_STATS
+              + " inner join " + PARTITIONS + " on " + PART_COL_STATS + 
".\"PART_ID\" = " + PARTITIONS + ".\"PART_ID\""
+              + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " 
+ TBLS + ".\"TBL_ID\"" + " inner join "
+              + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\"" + " 
where " + DBS + ".\"CTLG_NAME\" = ? and "
+              + DBS + ".\"NAME\" = ? and " + TBLS + ".\"TBL_NAME\" = ? " + " 
and " + PART_COL_STATS
+              + ".\"COLUMN_NAME\" in (%1$s)" + " and " + PARTITIONS + 
".\"PART_NAME\" in (%2$s)" + " and "
+              + PART_COL_STATS + ".\"ENGINE\" = ? " + " group by " + 
PART_COL_STATS + ".\"COLUMN_NAME\", "
+              + PART_COL_STATS + ".\"COLUMN_TYPE\"";
+
+      Batchable<String, Object[]> b = jobsBatching(queryText, catName, dbName, 
tableName, partNames, engine, doTrace);

Review Comment:
   Instead of `b`, please use a more specific variable name.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]


Reply via email to