This is an automated email from the ASF dual-hosted git repository.

zhangduo pushed a commit to branch branch-2.5
in repository https://gitbox.apache.org/repos/asf/hbase.git


The following commit(s) were added to refs/heads/branch-2.5 by this push:
     new 931c72f770b HBASE-28124 Missing fields in Scan.toJSON (#5678)
931c72f770b is described below

commit 931c72f770bdf2580c6d6f272879a40f380b905c
Author: chandrasekhar-188k <154109917+chandrasekhar-1...@users.noreply.github.com>
AuthorDate: Tue Mar 19 18:56:28 2024 +0530

    HBASE-28124 Missing fields in Scan.toJSON (#5678)
    
    Signed-off-by: Pankaj Kumar <pankajku...@apache.org>
    Signed-off-by: Rajeshbabu Chintaguntla <rajeshb...@apache.org>
    Signed-off-by: Duo Zhang <zhang...@apache.org>
    (cherry picked from commit 0763a740960f7cbb177abd596d9cb203aaf5f025)
---
 RELEASENOTES.md                                    |  2 +-
 .../java/org/apache/hadoop/hbase/client/Scan.java  | 31 ++++++-
 .../apache/hadoop/hbase/client/TestOperation.java  | 98 ++++++++++++++++++++++
 3 files changed, 129 insertions(+), 2 deletions(-)
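
Before the diffs, a minimal sketch of the behaviour this patch enables (the row key "row-0" and family "cf" are illustrative, and it assumes a client built from this branch): several Scan attributes that previously never reached the JSON output now appear in Scan.toJSON().

import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanToJsonSketch {
  public static void main(String[] args) throws Exception {
    Scan scan = new Scan();
    scan.withStartRow(Bytes.toBytes("row-0"), true); // inclusive -> includeStartRow
    scan.setReversed(true);
    scan.setLimit(5);
    scan.setConsistency(Consistency.TIMELINE);
    scan.setColumnFamilyTimeRange(Bytes.toBytes("cf"), 1000L, 2000L);
    // With this patch the output also carries includeStartRow, reversed,
    // limit, consistency, colFamTimeRangeMap, priority and friends.
    System.out.println(scan.toJSON());
  }
}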

diff --git a/RELEASENOTES.md b/RELEASENOTES.md
index 55e4421ee45..404d7cda0aa 100644
--- a/RELEASENOTES.md
+++ b/RELEASENOTES.md
@@ -25,7 +25,7 @@ These release notes cover new developer and user-facing incompatibilities, impor
 
 * [HBASE-28204](https://issues.apache.org/jira/browse/HBASE-28204) | *Major* | **Region Canary can take lot more time If any region (except the first region) starts with delete markers**
 
-Canary is using Scan for first region of the table and Get for rest of the region. RAW Scan was only enabled for first region of any table. If a region has high number of deleted rows for the first row of the key-space, then It can take really long time for Get to finish execution. 
+Canary is using Scan for first region of the table and Get for rest of the region. RAW Scan was only enabled for first region of any table. If a region has high number of deleted rows for the first row of the key-space, then It can take really long time for Get to finish execution.
 
 With this change, Region canary will use scan to validate that every region is accessible and also enables RAW Scan if it's enabled by the user.
 
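As a side note on the release note above, a raw, one-row scan probe of the kind it describes might look like the following sketch (the start key is hypothetical; this is an illustration, not the Canary's actual code):

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionProbeSketch {
  public static void main(String[] args) {
    byte[] regionStartKey = Bytes.toBytes("region-start"); // hypothetical key
    // A raw scan returns delete markers instead of applying them, so a
    // key-space that starts with tombstones is read, not crawled row by row.
    Scan probe = new Scan()
      .withStartRow(regionStartKey)
      .setRaw(true)        // include delete markers
      .setOneRowLimit();   // touch the region, fetch at most one row
    System.out.println(probe);
  }
}
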
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index cdc467d9ca9..74138d9e29f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -25,6 +25,7 @@ import java.util.Map;
 import java.util.NavigableSet;
 import java.util.TreeMap;
 import java.util.TreeSet;
+import java.util.stream.Collectors;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
 import org.apache.hadoop.hbase.filter.Filter;
@@ -904,7 +905,7 @@ public class Scan extends Query {
    */
   @Override
   public Map<String, Object> toMap(int maxCols) {
-    // start with the fingerpring map and build on top of it
+    // start with the fingerprint map and build on top of it
     Map<String, Object> map = getFingerprint();
     // map from families to column list replaces fingerprint's list of families
     Map<String, List<String>> familyColumns = new HashMap<>();
@@ -952,6 +953,34 @@ public class Scan extends Query {
     if (getId() != null) {
       map.put("id", getId());
     }
+    map.put("includeStartRow", includeStartRow);
+    map.put("includeStopRow", includeStopRow);
+    map.put("allowPartialResults", allowPartialResults);
+    map.put("storeLimit", storeLimit);
+    map.put("storeOffset", storeOffset);
+    map.put("reversed", reversed);
+    if (null != asyncPrefetch) {
+      map.put("asyncPrefetch", asyncPrefetch);
+    }
+    map.put("mvccReadPoint", mvccReadPoint);
+    map.put("limit", limit);
+    map.put("readType", readType);
+    map.put("needCursorResult", needCursorResult);
+    map.put("targetReplicaId", targetReplicaId);
+    map.put("consistency", consistency);
+    if (!colFamTimeRangeMap.isEmpty()) {
+      Map<String, List<Long>> colFamTimeRangeMapStr = colFamTimeRangeMap.entrySet().stream()
+        .collect(Collectors.toMap((e) -> Bytes.toStringBinary(e.getKey()), e -> {
+          TimeRange value = e.getValue();
+          List<Long> rangeList = new ArrayList<>();
+          rangeList.add(value.getMin());
+          rangeList.add(value.getMax());
+          return rangeList;
+        }));
+
+      map.put("colFamTimeRangeMap", colFamTimeRangeMapStr);
+    }
+    map.put("priority", getPriority());
     return map;
   }
 
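The colFamTimeRangeMap conversion in the hunk above follows a common HBase pattern: byte[] keys are not JSON-friendly, so each family name is rendered with Bytes.toStringBinary and each TimeRange collapses to a [min, max] pair. A self-contained sketch of the same transformation (the map contents here are illustrative):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.util.Bytes;

public class TimeRangeMapSketch {
  public static void main(String[] args) {
    // byte[] lacks value-based equals/hashCode, hence the comparator-keyed TreeMap
    Map<byte[], TimeRange> byFamily = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    byFamily.put(Bytes.toBytes("cf"), TimeRange.between(1000L, 2000L));

    // Same shape as the patch: printable key, [min, max] value
    Map<String, List<Long>> jsonFriendly = byFamily.entrySet().stream()
      .collect(Collectors.toMap(e -> Bytes.toStringBinary(e.getKey()), e -> {
        List<Long> range = new ArrayList<>();
        range.add(e.getValue().getMin());
        range.add(e.getValue().getMax());
        return range;
      }));
    System.out.println(jsonFriendly); // {cf=[1000, 2000]}
  }
}
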
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java
index a4552f1a407..6725f161f20 100644
--- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java
+++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java
@@ -69,6 +69,9 @@ import org.junit.experimental.categories.Category;
 
 import org.apache.hbase.thirdparty.com.google.common.reflect.TypeToken;
 import org.apache.hbase.thirdparty.com.google.gson.Gson;
+import org.apache.hbase.thirdparty.com.google.gson.GsonBuilder;
+import org.apache.hbase.thirdparty.com.google.gson.LongSerializationPolicy;
+import org.apache.hbase.thirdparty.com.google.gson.ToNumberPolicy;
 
 /**
 * Run tests that use the functionality of the Operation superclass for Puts, Gets, Deletes, Scans,
@@ -345,6 +348,101 @@ public class TestOperation {
       kvMap.get("qualifier"));
   }
 
+  /**
+   * Test the client Scan Operations' JSON encoding to ensure that produced JSON is parseable and
+   * that the details are present and not corrupted.
+   * @throws IOException if the JSON conversion fails
+   */
+  @Test
+  public void testScanOperationToJSON() throws IOException {
+    // produce a Scan Operation
+    Scan scan = new Scan().withStartRow(ROW, true);
+    scan.addColumn(FAMILY, QUALIFIER);
+    scan.withStopRow(ROW, true);
+    scan.readVersions(5);
+    scan.setBatch(10);
+    scan.setAllowPartialResults(true);
+    scan.setMaxResultsPerColumnFamily(3);
+    scan.setRowOffsetPerColumnFamily(8);
+    scan.setCaching(20);
+    scan.setMaxResultSize(50);
+    scan.setCacheBlocks(true);
+    scan.setReversed(true);
+    scan.setTimeRange(1000, 2000);
+    scan.setAsyncPrefetch(true);
+    scan.setMvccReadPoint(123);
+    scan.setLimit(5);
+    scan.setReadType(Scan.ReadType.PREAD);
+    scan.setNeedCursorResult(true);
+    scan.setFilter(SCV_FILTER);
+    scan.setReplicaId(1);
+    scan.setConsistency(Consistency.STRONG);
+    scan.setLoadColumnFamiliesOnDemand(true);
+    scan.setColumnFamilyTimeRange(FAMILY, 2000, 3000);
+    scan.setPriority(10);
+
+    // get its JSON representation, and parse it
+    String json = scan.toJSON();
+    Type typeOfHashMap = new TypeToken<Map<String, Object>>() {
+    }.getType();
+    Gson gson = new GsonBuilder().setLongSerializationPolicy(LongSerializationPolicy.STRING)
+      .setObjectToNumberStrategy(ToNumberPolicy.LONG_OR_DOUBLE).create();
+    Map<String, Object> parsedJSON = gson.fromJson(json, typeOfHashMap);
+    // check for the row
+    assertEquals("startRow incorrect in Scan.toJSON()", 
Bytes.toStringBinary(ROW),
+      parsedJSON.get("startRow"));
+    // check for the family and the qualifier.
+    List familyInfo = (List) ((Map) 
parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY));
+    assertNotNull("Family absent in Scan.toJSON()", familyInfo);
+    assertEquals("Qualifier absent in Scan.toJSON()", 1, familyInfo.size());
+    assertEquals("Qualifier incorrect in Scan.toJSON()", 
Bytes.toStringBinary(QUALIFIER),
+      familyInfo.get(0));
+    assertEquals("stopRow incorrect in Scan.toJSON()", 
Bytes.toStringBinary(ROW),
+      parsedJSON.get("stopRow"));
+    assertEquals("includeStartRow incorrect in Scan.toJSON()", true,
+      parsedJSON.get("includeStartRow"));
+    assertEquals("includeStopRow incorrect in Scan.toJSON()", true,
+      parsedJSON.get("includeStopRow"));
+    assertEquals("maxVersions incorrect in Scan.toJSON()", 5L, 
parsedJSON.get("maxVersions"));
+    assertEquals("batch incorrect in Scan.toJSON()", 10L, 
parsedJSON.get("batch"));
+    assertEquals("allowPartialResults incorrect in Scan.toJSON()", true,
+      parsedJSON.get("allowPartialResults"));
+    assertEquals("storeLimit incorrect in Scan.toJSON()", 3L, 
parsedJSON.get("storeLimit"));
+    assertEquals("storeOffset incorrect in Scan.toJSON()", 8L, 
parsedJSON.get("storeOffset"));
+    assertEquals("caching incorrect in Scan.toJSON()", 20L, 
parsedJSON.get("caching"));
+    assertEquals("maxResultSize incorrect in Scan.toJSON()", "50", 
parsedJSON.get("maxResultSize"));
+    assertEquals("cacheBlocks incorrect in Scan.toJSON()", true, 
parsedJSON.get("cacheBlocks"));
+    assertEquals("reversed incorrect in Scan.toJSON()", true, 
parsedJSON.get("reversed"));
+    List trList = (List) parsedJSON.get("timeRange");
+    assertEquals("timeRange incorrect in Scan.toJSON()", 2, trList.size());
+    assertEquals("timeRange incorrect in Scan.toJSON()", "1000", 
trList.get(0));
+    assertEquals("timeRange incorrect in Scan.toJSON()", "2000", 
trList.get(1));
+
+    assertEquals("asyncPrefetch incorrect in Scan.toJSON()", true, 
parsedJSON.get("asyncPrefetch"));
+    assertEquals("mvccReadPoint incorrect in Scan.toJSON()", "123",
+      parsedJSON.get("mvccReadPoint"));
+    assertEquals("limit incorrect in Scan.toJSON()", 5L, 
parsedJSON.get("limit"));
+    assertEquals("readType incorrect in Scan.toJSON()", "PREAD", 
parsedJSON.get("readType"));
+    assertEquals("needCursorResult incorrect in Scan.toJSON()", true,
+      parsedJSON.get("needCursorResult"));
+
+    Map colFamTimeRange = (Map) parsedJSON.get("colFamTimeRangeMap");
+    assertEquals("colFamTimeRangeMap incorrect in Scan.toJSON()", 1L, 
colFamTimeRange.size());
+    List testFamily = (List) colFamTimeRange.get("testFamily");
+    assertEquals("colFamTimeRangeMap incorrect in Scan.toJSON()", 2L, 
testFamily.size());
+    assertEquals("colFamTimeRangeMap incorrect in Scan.toJSON()", "2000", 
testFamily.get(0));
+    assertEquals("colFamTimeRangeMap incorrect in Scan.toJSON()", "3000", 
testFamily.get(1));
+
+    assertEquals("targetReplicaId incorrect in Scan.toJSON()", 1L,
+      parsedJSON.get("targetReplicaId"));
+    assertEquals("consistency incorrect in Scan.toJSON()", "STRONG", 
parsedJSON.get("consistency"));
+    assertEquals("loadColumnFamiliesOnDemand incorrect in Scan.toJSON()", true,
+      parsedJSON.get("loadColumnFamiliesOnDemand"));
+
+    assertEquals("priority incorrect in Scan.toJSON()", 10L, 
parsedJSON.get("priority"));
+
+  }
+
   @Test
   public void testPutCreationWithByteBuffer() {
     Put p = new Put(ROW);
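
Worth noting why the assertions above mix Long, Boolean and String expectations: HBase's JSON writer serializes Java long fields as JSON strings (LongSerializationPolicy.STRING), while int-typed fields stay numeric, and the test's reader turns any bare JSON number into a Long (ToNumberPolicy.LONG_OR_DOUBLE). A minimal sketch of that behaviour, using plain Gson coordinates rather than the relocated hbase-thirdparty packages:

import java.lang.reflect.Type;
import java.util.Map;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.ToNumberPolicy;
import com.google.gson.reflect.TypeToken;

public class JsonNumberSketch {
  public static void main(String[] args) {
    Gson gson = new GsonBuilder()
      .setObjectToNumberStrategy(ToNumberPolicy.LONG_OR_DOUBLE) // bare numbers -> Long
      .create();
    Type mapType = new TypeToken<Map<String, Object>>() {}.getType();
    // "caching" was written from an int, "maxResultSize" from a long
    Map<String, Object> parsed =
      gson.fromJson("{\"caching\":20,\"maxResultSize\":\"50\"}", mapType);
    System.out.println(parsed.get("caching").getClass());       // java.lang.Long
    System.out.println(parsed.get("maxResultSize").getClass()); // java.lang.String
  }
}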
