saintstack commented on a change in pull request #1737:
URL: https://github.com/apache/hbase/pull/1737#discussion_r431987175



##########
File path: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
##########
@@ -2444,8 +2451,16 @@ public FlushResultImpl flushcache(boolean forceFlushAllStores, boolean writeFlus
       }
 
       try {
-        Collection<HStore> specificStoresToFlush =
+        Collection<HStore> specificStoresToFlush = null;
+        if (!forceFlushAllStores && families != null) {
+          specificStoresToFlush = new ArrayList<>();
+          for (byte[] family : families) {
+            specificStoresToFlush.add(stores.get(family));

Review comment:
       Should this family selection pass through the flushPolicy?
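
       For illustration, a minimal sketch of what routing the selection through
       the policy could look like (this assumes the existing
       FlushPolicy.selectStoresToFlush() hook on HRegion's flushPolicy field;
       the filtering loop itself is illustrative, not code from this PR):

           // Sketch only: ask the configured FlushPolicy for its candidates,
           // then keep just the stores whose family was explicitly requested.
           Collection<HStore> policyStores = flushPolicy.selectStoresToFlush();
           Collection<HStore> specificStoresToFlush = new ArrayList<>();
           for (HStore store : policyStores) {
             byte[] name = store.getColumnFamilyDescriptor().getName();
             for (byte[] family : families) {
               if (Bytes.equals(name, family)) {
                 specificStoresToFlush.add(store);
                 break;
               }
             }
           }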

##########
File path: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
##########
@@ -2444,8 +2451,16 @@ public FlushResultImpl flushcache(boolean forceFlushAllStores, boolean writeFlus
       }
 
       try {
-        Collection<HStore> specificStoresToFlush =
+        Collection<HStore> specificStoresToFlush = null;
+        if (!forceFlushAllStores && families != null) {
+          specificStoresToFlush = new ArrayList<>();
+          for (byte[] family : families) {
+            specificStoresToFlush.add(stores.get(family));
+          }
+        }else{

Review comment:
       Be careful here. Make the code look like the rest of the file: add spaces around the 'else'.
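
       For example (the else-branch body is cut off in the hunk above, so the
       assignment shown here is an assumption, not code from this PR):

           } else {
             // Note the space on each side of 'else', matching the
             // convention in the rest of HRegion.java.
             specificStoresToFlush =
               forceFlushAllStores ? stores.values() : flushPolicy.selectStoresToFlush();
           }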

##########
File path: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
##########
@@ -639,26 +641,30 @@ public int getNumLogFiles() {
   * If the number of un-archived WAL files ('live' WALs) is greater than maximum allowed,
   * check the first (oldest) WAL, and return those regions which should be flushed so that
   * it can be let-go/'archived'.
-   * @return regions (encodedRegionNames) to flush in order to archive oldest WAL file.
+   * @return stores of regions (encodedRegionNames) to flush in order to archive oldest WAL file.
    */
-  byte[][] findRegionsToForceFlush() throws IOException {
-    byte[][] regions = null;
+  Map<byte[], List<byte[]>> findRegionsToForceFlush() throws IOException {
+    Map<byte[], List<byte[]>> regions = null;
     int logCount = getNumRolledLogFiles();
     if (logCount > this.maxLogs && logCount > 0) {
      Map.Entry<Path, WalProps> firstWALEntry = this.walFile2Props.firstEntry();
      regions =
        this.sequenceIdAccounting.findLower(firstWALEntry.getValue().encodedName2HighestSequenceId);
     }
     if (regions != null) {
-      StringBuilder sb = new StringBuilder();
-      for (int i = 0; i < regions.length; i++) {
-        if (i > 0) {
-          sb.append(", ");
+      List<String> listForPrint = new ArrayList();
+      for (Map.Entry<byte[], List<byte[]>> r : regions.entrySet()) {
+        StringBuffer families = new StringBuffer();
+        for (int i = 0; i < r.getValue().size(); i++) {
+          if (i > 0) {
+            families.append("|");

Review comment:
       Usually ',' for lists.
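
       For example:

           if (i > 0) {
             families.append(", ");
           }
           families.append(Bytes.toString(r.getValue().get(i)));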

##########
File path: 
hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
##########
@@ -459,13 +459,18 @@ private FlushType isAboveLowWaterMark() {
   }
 
   @Override
-  public boolean requestFlush(HRegion r, boolean forceFlushAllStores,
-                              FlushLifeCycleTracker tracker) {
+  public boolean requestFlush(HRegion r, boolean forceFlushAllStores,FlushLifeCycleTracker tracker) {

Review comment:
       Space after the comma (make your code follow the convention of the surrounding code).
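
       For example, reusing the two-line layout of the removed signature:

           public boolean requestFlush(HRegion r, boolean forceFlushAllStores,
               FlushLifeCycleTracker tracker) {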

##########
File path: hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
##########
@@ -639,26 +641,30 @@ public int getNumLogFiles() {
   * If the number of un-archived WAL files ('live' WALs) is greater than maximum allowed,
   * check the first (oldest) WAL, and return those regions which should be flushed so that
   * it can be let-go/'archived'.
-   * @return regions (encodedRegionNames) to flush in order to archive oldest WAL file.
+   * @return stores of regions (encodedRegionNames) to flush in order to archive oldest WAL file.
    */
-  byte[][] findRegionsToForceFlush() throws IOException {
-    byte[][] regions = null;
+  Map<byte[], List<byte[]>> findRegionsToForceFlush() throws IOException {
+    Map<byte[], List<byte[]>> regions = null;
     int logCount = getNumRolledLogFiles();
     if (logCount > this.maxLogs && logCount > 0) {
      Map.Entry<Path, WalProps> firstWALEntry = this.walFile2Props.firstEntry();
      regions =
        this.sequenceIdAccounting.findLower(firstWALEntry.getValue().encodedName2HighestSequenceId);
     }
     if (regions != null) {
-      StringBuilder sb = new StringBuilder();
-      for (int i = 0; i < regions.length; i++) {
-        if (i > 0) {
-          sb.append(", ");
+      List<String> listForPrint = new ArrayList();
+      for (Map.Entry<byte[], List<byte[]>> r : regions.entrySet()) {
+        StringBuffer families = new StringBuffer();
+        for (int i = 0; i < r.getValue().size(); i++) {
+          if (i > 0) {
+            families.append("|");
+          }
+          families.append(Bytes.toString(r.getValue().get(i)));
         }
-        sb.append(Bytes.toStringBinary(regions[i]));
+        listForPrint.add(Bytes.toStringBinary(r.getKey()) + "[" + families.toString() + "]");
       }
      LOG.info("Too many WALs; count=" + logCount + ", max=" + this.maxLogs +
-        "; forcing flush of " + regions.length + " regions(s): " + sb.toString());
+        "; forcing flush partial stores of " + regions.size() + " region(s): " + StringUtils.join(",", listForPrint));

Review comment:
       s/forcing flush partial stores of/forcing (partial) flush of/
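
       Applying the suggested wording to the new log statement:

           LOG.info("Too many WALs; count=" + logCount + ", max=" + this.maxLogs +
             "; forcing (partial) flush of " + regions.size() + " region(s): " +
             StringUtils.join(",", listForPrint));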



