[13/51] [partial] hbase-site git commit: Published site at 82d554e3783372cc6b05489452c815b57c06f6cd.
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/client/HTableWrapper.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HTableWrapper.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HTableWrapper.html index d0faa65..965105d 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HTableWrapper.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HTableWrapper.html @@ -279,71 +279,61 @@ 271 } 272 273 @Override -274 public long getWriteBufferSize() { -275 return table.getWriteBufferSize(); -276 } -277 -278 @Override -279 public void setWriteBufferSize(long writeBufferSize) throws IOException { -280 table.setWriteBufferSize(writeBufferSize); -281 } -282 -283 @Override -284 public <R extends Message> Map<byte[], R> batchCoprocessorService( -285 MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey, -286 R responsePrototype) throws ServiceException, Throwable { -287return table.batchCoprocessorService(methodDescriptor, request, startKey, endKey, -288 responsePrototype); -289 } -290 -291 @Override -292 public <R extends Message> void batchCoprocessorService(MethodDescriptor methodDescriptor, -293 Message request, byte[] startKey, byte[] endKey, R responsePrototype, Callback<R> callback) -294 throws ServiceException, Throwable { -295 table.batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype, -296 callback); -297 } -298 -299 @Override -300 public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, -301 CompareOp compareOp, byte[] value, RowMutations rm) throws IOException { -302return table.checkAndMutate(row, family, qualifier, compareOp, value, rm); +274 public <R extends Message> Map<byte[], R> batchCoprocessorService( +275 MethodDescriptor methodDescriptor, Message request, byte[] startKey, byte[] endKey, +276 R responsePrototype) throws ServiceException, Throwable { +277return table.batchCoprocessorService(methodDescriptor, request, startKey, endKey, +278 responsePrototype); +279 } +280 +281 @Override +282 public <R extends Message> void batchCoprocessorService(MethodDescriptor methodDescriptor, +283 Message request, byte[] startKey, byte[] endKey, R responsePrototype, Callback<R> callback) +284 throws ServiceException, Throwable { +285 table.batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype, +286 callback); +287 } +288 +289 @Override +290 public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, +291 CompareOp compareOp, byte[] value, RowMutations rm) throws IOException { +292return table.checkAndMutate(row, family, qualifier, compareOp, value, rm); +293 } +294 +295 @Override +296 public void setOperationTimeout(int operationTimeout) { +297 table.setOperationTimeout(operationTimeout); +298 } +299 +300 @Override +301 public int getOperationTimeout() { +302return table.getOperationTimeout(); 303 } 304 305 @Override -306 public void setOperationTimeout(int operationTimeout) { -307 table.setOperationTimeout(operationTimeout); -308 } -309 -310 @Override -311 public int getOperationTimeout() { -312return table.getOperationTimeout(); -313 } -314 -315 @Override -316 @Deprecated -317 public void setRpcTimeout(int rpcTimeout) { -318table.setRpcTimeout(rpcTimeout); -319 } -320 -321 @Override -322 public void setWriteRpcTimeout(int writeRpcTimeout) { table.setWriteRpcTimeout(writeRpcTimeout); } -323 -324 @Override -325 public void setReadRpcTimeout(int readRpcTimeout) {
table.setReadRpcTimeout(readRpcTimeout); } -326 -327 @Override -328 @Deprecated -329 public int getRpcTimeout() { -330return table.getRpcTimeout(); -331 } -332 -333 @Override -334 public int getWriteRpcTimeout() { return table.getWriteRpcTimeout(); } -335 -336 @Override -337 public int getReadRpcTimeout() { return table.getReadRpcTimeout(); } -338} +306 @Deprecated +307 public void setRpcTimeout(int rpcTimeout) { +308table.setRpcTimeout(rpcTimeout); +309 } +310 +311 @Override +312 public void setWriteRpcTimeout(int writeRpcTimeout) { table.setWriteRpcTimeout(writeRpcTimeout); } +313 +314 @Override +315 public void setReadRpcTimeout(int readRpcTimeout) { table.setReadRpcTimeout(readRpcTimeout); } +316 +317 @Override +318 @Deprecated +319 public int getRpcTimeout() { +320return table.getRpcTimeout(); +321 } +322 +323 @Override +324 public int getWriteRpcTimeout() { return table.getWriteRpcTimeout(); } +325 +326 @Override +327 public int getReadRpcTimeout() { return table.getReadRpcTimeout(); } +328} http://git-wip-us.apache.org/repos/asf/hbase-site/blob/346adc37/devapidocs/src-html/org/apache/hadoop/hbase/client/Query.html
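The hunk above keeps the split read/write RPC timeout accessors while marking the unified setRpcTimeout(int) deprecated. A minimal client-side sketch of the split timeouts, assuming an HBase 2.x Table obtained from an unmanaged Connection ("t1" is a placeholder table name):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class SplitRpcTimeouts {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("t1"))) {
          // Prefer the split timeouts over the deprecated setRpcTimeout(int):
          table.setReadRpcTimeout(10000);   // per-read RPC budget, in ms
          table.setWriteRpcTimeout(30000);  // per-write RPC budget, in ms
        }
      }
    }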
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2aec596e/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html index 704e1fc..eacb7b5 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html +++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":42,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":9,"i11":10,"i12":10,"i13":10,"i14":41,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":10,"i23":10,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":42,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":9,"i79":10,"i80":10,"i81":9,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":41,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109 ":10,"i110":10,"i111":9,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10,"i173":42,"i174":10,"i175":10,"i176":10,"i177":42,"i178":10,"i179":10,"i180":10,"i181":10,"i182":10,"i183":10,"i184":10,"i185":10,"i186":10,"i187":10,"i188":10,"i189":10,"i190":10,"i191":10,"i192":10,"i193":10,"i194":10,"i195":10,"i196":10,"i197":10,"i198":10,"i199":42,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":10,"i2 09":10,"i210":10,"i211":10,"i212":10,"i213":10,"i214":10,"i215":10,"i216":10,"i217":10,"i218":10,"i219":10,"i220":10,"i221":10,"i222":10,"i223":10,"i224":10,"i225":10,"i226":10,"i227":10,"i228":10,"i229":10,"i230":10,"i231":10,"i232":10}; +var methods = 
{"i0":10,"i1":10,"i2":10,"i3":42,"i4":10,"i5":10,"i6":10,"i7":10,"i8":10,"i9":10,"i10":9,"i11":10,"i12":10,"i13":10,"i14":41,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":42,"i23":42,"i24":42,"i25":42,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":10,"i32":10,"i33":10,"i34":10,"i35":10,"i36":10,"i37":10,"i38":10,"i39":10,"i40":10,"i41":10,"i42":10,"i43":10,"i44":10,"i45":10,"i46":42,"i47":10,"i48":10,"i49":10,"i50":10,"i51":10,"i52":10,"i53":10,"i54":10,"i55":10,"i56":10,"i57":10,"i58":10,"i59":10,"i60":10,"i61":10,"i62":10,"i63":10,"i64":10,"i65":10,"i66":10,"i67":10,"i68":10,"i69":10,"i70":10,"i71":10,"i72":10,"i73":10,"i74":10,"i75":10,"i76":10,"i77":10,"i78":9,"i79":10,"i80":10,"i81":9,"i82":10,"i83":10,"i84":10,"i85":10,"i86":10,"i87":10,"i88":10,"i89":10,"i90":41,"i91":10,"i92":10,"i93":10,"i94":10,"i95":10,"i96":10,"i97":10,"i98":10,"i99":10,"i100":10,"i101":10,"i102":10,"i103":10,"i104":10,"i105":10,"i106":10,"i107":10,"i108":10,"i109 ":10,"i110":10,"i111":9,"i112":10,"i113":10,"i114":10,"i115":10,"i116":10,"i117":10,"i118":10,"i119":10,"i120":10,"i121":10,"i122":10,"i123":10,"i124":10,"i125":10,"i126":10,"i127":10,"i128":10,"i129":10,"i130":10,"i131":10,"i132":10,"i133":10,"i134":10,"i135":10,"i136":10,"i137":10,"i138":10,"i139":10,"i140":10,"i141":10,"i142":10,"i143":10,"i144":10,"i145":10,"i146":10,"i147":10,"i148":10,"i149":10,"i150":10,"i151":10,"i152":10,"i153":10,"i154":10,"i155":10,"i156":10,"i157":10,"i158":10,"i159":10,"i160":10,"i161":10,"i162":10,"i163":10,"i164":10,"i165":10,"i166":10,"i167":10,"i168":10,"i169":10,"i170":10,"i171":10,"i172":10,"i173":42,"i174":10,"i175":10,"i176":10,"i177":42,"i178":10,"i179":10,"i180":10,"i181":10,"i182":10,"i183":10,"i184":10,"i185":10,"i186":10,"i187":10,"i188":10,"i189":10,"i190":10,"i191":10,"i192":10,"i193":10,"i194":10,"i195":10,"i196":10,"i197":10,"i198":10,"i199":42,"i200":10,"i201":10,"i202":10,"i203":10,"i204":10,"i205":10,"i206":10,"i207":10,"i208":10,"i2
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/1837997e/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html index a691301..9a8f45d 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.RegionScannerCloseCallBack.html @@ -165,3337 +165,3323 @@ 157import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ReplicateWALEntryResponse; 158import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterRequest; 159import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.RollWALWriterResponse; -160import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; -161import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; -162import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerRequest; -163import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.StopServerResponse; -164import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationRequest; -165import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateConfigurationResponse; -166import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest; -167import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse; -168import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry; -169import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionRequest; -170import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WarmupRegionResponse; -171import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -172import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action; -173import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest; -174import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath; -175import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileResponse; -176import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest; -177import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadResponse; -178import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; -179import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Condition; -180import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceRequest; -181import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CoprocessorServiceResponse; -182import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetRequest; -183import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse; -184import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRegionLoadStats; -185import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest; -186import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiResponse; -187import 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest; -188import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse; -189import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto; -190import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; -191import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest; -192import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadResponse; -193import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction; -194import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult; -195import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException; -196import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest; -197import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse; -198import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; -199import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.RegionLoad; -200import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair; -201import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameInt64Pair; -202import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a2b2dd19/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html index c9a18a3..c80f6d8 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/util/HBaseFsck.HBaseFsckTool.html @@ -2492,2617 +2492,2627 @@ 2484 return; 2485} 2486 } -2487 errors.reportError(ERROR_CODE.LINGERING_SPLIT_PARENT, "Region " -2488 + descriptiveName + " is a split parent in META, in HDFS, " -2489 + "and not deployed on any region server. This could be transient, " -2490 + "consider to run the catalog janitor first!"); -2491 if (shouldFixSplitParents()) { -2492setShouldRerun(); -2493resetSplitParent(hbi); -2494 } -2495} else if (inMeta && !inHdfs && !isDeployed) { -2496 errors.reportError(ERROR_CODE.NOT_IN_HDFS_OR_DEPLOYED, "Region " -2497 + descriptiveName + " found in META, but not in HDFS " -2498 + "or deployed on any region server."); -2499 if (shouldFixMeta()) { -2500deleteMetaRegion(hbi); -2501 } -2502} else if (inMeta && !inHdfs && isDeployed) { -2503 errors.reportError(ERROR_CODE.NOT_IN_HDFS, "Region " + descriptiveName -2504 + " found in META, but not in HDFS, " + -2505 "and deployed on " + Joiner.on(", ").join(hbi.deployedOn)); -2506 // We treat HDFS as ground truth. Any information in meta is transient -2507 // and equivalent data can be regenerated. So, lets unassign and remove -2508 // these problems from META. -2509 if (shouldFixAssignments()) { -2510errors.print("Trying to fix unassigned region..."); -2511undeployRegions(hbi); -2512 } -2513 if (shouldFixMeta()) { -2514// wait for it to complete -2515deleteMetaRegion(hbi); -2516 } -2517} else if (inMeta && inHdfs && !isDeployed && shouldBeDeployed) { -2518 errors.reportError(ERROR_CODE.NOT_DEPLOYED, "Region " + descriptiveName -2519 + " not deployed on any region server."); -2520 tryAssignmentRepair(hbi, "Trying to fix unassigned region..."); -2521} else if (inMeta && inHdfs && isDeployed && !shouldBeDeployed) { -2522 errors.reportError(ERROR_CODE.SHOULD_NOT_BE_DEPLOYED, -2523 "Region " + descriptiveName + " should not be deployed according " + -2524 "to META, but is deployed on " + Joiner.on(", ").join(hbi.deployedOn)); -2525 if (shouldFixAssignments()) { -2526errors.print("Trying to close the region " + descriptiveName); -2527setShouldRerun(); -2528 HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn); -2529 } -2530} else if (inMeta && inHdfs && isMultiplyDeployed) { -2531 errors.reportError(ERROR_CODE.MULTI_DEPLOYED, "Region " + descriptiveName -2532 + " is listed in hbase:meta on region server " + hbi.metaEntry.regionServer -2533 + " but is multiply assigned to region servers " + -2534 Joiner.on(", ").join(hbi.deployedOn)); -2535 // If we are trying to fix the errors -2536 if (shouldFixAssignments()) { -2537errors.print("Trying to fix assignment error..."); -2538setShouldRerun(); -2539 HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn); -2540 } -2541} else if (inMeta && inHdfs && isDeployed && !deploymentMatchesMeta) { -2542 errors.reportError(ERROR_CODE.SERVER_DOES_NOT_MATCH_META, "Region " -2543 + descriptiveName + " listed in hbase:meta on region server " + -2544 hbi.metaEntry.regionServer + " but found on region server " + -2545 hbi.deployedOn.get(0)); -2546 // If we are trying to fix the errors -2547 if
(shouldFixAssignments()) { -2548errors.print("Trying to fix assignment error..."); -2549setShouldRerun(); -2550 HBaseFsckRepair.fixMultiAssignment(connection, hbi.metaEntry, hbi.deployedOn); -2551 HBaseFsckRepair.waitUntilAssigned(admin, hbi.getHdfsHRI()); -2552 } -2553} else { -2554 errors.reportError(ERROR_CODE.UNKNOWN, "Region " + descriptiveName + -2555 " is in an unforeseen state:" + -2556 " inMeta=" + inMeta + -2557 " inHdfs=" + inHdfs + -2558 " isDeployed=" + isDeployed + -2559 " isMultiplyDeployed=" + isMultiplyDeployed + -2560 " deploymentMatchesMeta=" + deploymentMatchesMeta + -2561 " shouldBeDeployed=" + shouldBeDeployed); -2562} -2563 } -2564 -2565 /** -2566 * Checks tables integrity. Goes over all regions and scans the tables. -2567 *
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/src-html/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.html b/devapidocs/src-html/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.html index 0e37be8..a40aa29 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.html @@ -115,7 +115,7 @@ 107.append(metricsDescription) 108.append(metricsContext) 109.append(metricsJmxContext) -110.hashCode(); +110.toHashCode(); 111 } 112} http://git-wip-us.apache.org/repos/asf/hbase-site/blob/21766f4a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSDumpServlet.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSDumpServlet.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSDumpServlet.html index 51cb1ce..5cef9cd 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSDumpServlet.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSDumpServlet.html @@ -63,87 +63,88 @@ 055} 056 057OutputStream os = response.getOutputStream(); -058PrintWriter out = new PrintWriter(os); +058try (PrintWriter out = new PrintWriter(os)) { 059 -060out.println("RegionServer status for " + hrs.getServerName() +060 out.println("RegionServer status for " + hrs.getServerName() 061+ " as of " + new Date()); 062 -063out.println("\n\nVersion Info:"); -064out.println(LINE); -065dumpVersionInfo(out); +063 out.println("\n\nVersion Info:"); +064 out.println(LINE); +065 dumpVersionInfo(out); 066 -067out.println("\n\nTasks:"); -068out.println(LINE); -069TaskMonitor.get().dumpAsText(out); +067 out.println("\n\nTasks:"); +068 out.println(LINE); +069 TaskMonitor.get().dumpAsText(out); 070 -071out.println("\n\nRowLocks:"); -072out.println(LINE); -073dumpRowLock(hrs, out); +071 out.println("\n\nRowLocks:"); +072 out.println(LINE); +073 dumpRowLock(hrs, out); 074 -075out.println("\n\nExecutors:"); -076out.println(LINE); -077 dumpExecutors(hrs.getExecutorService(), out); +075 out.println("\n\nExecutors:"); +076 out.println(LINE); +077 dumpExecutors(hrs.getExecutorService(), out); 078 -079out.println("\n\nStacks:"); -080out.println(LINE); -081PrintStream ps = new PrintStream(response.getOutputStream(), false, "UTF-8"); -082Threads.printThreadInfo(ps, ""); -083ps.flush(); +079 out.println("\n\nStacks:"); +080 out.println(LINE); +081 PrintStream ps = new PrintStream(response.getOutputStream(), false, "UTF-8"); +082 Threads.printThreadInfo(ps, ""); +083 ps.flush(); 084 -085out.println("\n\nRS Configuration:"); -086out.println(LINE); -087Configuration conf = hrs.getConfiguration(); -088out.flush(); -089conf.writeXml(os); -090os.flush(); +085 out.println("\n\nRS Configuration:"); +086 out.println(LINE); +087 Configuration conf = hrs.getConfiguration(); +088 out.flush(); +089 conf.writeXml(os); +090 os.flush(); 091 -092out.println("\n\nLogs"); -093out.println(LINE); -094long tailKb = getTailKbParam(request); -095LogMonitoring.dumpTailOfLogs(out, tailKb); +092 out.println("\n\nLogs"); +093 out.println(LINE); +094 long tailKb = getTailKbParam(request); +095 LogMonitoring.dumpTailOfLogs(out, tailKb); 096 -097out.println("\n\nRS Queue:"); -098out.println(LINE); -099if(isShowQueueDump(conf)) { -100 dumpQueue(hrs, out); -101} +097 out.println("\n\nRS Queue:"); +098 out.println(LINE); +099 if (isShowQueueDump(conf)) { +100dumpQueue(hrs, out); +101 } 
102 -103out.flush(); -104 } -105 -106 public static void dumpRowLock(HRegionServer hrs, PrintWriter out) { -107StringBuilder sb = new StringBuilder(); -108for (Region region : hrs.getOnlineRegions()) { -109 HRegion hRegion = (HRegion)region; -110 if (hRegion.getLockedRows().size() > 0) { -111for (HRegion.RowLockContext rowLockContext : hRegion.getLockedRows().values()) { -112 sb.setLength(0); -113 sb.append(hRegion.getTableDescriptor().getTableName()).append(",") -114 .append(hRegion.getRegionInfo().getEncodedName()).append(","); -115 sb.append(rowLockContext.toString()); -116 out.println(sb.toString()); -117} -118 } -119} -120 } -121 -122 public static void dumpQueue(HRegionServer hrs, PrintWriter out) -123 throws IOException { -124if (hrs.compactSplitThread != null)
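The substantive change in the RSDumpServlet hunk above is wrapping the servlet's PrintWriter in try-with-resources, so the writer is flushed and closed even when one of the dump steps throws. A minimal sketch of the pattern, with hypothetical names:

    import java.io.OutputStream;
    import java.io.PrintWriter;

    public class DumpSketch {
      // Hypothetical reduction of the servlet change above.
      static void dump(OutputStream os) {
        try (PrintWriter out = new PrintWriter(os)) {
          out.println("RegionServer status");
          // ... sections written here may throw; 'out' is still closed ...
        } // close() runs here and flushes any buffered output
      }
    }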
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d5075d7/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html index afd9ccc..904b921 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.ProcedureExecutorListener.html @@ -30,1916 +30,1984 @@ 022import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions; 023 024import java.io.IOException; -025import java.util.ArrayList; -026import java.util.Arrays; -027import java.util.HashSet; -028import java.util.Iterator; -029import java.util.List; -030import java.util.Map; -031import java.util.Set; -032import java.util.concurrent.atomic.AtomicBoolean; -033import java.util.concurrent.atomic.AtomicInteger; -034import java.util.concurrent.atomic.AtomicLong; -035import java.util.stream.Collectors; -036import java.util.stream.Stream; -037import java.util.concurrent.ConcurrentHashMap; -038import java.util.concurrent.CopyOnWriteArrayList; -039import java.util.concurrent.DelayQueue; -040import java.util.concurrent.TimeUnit; -041 -042import org.apache.commons.logging.Log; -043import org.apache.commons.logging.LogFactory; -044import org.apache.hadoop.conf.Configuration; -045import org.apache.hadoop.hbase.HConstants; -046import org.apache.hadoop.hbase.ProcedureInfo; -047import org.apache.hadoop.hbase.classification.InterfaceAudience; -048import org.apache.hadoop.hbase.classification.InterfaceStability; -049import org.apache.hadoop.hbase.exceptions.IllegalArgumentIOException; -050import org.apache.hadoop.hbase.procedure2.Procedure.LockState; -051import org.apache.hadoop.hbase.procedure2.store.ProcedureStore; -052import org.apache.hadoop.hbase.procedure2.store.ProcedureStore.ProcedureIterator; -053import org.apache.hadoop.hbase.procedure2.util.DelayedUtil; -054import org.apache.hadoop.hbase.procedure2.util.DelayedUtil.DelayedWithTimeout; -055import org.apache.hadoop.hbase.procedure2.util.StringUtils; -056import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; -057import org.apache.hadoop.hbase.security.User; -058import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -059import org.apache.hadoop.hbase.util.NonceKey; -060import org.apache.hadoop.hbase.util.Pair; -061import org.apache.hadoop.hbase.util.Threads; -062 -063/** -064 * Thread Pool that executes the submitted procedures. -065 * The executor has a ProcedureStore associated. -066 * Each operation is logged and on restart the pending procedures are resumed. -067 * -068 * Unless the Procedure code throws an error (e.g. invalid user input) -069 * the procedure will complete (at some point in time), On restart the pending -070 * procedures are resumed and the once failed will be rolledback. 
-071 * -072 * The user can add procedures to the executor via submitProcedure(proc) -073 * check for the finished state via isFinished(procId) -074 * and get the result via getResult(procId) -075 */ -076@InterfaceAudience.Private -077@InterfaceStability.Evolving -078public class ProcedureExecutor<TEnvironment> { -079 private static final Log LOG = LogFactory.getLog(ProcedureExecutor.class); -080 -081 public static final String CHECK_OWNER_SET_CONF_KEY = "hbase.procedure.check.owner.set"; -082 private static final boolean DEFAULT_CHECK_OWNER_SET = false; -083 -084 public static final String WORKER_KEEP_ALIVE_TIME_CONF_KEY = -085 "hbase.procedure.worker.keep.alive.time.msec"; -086 private static final long DEFAULT_WORKER_KEEP_ALIVE_TIME = Long.MAX_VALUE; -087 -088 Testing testing = null; -089 public static class Testing { -090protected boolean killIfSuspended = false; -091protected boolean killBeforeStoreUpdate = false; -092protected boolean toggleKillBeforeStoreUpdate = false; -093 -094protected boolean shouldKillBeforeStoreUpdate() { -095 final boolean kill = this.killBeforeStoreUpdate; -096 if (this.toggleKillBeforeStoreUpdate) { -097this.killBeforeStoreUpdate = !kill; -098LOG.warn("Toggle KILL before store update to: " + this.killBeforeStoreUpdate); -099 } -100 return kill; -101} -102 -103protected boolean shouldKillBeforeStoreUpdate(final boolean isSuspended) { -104 return (isSuspended && !killIfSuspended) ? false : shouldKillBeforeStoreUpdate(); -105} -106 } -107 -108 public interface ProcedureExecutorListener { -109void procedureLoaded(long procId); -110void procedureAdded(long procId); -111void procedureFinished(long procId); -112 } -113 -114 /**
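The class comment above names the executor lifecycle: submitProcedure(proc), isFinished(procId), getResult(procId). A hedged sketch of that loop, assuming the 2.0-era signatures suggested by this file (the ProcedureInfo import appears in the hunk above) and an executor supplied by the caller:

    import org.apache.hadoop.hbase.ProcedureInfo;
    import org.apache.hadoop.hbase.procedure2.Procedure;
    import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;

    public class ProcedureLifecycleSketch {
      // Sketch only: a real executor needs a ProcedureStore and worker threads.
      static <T> ProcedureInfo runToCompletion(ProcedureExecutor<T> executor, Procedure<T> proc)
          throws InterruptedException {
        long procId = executor.submitProcedure(proc); // logged to the associated ProcedureStore
        while (!executor.isFinished(procId)) {
          Thread.sleep(100);                          // poll; pending procedures survive restarts
        }
        return executor.getResult(procId);            // terminal state, plus any failure cause
      }
    }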
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0383a9c2/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html index a945b54..422c076 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/RSRpcServices.LogDelegate.html @@ -92,3454 +92,3410 @@ 084import org.apache.hadoop.hbase.client.VersionInfoUtil; 085import org.apache.hadoop.hbase.conf.ConfigurationObserver; 086import org.apache.hadoop.hbase.exceptions.FailedSanityCheckException; -087import org.apache.hadoop.hbase.exceptions.MergeRegionException; -088import org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException; -089import org.apache.hadoop.hbase.exceptions.ScannerResetException; -090import org.apache.hadoop.hbase.filter.ByteArrayComparable; -091import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; -092import org.apache.hadoop.hbase.ipc.HBaseRPCErrorHandler; -093import org.apache.hadoop.hbase.ipc.HBaseRpcController; -094import org.apache.hadoop.hbase.ipc.PriorityFunction; -095import org.apache.hadoop.hbase.ipc.QosPriority; -096import org.apache.hadoop.hbase.ipc.RpcCallContext; -097import org.apache.hadoop.hbase.ipc.RpcCallback; -098import org.apache.hadoop.hbase.ipc.RpcServer; -099import org.apache.hadoop.hbase.ipc.RpcServer.BlockingServiceAndInterface; -100import org.apache.hadoop.hbase.ipc.RpcServerFactory; -101import org.apache.hadoop.hbase.ipc.RpcServerInterface; -102import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException; -103import org.apache.hadoop.hbase.ipc.ServerRpcController; -104import org.apache.hadoop.hbase.master.MasterRpcServices; -105import org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement; -106import org.apache.hadoop.hbase.quotas.OperationQuota; -107import org.apache.hadoop.hbase.quotas.QuotaUtil; -108import org.apache.hadoop.hbase.quotas.RegionServerRpcQuotaManager; -109import org.apache.hadoop.hbase.quotas.RegionServerSpaceQuotaManager; -110import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot; -111import org.apache.hadoop.hbase.quotas.SpaceViolationPolicyEnforcement; -112import org.apache.hadoop.hbase.regionserver.HRegion.RegionScannerImpl; -113import org.apache.hadoop.hbase.regionserver.Leases.Lease; -114import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException; -115import org.apache.hadoop.hbase.regionserver.Region.Operation; -116import org.apache.hadoop.hbase.regionserver.ScannerContext.LimitScope; -117import org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler; -118import org.apache.hadoop.hbase.regionserver.handler.OpenPriorityRegionHandler; -119import org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler; -120import org.apache.hadoop.hbase.regionserver.wal.WALEdit; -121import org.apache.hadoop.hbase.security.Superusers; -122import org.apache.hadoop.hbase.security.User; -123import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString; -124import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message; -125import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController; -126import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException; -127import org.apache.hadoop.hbase.shaded.com.google.protobuf.TextFormat; -128import 
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations; -129import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -130import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -131import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; -132import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -133import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesRequest; -134import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearCompactionQueuesResponse; -135import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -136import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -137import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; -138import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; -139import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresRequest; -140import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ExecuteProceduresResponse; -141import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; -142import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; -143import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/f391bcef/devapidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html b/devapidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html index 0ca35ed..a8c351f 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/HColumnDescriptor.html @@ -112,694 +112,697 @@ 104 * 105 * @param familyName Column family name. Must be 'printable' -- digit or 106 * letter -- and may not contain a <code>:</code> -107 */ -108 public HColumnDescriptor(final String familyName) { -109this(Bytes.toBytes(familyName)); -110 } -111 -112 /** -113 * Construct a column descriptor specifying only the family name -114 * The other attributes are defaulted. -115 * -116 * @param familyName Column family name. Must be 'printable' -- digit or -117 * letter -- and may not contain a <code>:</code> -118 */ -119 public HColumnDescriptor(final byte [] familyName) { -120this(new ModifyableColumnFamilyDescriptor(familyName)); -121 } -122 -123 /** -124 * Constructor. -125 * Makes a deep copy of the supplied descriptor. -126 * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor. -127 * @param desc The descriptor. -128 */ -129 public HColumnDescriptor(HColumnDescriptor desc) { -130this(desc, true); -131 } -132 -133 protected HColumnDescriptor(HColumnDescriptor desc, boolean deepClone) { -134this(deepClone ? new ModifyableColumnFamilyDescriptor(desc) -135: desc.delegatee); -136 } -137 -138 protected HColumnDescriptor(ModifyableColumnFamilyDescriptor delegate) { -139this.delegatee = delegate; -140 } -141 -142 /** -143 * @param b Family name. -144 * @return <code>b</code> -145 * @throws IllegalArgumentException If not null and not a legitimate family -146 * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because -147 * <code>b</code> can be null when deserializing). Cannot start with a '.' -148 * either. Also Family can not be an empty value or equal "recovered.edits". -149 * @deprecated Use {@link ColumnFamilyDescriptorBuilder#isLegalColumnFamilyName(byte[])}. -150 */ -151 @Deprecated -152 public static byte [] isLegalFamilyName(final byte [] b) { -153return ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(b); -154 } -155 -156 /** -157 * @return Name of this column family -158 */ -159 @Override -160 public byte [] getName() { -161return delegatee.getName(); -162 } -163 -164 /** -165 * @return The name string of this column family -166 */ -167 @Override -168 public String getNameAsString() { -169return delegatee.getNameAsString(); -170 } -171 -172 /** -173 * @param key The key. -174 * @return The value. -175 */ -176 @Override -177 public byte[] getValue(byte[] key) { -178return delegatee.getValue(key); -179 } -180 -181 /** -182 * @param key The key. -183 * @return The value as a string. -184 */ -185 public String getValue(String key) { -186byte[] value = getValue(Bytes.toBytes(key)); -187return value == null ? null : Bytes.toString(value); -188 } -189 -190 @Override -191 public Map<Bytes, Bytes> getValues() { -192return delegatee.getValues(); -193 } -194 -195 /** -196 * @param key The key. -197 * @param value The value.
-198 * @return this (for chained invocation) -199 */ -200 public HColumnDescriptor setValue(byte[] key, byte[] value) { -201 getDelegateeForModification().setValue(key, value); -202return this; -203 } -204 -205 /** -206 * @param key Key whose key and value we're to remove from HCD parameters. -207 */ -208 public HColumnDescriptor remove(final byte [] key) { -209 getDelegateeForModification().removeValue(new Bytes(key)); -210return this; -211 } -212 -213 /** -214 * @param key The key. -215 * @param value The value. -216 * @return this (for chained invocation) -217 */ -218 public HColumnDescriptor setValue(String key, String value) { -219 getDelegateeForModification().setValue(key, value); -220return this; -221 } -222 -223 /** -224 * @return compression type being used for the column family -225 * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 -226 * (<a href="https://issues.apache.org/jira/browse/HBASE-13655">HBASE-13655</a>). -227 * Use {@link #getCompressionType()}. -228 */ -229 @Deprecated -230 public Compression.Algorithm getCompression() { -231return getCompressionType(); -232 } -233 -234 /** -235 * @return compression type being used for the column family for major compaction -236 * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 -237 * (a
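Several setters above return this for chained invocation, and the deprecation notes point at ColumnFamilyDescriptorBuilder as the eventual replacement. A small sketch of the chained style, using only constructors and methods visible in this hunk (the family name and keys are placeholders):

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ChainedSetters {
      public static void main(String[] args) {
        // '@return this (for chained invocation)' allows fluent configuration:
        HColumnDescriptor hcd = new HColumnDescriptor("cf")
            .setValue("custom-key", "custom-value")                  // String overload
            .setValue(Bytes.toBytes("raw-key"), Bytes.toBytes("v")); // byte[] overload
        System.out.println(hcd.getValue("custom-key"));              // -> custom-value
      }
    }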
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileStatusConverter.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileStatusConverter.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileStatusConverter.html index bcb9b8e..310709a 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileStatusConverter.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileStatusConverter.html @@ -50,10 +50,10 @@ 042import org.apache.hadoop.hbase.util.HFileArchiveUtil; 043import org.apache.hadoop.io.MultipleIOException; 044 -045import com.google.common.base.Function; -046import com.google.common.base.Preconditions; -047import com.google.common.collect.Collections2; -048import com.google.common.collect.Lists; +045import org.apache.hadoop.hbase.shaded.com.google.common.base.Function; +046import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions; +047import org.apache.hadoop.hbase.shaded.com.google.common.collect.Collections2; +048import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; 049 050/** 051 * Utility class to handle the removal of HFiles (or the respective {@link StoreFile StoreFiles}) http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileablePath.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileablePath.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileablePath.html index bcb9b8e..310709a 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileablePath.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileablePath.html @@ -50,10 +50,10 @@ 042import org.apache.hadoop.hbase.util.HFileArchiveUtil; 043import org.apache.hadoop.io.MultipleIOException; 044 -045import com.google.common.base.Function; -046import com.google.common.base.Preconditions; -047import com.google.common.collect.Collections2; -048import com.google.common.collect.Lists; +045import org.apache.hadoop.hbase.shaded.com.google.common.base.Function; +046import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions; +047import org.apache.hadoop.hbase.shaded.com.google.common.collect.Collections2; +048import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; 049 050/** 051 * Utility class to handle the removal of HFiles (or the respective {@link StoreFile StoreFiles}) http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileableStoreFile.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileableStoreFile.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileableStoreFile.html index bcb9b8e..310709a 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileableStoreFile.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.FileableStoreFile.html @@ -50,10 +50,10 @@ 042import org.apache.hadoop.hbase.util.HFileArchiveUtil; 043import org.apache.hadoop.io.MultipleIOException; 044 -045import com.google.common.base.Function; -046import com.google.common.base.Preconditions; -047import com.google.common.collect.Collections2; -048import com.google.common.collect.Lists; +045import 
org.apache.hadoop.hbase.shaded.com.google.common.base.Function; +046import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions; +047import org.apache.hadoop.hbase.shaded.com.google.common.collect.Collections2; +048import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists; 049 050/** 051 * Utility class to handle the removal of HFiles (or the respective {@link StoreFile StoreFiles}) http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca5b0275/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.StoreToFile.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.StoreToFile.html b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.StoreToFile.html index bcb9b8e..310709a 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.StoreToFile.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/backup/HFileArchiver.StoreToFile.html @@ -50,10 +50,10 @@ 042import org.apache.hadoop.hbase.util.HFileArchiveUtil; 043import org.apache.hadoop.io.MultipleIOException; 044 -045import com.google.common.base.Function; -046import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9eba7fcf/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerMonitor.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerMonitor.html b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerMonitor.html index e1fbce4..873e17f 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerMonitor.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/tool/Canary.RegionServerMonitor.html @@ -1089,497 +1089,498 @@ 1081} 1082 } 1083 Map<String, AtomicLong> actualReadTableLatency = regionSink.getReadLatencyMap(); -1084 for (String tableName : this.configuredReadTableTimeouts.keySet()) { -1085if (actualReadTableLatency.containsKey(tableName)) { -1086 Long actual = actualReadTableLatency.get(tableName).longValue(); -1087 Long configured = this.configuredReadTableTimeouts.get(tableName); -1088 LOG.info("Read operation for " + tableName + " took " + actual + -1089" ms. The configured read timeout was " + configured + " ms."); -1090 if (actual > configured) { -1091LOG.error("Read operation for " + tableName + " exceeded the configured read timeout."); -1092 } -1093} else { -1094 LOG.error("Read operation for " + tableName + " failed!"); -1095} -1096 } -1097 if (this.writeSniffing) { -1098String writeTableStringName = this.writeTableName.getNameAsString(); -1099long actualWriteLatency = regionSink.getWriteLatency().longValue(); -1100LOG.info("Write operation for " + writeTableStringName + " took " + actualWriteLatency + " ms. The configured write timeout was " + -1101 this.configuredWriteTableTimeout + " ms."); -1102// Check that the writeTable write operation latency does not exceed the configured timeout. -1103if (actualWriteLatency > this.configuredWriteTableTimeout) { -1104 LOG.error("Write operation for " + writeTableStringName + " exceeded the configured write timeout."); -1105} -1106 } -1107} catch (Exception e) { -1108 LOG.error("Run regionMonitor failed", e); -1109 this.errorCode = ERROR_EXIT_CODE; -1110} -1111 } -1112 this.done = true; -1113} -1114 -1115private String[] generateMonitorTables(String[] monitorTargets) throws IOException { -1116 String[] returnTables = null; -1117 -1118 if (this.useRegExp) { -1119Pattern pattern = null; -1120HTableDescriptor[] tds = null; -1121Set<String> tmpTables = new TreeSet<>(); -1122try { -1123 if (LOG.isDebugEnabled()) { -1124 LOG.debug(String.format("reading list of tables")); -1125 } -1126 tds = this.admin.listTables(pattern); -1127 if (tds == null) { -1128tds = new HTableDescriptor[0]; -1129 } -1130 for (String monitorTarget : monitorTargets) { -1131pattern = Pattern.compile(monitorTarget); -1132for (HTableDescriptor td : tds) { -1133 if (pattern.matcher(td.getNameAsString()).matches()) { -1134 tmpTables.add(td.getNameAsString()); -1135 } -1136} -1137 } -1138} catch (IOException e) { -1139 LOG.error("Communicate with admin failed", e); -1140 throw e; -1141} -1142 -1143if (tmpTables.size() > 0) { -1144 returnTables = tmpTables.toArray(new String[tmpTables.size()]); -1145} else { -1146 String msg = "No HTable found, tablePattern:" + Arrays.toString(monitorTargets); -1147 LOG.error(msg); -1148 this.errorCode = INIT_ERROR_EXIT_CODE; -1149 throw new TableNotFoundException(msg); -1150} -1151 } else { -1152returnTables = monitorTargets; -1153 } -1154 -1155 return returnTables; -1156} -1157 -1158/* -1159 * canary entry point to monitor all the tables.
-1160 */ -1161private List<Future<Void>> sniff(TaskType taskType, RegionStdOutSink regionSink) throws Exception { -1162 if (LOG.isDebugEnabled()) { -1163LOG.debug(String.format("reading list of tables")); -1164 } -1165 List<Future<Void>> taskFutures = new LinkedList<>(); -1166 for (HTableDescriptor table : admin.listTables()) { -1167if (admin.isTableEnabled(table.getTableName()) -1168 && (!table.getTableName().equals(writeTableName))) { -1169 AtomicLong readLatency = regionSink.initializeAndGetReadLatencyForTable(table.getNameAsString()); -1170 taskFutures.addAll(Canary.sniff(admin, sink, table, executor, taskType,
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/17128d27/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html index feb42ea..4bd98f4 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/HBaseAdmin.RestoreSnapshotFuture.html @@ -185,4189 +185,4266 @@ 177import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest; 178import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotRequest; 179import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SnapshotResponse; -180import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest; -181import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest; -182import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse; -183import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; -184import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; -185import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; -186import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse; -187import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; -188import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; -189import org.apache.hadoop.hbase.snapshot.HBaseSnapshotException; -190import org.apache.hadoop.hbase.snapshot.RestoreSnapshotException; -191import org.apache.hadoop.hbase.snapshot.SnapshotCreationException; -192import org.apache.hadoop.hbase.snapshot.UnknownSnapshotException; -193import org.apache.hadoop.hbase.util.Addressing; -194import org.apache.hadoop.hbase.util.Bytes; -195import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -196import org.apache.hadoop.hbase.util.ForeignExceptionUtil; -197import org.apache.hadoop.hbase.util.Pair; -198import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; -199import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; -200import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; -201import org.apache.hadoop.ipc.RemoteException; -202import org.apache.hadoop.util.StringUtils; -203import org.apache.zookeeper.KeeperException; -204 -205import com.google.common.annotations.VisibleForTesting; -206import com.google.protobuf.Descriptors; -207import com.google.protobuf.Message; -208import com.google.protobuf.RpcController; -209import java.util.stream.Collectors; -210 -211/** -212 * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that -213 * this is an HBase-internal class as defined in -214 * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html -215 * There are no guarantees for backwards source / binary compatibility and methods or class can -216 * change or go away without deprecation. -217 * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing -218 * an HBaseAdmin directly. 
-219 * -220 * <p>Connection should be an <i>unmanaged</i> connection obtained via -221 * {@link ConnectionFactory#createConnection(Configuration)} -222 * -223 * @see ConnectionFactory -224 * @see Connection -225 * @see Admin -226 */ -227@InterfaceAudience.Private -228@InterfaceStability.Evolving -229public class HBaseAdmin implements Admin { -230 private static final Log LOG = LogFactory.getLog(HBaseAdmin.class); -231 -232 private static final String ZK_IDENTIFIER_PREFIX = "hbase-admin-on-"; +180import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest; +181import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionResponse; +182import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.StopMasterRequest; +183import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableRequest; +184import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.TruncateTableResponse; +185import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.UnassignRegionRequest; +186import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; +187import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; +188import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.GetReplicationPeerConfigResponse; +189import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; +190import org.apache.hadoop.hbase.snapshot.ClientSnapshotDescriptionUtils; +191import
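The javadoc above is explicit that HBaseAdmin is HBase-internal and that callers should obtain an Admin from an unmanaged Connection. A minimal sketch of that prescribed path:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AdminSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Unmanaged connection via ConnectionFactory; never construct HBaseAdmin directly.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          System.out.println("Tables: " + admin.listTableNames().length);
        }
      }
    }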
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2777c693/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.DummyStoreEngine.html -- diff --git a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.DummyStoreEngine.html b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.DummyStoreEngine.html index 4196a6c..6c65fd1 100644 --- a/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.DummyStoreEngine.html +++ b/testdevapidocs/src-html/org/apache/hadoop/hbase/regionserver/TestStore.DummyStoreEngine.html @@ -114,1476 +114,1636 @@ 106import org.mockito.Mockito; 107 108import com.google.common.collect.Lists; -109 -110/** -111 * Test class for the Store -112 */ -113@Category({RegionServerTests.class, MediumTests.class}) -114public class TestStore { -115 private static final Log LOG = LogFactory.getLog(TestStore.class); -116 @Rule public TestName name = new TestName(); -117 -118 HStore store; -119 byte [] table = Bytes.toBytes("table"); -120 byte [] family = Bytes.toBytes("family"); +109import org.apache.hadoop.hbase.filter.Filter; +110import org.apache.hadoop.hbase.filter.FilterBase; +111import static org.junit.Assert.assertEquals; +112import static org.junit.Assert.assertTrue; +113 +114/** +115 * Test class for the Store +116 */ +117@Category({RegionServerTests.class, MediumTests.class}) +118public class TestStore { +119 private static final Log LOG = LogFactory.getLog(TestStore.class); +120 @Rule public TestName name = new TestName(); 121 -122 byte [] row = Bytes.toBytes("row"); -123 byte [] row2 = Bytes.toBytes("row2"); -124 byte [] qf1 = Bytes.toBytes("qf1"); -125 byte [] qf2 = Bytes.toBytes("qf2"); -126 byte [] qf3 = Bytes.toBytes("qf3"); -127 byte [] qf4 = Bytes.toBytes("qf4"); -128 byte [] qf5 = Bytes.toBytes("qf5"); -129 byte [] qf6 = Bytes.toBytes("qf6"); -130 -131 NavigableSet<byte[]> qualifiers = new ConcurrentSkipListSet<>(Bytes.BYTES_COMPARATOR); -132 -133 List<Cell> expected = new ArrayList<>(); -134 List<Cell> result = new ArrayList<>(); -135 -136 long id = System.currentTimeMillis(); -137 Get get = new Get(row); -138 -139 private HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); -140 private final String DIR = TEST_UTIL.getDataTestDir("TestStore").toString(); -141 +122 HStore store; +123 byte [] table = Bytes.toBytes("table"); +124 byte [] family = Bytes.toBytes("family"); +125 +126 byte [] row = Bytes.toBytes("row"); +127 byte [] row2 = Bytes.toBytes("row2"); +128 byte [] qf1 = Bytes.toBytes("qf1"); +129 byte [] qf2 = Bytes.toBytes("qf2"); +130 byte [] qf3 = Bytes.toBytes("qf3"); +131 byte [] qf4 = Bytes.toBytes("qf4"); +132 byte [] qf5 = Bytes.toBytes("qf5"); +133 byte [] qf6 = Bytes.toBytes("qf6"); +134 +135 NavigableSet<byte[]> qualifiers = new ConcurrentSkipListSet<>(Bytes.BYTES_COMPARATOR); +136 +137 List<Cell> expected = new ArrayList<>(); +138 List<Cell> result = new ArrayList<>(); +139 +140 long id = System.currentTimeMillis(); +141 Get get = new Get(row); 142 -143 /** -144 * Setup -145 * @throws IOException -146 */ -147 @Before -148 public void setUp() throws IOException { -149qualifiers.add(qf1); -150qualifiers.add(qf3); -151qualifiers.add(qf5); -152 -153Iterator<byte[]> iter = qualifiers.iterator(); -154while(iter.hasNext()){ -155 byte [] next = iter.next(); -156 expected.add(new KeyValue(row, family, next, 1, (byte[])null)); -157 get.addColumn(family, next); -158} -159 } -160 -161 private void init(String methodName) throws IOException { -162init(methodName, TEST_UTIL.getConfiguration()); +143 private
HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); +144 private final String DIR = TEST_UTIL.getDataTestDir("TestStore").toString(); +145 +146 +147 /** +148 * Setup +149 * @throws IOException +150 */ +151 @Before +152 public void setUp() throws IOException { +153qualifiers.add(qf1); +154qualifiers.add(qf3); +155qualifiers.add(qf5); +156 +157Iterator<byte[]> iter = qualifiers.iterator(); +158while(iter.hasNext()){ +159 byte [] next = iter.next(); +160 expected.add(new KeyValue(row, family, next, 1, (byte[])null)); +161 get.addColumn(family, next); +162} 163 } 164 -165 private Store init(String methodName, Configuration conf) -166 throws IOException { -167HColumnDescriptor hcd = new HColumnDescriptor(family); -168// some of the tests write 4 versions and then flush -169// (with HBASE-4241, lower versions are collected on flush) -170hcd.setMaxVersions(4); -171return init(methodName, conf, hcd); -172 } -173 -174 private Store init(String methodName, Configuration conf, -175 HColumnDescriptor hcd) throws IOException { -176HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(table)); -177return init(methodName, conf,
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/90c7dfe4/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html index 75db22d..99a09f9 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.TruncateTableProcedureBiConsumer.html @@ -37,2710 +37,2816 @@ 029import java.util.List; 030import java.util.Map; 031import java.util.Optional; -032import java.util.concurrent.CompletableFuture; -033import java.util.concurrent.TimeUnit; -034import java.util.concurrent.atomic.AtomicReference; -035import java.util.function.BiConsumer; -036import java.util.regex.Pattern; -037import java.util.stream.Collectors; -038 -039import com.google.common.annotations.VisibleForTesting; -040 -041import io.netty.util.Timeout; -042import io.netty.util.TimerTask; -043 -044import java.util.stream.Stream; -045 -046import org.apache.commons.io.IOUtils; -047import org.apache.commons.logging.Log; -048import org.apache.commons.logging.LogFactory; -049import org.apache.hadoop.hbase.ClusterStatus; -050import org.apache.hadoop.hbase.HRegionInfo; -051import org.apache.hadoop.hbase.HRegionLocation; -052import org.apache.hadoop.hbase.MetaTableAccessor; -053import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; -054import org.apache.hadoop.hbase.NotServingRegionException; -055import org.apache.hadoop.hbase.ProcedureInfo; -056import org.apache.hadoop.hbase.RegionLoad; -057import org.apache.hadoop.hbase.RegionLocations; -058import org.apache.hadoop.hbase.ServerName; -059import org.apache.hadoop.hbase.NamespaceDescriptor; -060import org.apache.hadoop.hbase.HConstants; -061import org.apache.hadoop.hbase.TableExistsException; -062import org.apache.hadoop.hbase.TableName; -063import org.apache.hadoop.hbase.AsyncMetaTableAccessor; -064import org.apache.hadoop.hbase.TableNotDisabledException; -065import org.apache.hadoop.hbase.TableNotEnabledException; -066import org.apache.hadoop.hbase.TableNotFoundException; -067import org.apache.hadoop.hbase.UnknownRegionException; -068import org.apache.hadoop.hbase.classification.InterfaceAudience; -069import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; -070import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; -071import org.apache.hadoop.hbase.client.Scan.ReadType; -072import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; -073import org.apache.hadoop.hbase.client.replication.TableCFs; -074import org.apache.hadoop.hbase.exceptions.DeserializationException; -075import org.apache.hadoop.hbase.ipc.HBaseRpcController; -076import org.apache.hadoop.hbase.quotas.QuotaFilter; -077import org.apache.hadoop.hbase.quotas.QuotaSettings; -078import org.apache.hadoop.hbase.quotas.QuotaTableUtil; -079import org.apache.hadoop.hbase.replication.ReplicationException; -080import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -081import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -082import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; -083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -084import 
org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -086import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; -089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; -090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; -091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; -092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; -093import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; -094import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoRequest; -095import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse; -096import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadRequest; -097import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionLoadResponse; -098import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/0821e51a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html index 6d0fff2..0c07a2f 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.AckHandler.html @@ -34,552 +34,553 @@ 026import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus; 027import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY; 028 -029import io.netty.buffer.ByteBuf; -030import io.netty.buffer.ByteBufAllocator; -031import io.netty.channel.Channel; -032import io.netty.channel.ChannelHandler.Sharable; -033import io.netty.channel.ChannelHandlerContext; -034import io.netty.channel.EventLoop; -035import io.netty.channel.SimpleChannelInboundHandler; -036import io.netty.handler.codec.protobuf.ProtobufDecoder; -037import io.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder; -038import io.netty.handler.timeout.IdleStateEvent; -039import io.netty.handler.timeout.IdleStateHandler; -040import io.netty.util.concurrent.Promise; -041import io.netty.util.concurrent.PromiseCombiner; -042 -043import java.io.IOException; -044import java.nio.ByteBuffer; -045import java.util.ArrayDeque; -046import java.util.Collection; -047import java.util.Collections; -048import java.util.Deque; -049import java.util.IdentityHashMap; -050import java.util.List; -051import java.util.Set; -052import java.util.concurrent.CompletableFuture; -053import java.util.concurrent.TimeUnit; -054import java.util.function.Supplier; -055 -056import org.apache.hadoop.conf.Configuration; -057import org.apache.hadoop.crypto.Encryptor; -058import org.apache.hadoop.fs.Path; -059import org.apache.hadoop.hbase.classification.InterfaceAudience; -060import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.CancelOnClose; -061import org.apache.hadoop.hbase.util.CancelableProgressable; -062import org.apache.hadoop.hbase.util.FSUtils; -063import org.apache.hadoop.hdfs.DFSClient; -064import org.apache.hadoop.hdfs.DistributedFileSystem; -065import org.apache.hadoop.hdfs.protocol.ClientProtocol; -066import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -067import org.apache.hadoop.hdfs.protocol.LocatedBlock; -068import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; -069import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; -070import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto; -071import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; -072import org.apache.hadoop.util.DataChecksum; -073 -074import com.google.common.annotations.VisibleForTesting; -075 -076/** -077 * An asynchronous HDFS output stream implementation which fans out data to datanode and only -078 * supports writing file with only one block. -079 * p -080 * Use the createOutput method in {@link FanOutOneBlockAsyncDFSOutputHelper} to create. The mainly -081 * usage of this class is implementing WAL, so we only expose a little HDFS configurations in the -082 * method. 
And we place it here under the util package because we want to make it independent of the WAL -083 * implementation and thus easier to move it to the HDFS project eventually. -084 * <p> -085 * Note that all connections to datanodes will run in the same {@link EventLoop}, which means we only -086 * need one thread here. But be careful: we do some blocking operations in the {@link #close()} and -087 * {@link #recoverAndClose(CancelableProgressable)} methods, so do not call them inside the -088 * {@link EventLoop}. And for {@link #write(byte[])}, {@link #write(byte[], int, int)}, -089 * {@link #buffered()} and {@link #flush(boolean)}, if you call them outside the {@link EventLoop}, -090 * there will be an extra context switch. -091 * <p> -092 * Advantages compared to DFSOutputStream: -093 * <ol> -094 * <li>The fan-out mechanism. This reduces latency.</li> -095 * <li>The asynchronous WAL can run in the same EventLoop; we can just call write and flush -096 * inside the EventLoop thread, so generally we only need one thread to do everything.</li> -097 * <li>Fail-fast when a connection to a datanode errors. The WAL implementation can open a new writer -098 * ASAP.</li> -099 * <li>We benefit from netty's ByteBuf management mechanism.</li> -100 * </ol> -101 */ -102@InterfaceAudience.Private -103public class FanOutOneBlockAsyncDFSOutput implements AsyncFSOutput { -104 -105 // The MAX_PACKET_SIZE is 16MB, but it includes the header size and checksum size. So here we set a -106 // smaller
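A usage sketch may make the threading contract above concrete. This is not code from the diff: the createOutput(...) parameter list and the CompletableFuture returned by flush(boolean) are assumptions about this snapshot of the helper API, so treat it as a hypothetical illustration only.

// Hypothetical sketch of the usage pattern the Javadoc above describes.
// Assumptions: the createOutput(...) parameters and flush(boolean) returning
// a CompletableFuture are guesses; check FanOutOneBlockAsyncDFSOutputHelper
// in your version for the real signatures.
import java.nio.charset.StandardCharsets;
import java.util.concurrent.CompletableFuture;

import io.netty.channel.EventLoop;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput;
import org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper;
import org.apache.hadoop.hdfs.DistributedFileSystem;

class WalWriteSketch {
  void writeOneEdit(DistributedFileSystem dfs, EventLoop eventLoop) throws Exception {
    // All datanode connections of this output share the single EventLoop.
    FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(
        dfs, new Path("/hbase/WALs/sketch"), true /* overwrite */, false /* createParent */,
        (short) 3 /* replication */, 64 * 1024 * 1024 /* blockSize */, eventLoop);
    out.write("edit".getBytes(StandardCharsets.UTF_8)); // cheap; one extra context switch off the EventLoop
    CompletableFuture<Long> acked = out.flush(false);   // fans the packet out to every datanode
    acked.get();                                        // completes once all datanodes have acked
    out.close();                                        // blocking: never call inside the EventLoop
  }
}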
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/2d27954a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html index 16c0042..71844ce 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.DeleteColumnFamilyProcedureBiConsumer.html @@ -126,2499 +126,2543 @@ 118import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DisableTableResponse; 119import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest; 120import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse; -121import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableRequest; -122import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.EnableTableResponse; -123import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnRequest; -124import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteColumnResponse; -125import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureRequest; -126import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ExecProcedureResponse; -127import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusRequest; -128import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetClusterStatusResponse; -129import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest; -130import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse; -131import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest; -132import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse; -133import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; -134import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; -135import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest; -136import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse; -137import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsRequest; -138import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableDescriptorsResponse; -139import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesRequest; -140import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetTableNamesResponse; -141import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableRequest; -142import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.CreateTableResponse; -143import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableRequest; -144import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.DeleteTableResponse; -145import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledRequest; -146import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsBalancerEnabledResponse; -147import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest; -148import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse; -149import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest; -150import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledResponse; -151import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeRequest; -152import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsInMaintenanceModeResponse; -153import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest; -154import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse; -155import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest; -156import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse; -157import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; -158import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; -159import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html index 49fff53..49f7926 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html +++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AbortProcedureFuture.html @@ -122,7 +122,7 @@ var activeTableTab = "activeTableTab"; -private static class HBaseAdmin.AbortProcedureFuture +private static class HBaseAdmin.AbortProcedureFuture extends HBaseAdmin.ProcedureFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true; title="class or interface in java.lang">Boolean @@ -235,7 +235,7 @@ extends isAbortInProgress -privateboolean isAbortInProgress +privateboolean isAbortInProgress @@ -252,7 +252,7 @@ extends AbortProcedureFuture -publicAbortProcedureFuture(HBaseAdminadmin, +publicAbortProcedureFuture(HBaseAdminadmin, http://docs.oracle.com/javase/8/docs/api/java/lang/Long.html?is-external=true; title="class or interface in java.lang">LongprocId, http://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true; title="class or interface in java.lang">BooleanabortProcResponse) @@ -271,7 +271,7 @@ extends get -publichttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true; title="class or interface in java.lang">Booleanget(longtimeout, +publichttp://docs.oracle.com/javase/8/docs/api/java/lang/Boolean.html?is-external=true; title="class or interface in java.lang">Booleanget(longtimeout, http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/TimeUnit.html?is-external=true; title="class or interface in java.util.concurrent">TimeUnitunit) throws http://docs.oracle.com/javase/8/docs/api/java/lang/InterruptedException.html?is-external=true; title="class or interface in java.lang">InterruptedException, http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutionException.html?is-external=true; title="class or interface in java.util.concurrent">ExecutionException, http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html index dc54cdc..dd16dde 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html +++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.AddColumnFamilyFuture.html @@ -132,7 +132,7 @@ var activeTableTab = "activeTableTab"; -private static class HBaseAdmin.AddColumnFamilyFuture +private static class HBaseAdmin.AddColumnFamilyFuture extends HBaseAdmin.ModifyTableFuture @@ -246,7 +246,7 @@ extends AddColumnFamilyFuture -publicAddColumnFamilyFuture(HBaseAdminadmin, +publicAddColumnFamilyFuture(HBaseAdminadmin, TableNametableName, org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponseresponse) @@ -265,7 +265,7 @@ extends getOperationType -publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetOperationType() +publichttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringgetOperationType() 
Overrides: getOperationTypein classHBaseAdmin.ModifyTableFuture http://git-wip-us.apache.org/repos/asf/hbase-site/blob/9fb0764b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html index 51e7fc6..e6dd218 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html +++ b/devapidocs/org/apache/hadoop/hbase/client/HBaseAdmin.CreateTableFuture.html @@ -127,7 +127,7 @@ var activeTableTab = "activeTableTab"; -private static class HBaseAdmin.CreateTableFuture +private static class HBaseAdmin.CreateTableFuture extends HBaseAdmin.TableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void @@ -170,7 +170,7 @@ extends Field and Description -private HTableDescriptor +private
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b3b50f22/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html index b0ae9ad..ed6e17d 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/assignment/AssignmentManager.RegionInTransitionChore.html @@ -30,1704 +30,1791 @@ 022import java.util.ArrayList; 023import java.util.Collection; 024import java.util.Collections; -025import java.util.HashMap; -026import java.util.HashSet; -027import java.util.List; -028import java.util.Map; -029import java.util.Set; -030import java.util.concurrent.CopyOnWriteArrayList; -031import java.util.concurrent.Future; -032import java.util.concurrent.TimeUnit; -033import java.util.concurrent.atomic.AtomicBoolean; -034import java.util.concurrent.locks.Condition; -035import java.util.concurrent.locks.ReentrantLock; -036import java.util.stream.Collectors; -037 -038import org.apache.commons.logging.Log; -039import org.apache.commons.logging.LogFactory; -040import org.apache.hadoop.conf.Configuration; -041import org.apache.hadoop.hbase.HBaseIOException; -042import org.apache.hadoop.hbase.HConstants; -043import org.apache.hadoop.hbase.HRegionInfo; -044import org.apache.hadoop.hbase.PleaseHoldException; -045import org.apache.hadoop.hbase.RegionException; -046import org.apache.hadoop.hbase.RegionStateListener; -047import org.apache.hadoop.hbase.ServerName; -048import org.apache.hadoop.hbase.TableName; -049import org.apache.hadoop.hbase.classification.InterfaceAudience; -050import org.apache.hadoop.hbase.client.TableState; -051import org.apache.hadoop.hbase.exceptions.UnexpectedStateException; -052import org.apache.hadoop.hbase.master.balancer.FavoredStochasticBalancer; -053import org.apache.hadoop.hbase.favored.FavoredNodesManager; -054import org.apache.hadoop.hbase.favored.FavoredNodesPromoter; -055import org.apache.hadoop.hbase.master.AssignmentListener; -056import org.apache.hadoop.hbase.master.LoadBalancer; -057import org.apache.hadoop.hbase.master.MasterServices; -058import org.apache.hadoop.hbase.master.MetricsAssignmentManager; -059import org.apache.hadoop.hbase.master.NoSuchProcedureException; -060import org.apache.hadoop.hbase.master.RegionPlan; -061import org.apache.hadoop.hbase.master.RegionState; -062import org.apache.hadoop.hbase.master.RegionState.State; -063import org.apache.hadoop.hbase.master.ServerListener; -064import org.apache.hadoop.hbase.master.TableStateManager; -065import org.apache.hadoop.hbase.master.assignment.RegionStates.RegionStateNode; -066import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerState; -067import org.apache.hadoop.hbase.master.assignment.RegionStates.ServerStateNode; -068// TODO: why are they here? 
-069import org.apache.hadoop.hbase.master.normalizer.NormalizationPlan.PlanType; -070import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv; -071import org.apache.hadoop.hbase.master.procedure.MasterProcedureScheduler; -072import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait; -073import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure; -074import org.apache.hadoop.hbase.procedure2.Procedure; -075import org.apache.hadoop.hbase.procedure2.ProcedureEvent; -076import org.apache.hadoop.hbase.procedure2.ProcedureExecutor; -077import org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore; -078import org.apache.hadoop.hbase.procedure2.util.StringUtils; -079import org.apache.hadoop.hbase.quotas.QuotaExceededException; -080import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -081import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition; -082import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode; -083import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionRequest; -084import org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos.ReportRegionStateTransitionResponse; -085import org.apache.hadoop.hbase.util.Bytes; -086import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -087import org.apache.hadoop.hbase.util.Pair; -088import org.apache.hadoop.hbase.util.Threads; -089 -090import com.google.common.annotations.VisibleForTesting; +025import java.util.Comparator; +026import java.util.HashMap; +027import java.util.HashSet; +028import java.util.List; +029import java.util.Map; +030import java.util.Set; +031import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotType.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotType.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotType.html index b59a2a6..aadcd01 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotType.html +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/SnapshotType.html @@ -148,12 +148,6 @@ the order they are declared. -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void -AsyncHBaseAdmin.snapshot(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringsnapshotName, -TableNametableName, -SnapshotTypetype) - - void Admin.snapshot(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringsnapshotName, TableNametableName, @@ -161,14 +155,14 @@ the order they are declared. Create typed snapshot of the table. - + void HBaseAdmin.snapshot(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringsnapshotName, TableNametableName, SnapshotTypetype) - -http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void + +default http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/lang/Void.html?is-external=true; title="class or interface in java.lang">Void AsyncAdmin.snapshot(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringsnapshotName, TableNametableName, SnapshotTypetype) http://git-wip-us.apache.org/repos/asf/hbase-site/blob/ca9f6925/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html index f31e596..ef45d65 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html +++ b/devapidocs/org/apache/hadoop/hbase/client/class-use/TableDescriptor.html @@ -222,8 +222,8 @@ private http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListTableDescriptor -AsyncHBaseAdmin.batchTableOperations(http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true; title="class or interface in java.util.regex">Patternpattern, -AsyncHBaseAdmin.TableOperatoroperator, +RawAsyncHBaseAdmin.batchTableOperations(http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true; title="class or interface in 
java.util.regex">Patternpattern, +RawAsyncHBaseAdmin.TableOperatoroperator, http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringoperationType) @@ -238,14 +238,22 @@ http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in java.util.concurrent">CompletableFuturehttp://docs.oracle.com/javase/8/docs/api/java/util/List.html?is-external=true; title="class or interface in java.util">ListTableDescriptor -AsyncHBaseAdmin.disableTables(http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true; title="class or interface in java.util.regex">Patternpattern) +RawAsyncHBaseAdmin.deleteTables(http://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html?is-external=true; title="class or interface in java.util.regex">Patternpattern) http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/CompletableFuture.html?is-external=true; title="class or interface in
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/8e3b63ca/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html index c895448..545d4da 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.LocalityBasedCandidateGenerator.html @@ -1294,425 +1294,426 @@ 1286 } 1287 1288 // We normalize locality to be a score between 0 and 1.0 representing how good it -1289 // is compared to how good it could be -1290 locality /= bestLocality; -1291} -1292 -1293@Override -1294protected void regionMoved(int region, int oldServer, int newServer) { -1295 int oldEntity = type == LocalityType.SERVER ? oldServer : cluster.serverIndexToRackIndex[oldServer]; -1296 int newEntity = type == LocalityType.SERVER ? newServer : cluster.serverIndexToRackIndex[newServer]; -1297 if (this.services == null) { -1298return; -1299 } -1300 double localityDelta = getWeightedLocality(region, newEntity) - getWeightedLocality(region, oldEntity); -1301 double normalizedDelta = localityDelta / bestLocality; -1302 locality += normalizedDelta; -1303} -1304 -1305@Override -1306double cost() { -1307 return 1 - locality; -1308} -1309 -1310private int getMostLocalEntityForRegion(int region) { -1311 return cluster.getOrComputeRegionsToMostLocalEntities(type)[region]; -1312} -1313 -1314private double getWeightedLocality(int region, int entity) { -1315 return cluster.getOrComputeWeightedLocality(region, entity, type); -1316} -1317 -1318 } -1319 -1320 static class ServerLocalityCostFunction extends LocalityBasedCostFunction { -1321 -1322private static final String LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.localityCost"; -1323private static final float DEFAULT_LOCALITY_COST = 25; -1324 -1325 ServerLocalityCostFunction(Configuration conf, MasterServices srv) { -1326 super( -1327 conf, -1328 srv, -1329 LocalityType.SERVER, -1330 LOCALITY_COST_KEY, -1331 DEFAULT_LOCALITY_COST -1332 ); -1333} -1334 -1335@Override -1336int regionIndexToEntityIndex(int region) { -1337 return cluster.regionIndexToServerIndex[region]; -1338} -1339 } -1340 -1341 static class RackLocalityCostFunction extends LocalityBasedCostFunction { -1342 -1343private static final String RACK_LOCALITY_COST_KEY = "hbase.master.balancer.stochastic.rackLocalityCost"; -1344private static final float DEFAULT_RACK_LOCALITY_COST = 15; -1345 -1346public RackLocalityCostFunction(Configuration conf, MasterServices services) { -1347 super( -1348 conf, -1349 services, -1350 LocalityType.RACK, -1351 RACK_LOCALITY_COST_KEY, -1352 DEFAULT_RACK_LOCALITY_COST -1353 ); -1354} -1355 -1356@Override -1357int regionIndexToEntityIndex(int region) { -1358 return cluster.getRackForRegion(region); -1359} -1360 } -1361 -1362 /** -1363 * Base class that allows writing cost functions from a rolling average of some -1364 * number from RegionLoad.
-1365 */ -1366 abstract static class CostFromRegionLoadFunction extends CostFunction { -1367 -1368private ClusterStatus clusterStatus = null; -1369private Map<String, Deque<BalancerRegionLoad>> loads = null; -1370private double[] stats = null; -1371 CostFromRegionLoadFunction(Configuration conf) { -1372 super(conf); -1373} -1374 -1375void setClusterStatus(ClusterStatus status) { -1376 this.clusterStatus = status; -1377} -1378 -1379void setLoads(Map<String, Deque<BalancerRegionLoad>> l) { -1380 this.loads = l; -1381} -1382 -1383@Override -1384double cost() { -1385 if (clusterStatus == null || loads == null) { -1386return 0; -1387 } -1388 -1389 if (stats == null || stats.length != cluster.numServers) { -1390stats = new double[cluster.numServers]; -1391 } -1392 -1393 for (int i = 0; i < stats.length; i++) { -1394// Cost this server has from RegionLoad -1395long cost = 0; -1396 -1397// for every region on this server get the rl -1398for (int regionIndex : cluster.regionsPerServer[i]) { -1399 Collection<BalancerRegionLoad> regionLoadList = cluster.regionLoads[regionIndex]; -1400 -1401 // Now if we found a region load get the type of cost that was
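To make the "rolling average of some number from RegionLoad" idea concrete, here is a hypothetical, self-contained illustration of the pattern. RollingAverageCost is invented for this sketch and is not part of HBase; the real base class keeps a Deque of BalancerRegionLoad samples per region, as the diff shows.

// Hypothetical illustration of the CostFromRegionLoadFunction pattern: keep a
// bounded window of a per-region metric and cost an entity by its recent average.
import java.util.ArrayDeque;
import java.util.Deque;

class RollingAverageCost {
  private final Deque<Double> window = new ArrayDeque<>();
  private final int windowSize;

  RollingAverageCost(int windowSize) { this.windowSize = windowSize; }

  // Record the latest sample of the metric (e.g. a read request count).
  void record(double sample) {
    if (window.size() == windowSize) {
      window.removeFirst(); // evict the oldest sample
    }
    window.addLast(sample);
  }

  // Rolling average over the retained samples; 0 when nothing was recorded yet.
  double cost() {
    return window.isEmpty()
        ? 0.0
        : window.stream().mapToDouble(Double::doubleValue).sum() / window.size();
  }
}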
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/aecb1286/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html index 01496d6..dc12c09 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.CreateNamespaceProcedureBiConsumer.html @@ -48,2406 +48,2267 @@ 040 041import io.netty.util.Timeout; 042import io.netty.util.TimerTask; -043import java.util.stream.Stream; -044import org.apache.commons.io.IOUtils; -045import org.apache.commons.logging.Log; -046import org.apache.commons.logging.LogFactory; -047import org.apache.hadoop.hbase.HRegionInfo; -048import org.apache.hadoop.hbase.HRegionLocation; -049import org.apache.hadoop.hbase.MetaTableAccessor; -050import org.apache.hadoop.hbase.MetaTableAccessor.QueryType; -051import org.apache.hadoop.hbase.NotServingRegionException; -052import org.apache.hadoop.hbase.ProcedureInfo; -053import org.apache.hadoop.hbase.RegionLocations; -054import org.apache.hadoop.hbase.ServerName; -055import org.apache.hadoop.hbase.NamespaceDescriptor; -056import org.apache.hadoop.hbase.HConstants; -057import org.apache.hadoop.hbase.TableExistsException; -058import org.apache.hadoop.hbase.TableName; -059import org.apache.hadoop.hbase.AsyncMetaTableAccessor; -060import org.apache.hadoop.hbase.TableNotDisabledException; -061import org.apache.hadoop.hbase.TableNotEnabledException; -062import org.apache.hadoop.hbase.TableNotFoundException; -063import org.apache.hadoop.hbase.UnknownRegionException; -064import org.apache.hadoop.hbase.classification.InterfaceAudience; -065import org.apache.hadoop.hbase.classification.InterfaceStability; -066import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.AdminRequestCallerBuilder; -067import org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory.MasterRequestCallerBuilder; -068import org.apache.hadoop.hbase.client.Scan.ReadType; -069import org.apache.hadoop.hbase.client.replication.ReplicationSerDeHelper; -070import org.apache.hadoop.hbase.client.replication.TableCFs; -071import org.apache.hadoop.hbase.exceptions.DeserializationException; -072import org.apache.hadoop.hbase.ipc.HBaseRpcController; -073import org.apache.hadoop.hbase.quotas.QuotaFilter; -074import org.apache.hadoop.hbase.quotas.QuotaSettings; -075import org.apache.hadoop.hbase.quotas.QuotaTableUtil; -076import org.apache.hadoop.hbase.replication.ReplicationException; -077import org.apache.hadoop.hbase.replication.ReplicationPeerConfig; -078import org.apache.hadoop.hbase.replication.ReplicationPeerDescription; -079import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback; -080import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -081import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; -082import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; -083import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionRequest; -084import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse; -085import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionRequest; -086import 
org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CompactRegionResponse; -087import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; -088import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; -089import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionRequest; -090import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetOnlineRegionResponse; -091import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionRequest; -092import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.SplitRegionResponse; -093import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ProcedureDescription; -094import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema; -095import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureRequest; -096import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AbortProcedureResponse; -097import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnRequest; -098import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AddColumnResponse; -099import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.AssignRegionRequest; -100import
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/a719cd00/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html index 6de986f..c895448 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/master/balancer/StochasticLoadBalancer.RegionReplicaCandidateGenerator.html @@ -26,1592 +26,1693 @@ 018package org.apache.hadoop.hbase.master.balancer; 019 020import java.util.ArrayDeque; -021import java.util.Arrays; -022import java.util.Collection; -023import java.util.Deque; -024import java.util.HashMap; -025import java.util.LinkedList; -026import java.util.List; -027import java.util.Map; -028import java.util.Map.Entry; -029import java.util.Random; -030 -031import org.apache.commons.logging.Log; -032import org.apache.commons.logging.LogFactory; -033import org.apache.hadoop.conf.Configuration; -034import org.apache.hadoop.hbase.ClusterStatus; -035import org.apache.hadoop.hbase.HBaseInterfaceAudience; -036import org.apache.hadoop.hbase.HConstants; -037import org.apache.hadoop.hbase.HRegionInfo; -038import org.apache.hadoop.hbase.RegionLoad; -039import org.apache.hadoop.hbase.ServerLoad; -040import org.apache.hadoop.hbase.ServerName; -041import org.apache.hadoop.hbase.TableName; -042import org.apache.hadoop.hbase.classification.InterfaceAudience; -043import org.apache.hadoop.hbase.master.MasterServices; -044import org.apache.hadoop.hbase.master.RegionPlan; -045import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action; -046import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type; -047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction; -048import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction; -049import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction; -050import org.apache.hadoop.hbase.util.Bytes; -051import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -052 -053import com.google.common.collect.Lists; -054 -055/** -056 * <p>This is a best-effort load balancer. Given a cost function F(C) => x, it will -057 * randomly try and mutate the cluster to Cprime. If F(Cprime) < F(C) then the -058 * new cluster state becomes the plan. It includes cost functions to compute the cost of:</p> -059 * <ul> -060 * <li>Region Load</li> -061 * <li>Table Load</li> -062 * <li>Data Locality</li> -063 * <li>Memstore Sizes</li> -064 * <li>Storefile Sizes</li> -065 * </ul> -066 * -067 * -068 * <p>Every cost function returns a number between 0 and 1 inclusive, where 0 is the lowest-cost,
-069 * best solution, and 1 is the highest possible cost and the worst solution. The computed costs are -070 * scaled by their respective multipliers:</p> +021import java.util.ArrayList; +022import java.util.Arrays; +023import java.util.Collection; +024import java.util.Collections; +025import java.util.Deque; +026import java.util.HashMap; +027import java.util.LinkedList; +028import java.util.List; +029import java.util.Map; +030import java.util.Map.Entry; +031import java.util.Random; +032 +033import org.apache.commons.logging.Log; +034import org.apache.commons.logging.LogFactory; +035import org.apache.hadoop.conf.Configuration; +036import org.apache.hadoop.hbase.ClusterStatus; +037import org.apache.hadoop.hbase.HBaseInterfaceAudience; +038import org.apache.hadoop.hbase.HConstants; +039import org.apache.hadoop.hbase.HRegionInfo; +040import org.apache.hadoop.hbase.RegionLoad; +041import org.apache.hadoop.hbase.ServerLoad; +042import org.apache.hadoop.hbase.ServerName; +043import org.apache.hadoop.hbase.TableName; +044import org.apache.hadoop.hbase.classification.InterfaceAudience; +045import org.apache.hadoop.hbase.master.MasterServices; +046import org.apache.hadoop.hbase.master.RegionPlan; +047import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action; +048import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.Action.Type; +049import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.AssignRegionAction; +050import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.LocalityType; +051import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.MoveRegionAction; +052import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer.Cluster.SwapRegionsAction; +053import org.apache.hadoop.hbase.util.Bytes; +054import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; +055 +056import
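The search loop that Javadoc describes can be sketched in a few lines. The following is a hypothetical illustration, not the HBase implementation: BalancerSketch and its types are invented here only to show the accept-if-cheaper mutation step and the multiplier-scaled combination of cost functions.

// Hypothetical sketch of the best-effort search described above: each cost
// function returns a value in [0, 1]; a random mutation C -> Cprime is kept
// only when it lowers the weighted total cost F.
import java.util.Random;

class BalancerSketch {
  interface CostFunction {
    double cost(int[] regionToServer); // must return a value in [0, 1]
  }

  static double weightedCost(CostFunction[] functions, double[] multipliers, int[] plan) {
    double total = 0;
    for (int i = 0; i < functions.length; i++) {
      total += multipliers[i] * functions[i].cost(plan); // scale each cost by its multiplier
    }
    return total;
  }

  static int[] balance(int[] plan, int servers, CostFunction[] fns, double[] w, int steps) {
    Random rnd = new Random();
    double best = weightedCost(fns, w, plan);
    for (int s = 0; s < steps; s++) {
      int[] mutated = plan.clone();
      mutated[rnd.nextInt(mutated.length)] = rnd.nextInt(servers); // mutate C into Cprime
      double cost = weightedCost(fns, w, mutated);
      if (cost < best) { // F(Cprime) < F(C): the mutated state becomes the plan
        plan = mutated;
        best = cost;
      }
    }
    return plan;
  }
}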
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/476c54ed/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html -- diff --git a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html index 4b85756..4262d4d 100644 --- a/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html +++ b/devapidocs/src-html/org/apache/hadoop/hbase/regionserver/HStore.StoreFlusherImpl.html @@ -48,2512 +48,2526 @@ 040import java.util.concurrent.atomic.AtomicBoolean; 041import java.util.concurrent.locks.ReentrantLock; 042import java.util.concurrent.locks.ReentrantReadWriteLock; -043 -044import org.apache.commons.logging.Log; -045import org.apache.commons.logging.LogFactory; -046import org.apache.hadoop.conf.Configuration; -047import org.apache.hadoop.fs.FileSystem; -048import org.apache.hadoop.fs.Path; -049import org.apache.hadoop.hbase.Cell; -050import org.apache.hadoop.hbase.CellComparator; -051import org.apache.hadoop.hbase.CellUtil; -052import org.apache.hadoop.hbase.CompoundConfiguration; -053import org.apache.hadoop.hbase.HColumnDescriptor; -054import org.apache.hadoop.hbase.HConstants; -055import org.apache.hadoop.hbase.HRegionInfo; -056import org.apache.hadoop.hbase.MemoryCompactionPolicy; -057import org.apache.hadoop.hbase.TableName; -058import org.apache.hadoop.hbase.backup.FailedArchiveException; -059import org.apache.hadoop.hbase.classification.InterfaceAudience; -060import org.apache.hadoop.hbase.client.Scan; -061import org.apache.hadoop.hbase.conf.ConfigurationManager; -062import org.apache.hadoop.hbase.io.compress.Compression; -063import org.apache.hadoop.hbase.io.crypto.Encryption; -064import org.apache.hadoop.hbase.io.hfile.CacheConfig; -065import org.apache.hadoop.hbase.io.hfile.HFile; -066import org.apache.hadoop.hbase.io.hfile.HFileContext; -067import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder; -068import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoder; -069import org.apache.hadoop.hbase.io.hfile.HFileDataBlockEncoderImpl; -070import org.apache.hadoop.hbase.io.hfile.HFileScanner; -071import org.apache.hadoop.hbase.io.hfile.InvalidHFileException; -072import org.apache.hadoop.hbase.monitoring.MonitoredTask; -073import org.apache.hadoop.hbase.regionserver.compactions.CompactionContext; -074import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress; -075import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest; -076import org.apache.hadoop.hbase.regionserver.compactions.DefaultCompactor; -077import org.apache.hadoop.hbase.regionserver.compactions.OffPeakHours; -078import org.apache.hadoop.hbase.regionserver.querymatcher.ScanQueryMatcher; -079import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController; -080import org.apache.hadoop.hbase.regionserver.wal.WALUtil; -081import org.apache.hadoop.hbase.security.EncryptionUtil; -082import org.apache.hadoop.hbase.security.User; -083import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -084import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.CompactionDescriptor; -085import org.apache.hadoop.hbase.util.Bytes; -086import org.apache.hadoop.hbase.util.ChecksumType; -087import org.apache.hadoop.hbase.util.ClassSize; -088import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -089import org.apache.hadoop.hbase.util.Pair; -090import org.apache.hadoop.hbase.util.ReflectionUtils; -091import 
org.apache.hadoop.util.StringUtils; -092import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; -093 -094import com.google.common.annotations.VisibleForTesting; -095import com.google.common.base.Preconditions; -096import com.google.common.collect.ImmutableCollection; -097import com.google.common.collect.ImmutableList; -098import com.google.common.collect.Lists; -099import com.google.common.collect.Sets; -100 -101/** -102 * A Store holds a column family in a Region. It's a memstore and a set of zero -103 * or more StoreFiles, which stretch backwards over time. -104 * -105 * <p>There's no reason to consider append-logging at this level; all logging -106 * and locking is handled at the HRegion level. Store just provides -107 * services to manage sets of StoreFiles. One of the most important of those -108 * services is compaction, where files are aggregated once they pass -109 * a configurable threshold. -110 * -111 * <p>Locking and transactions are handled at a higher level. This API should -112 * not be called directly but by an HRegion manager. -113 */ -114@InterfaceAudience.Private -115public class HStore implements Store { -116 public static final String MEMSTORE_CLASS_NAME = "hbase.regionserver.memstore.class"; -117 public static final String
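As a conceptual illustration of that memstore-plus-StoreFiles arrangement, here is a hypothetical sketch; StoreSketch is invented for this purpose and is not HBase code. Writes accumulate in memory, a full buffer is flushed to a new immutable file, and files are merged once their count passes a configurable threshold, mirroring the compaction service the Javadoc mentions.

// Conceptual sketch only -- not HBase's API.
import java.util.ArrayList;
import java.util.List;

class StoreSketch {
  private final List<String> memstore = new ArrayList<>();
  private final List<List<String>> storeFiles = new ArrayList<>();
  private final int flushSize;           // analogous in spirit to a memstore flush size
  private final int compactionThreshold; // analogous to a store-file count threshold

  StoreSketch(int flushSize, int compactionThreshold) {
    this.flushSize = flushSize;
    this.compactionThreshold = compactionThreshold;
  }

  void put(String cell) {
    memstore.add(cell);
    if (memstore.size() >= flushSize) {
      storeFiles.add(new ArrayList<>(memstore)); // flush: memstore -> new immutable file
      memstore.clear();
      if (storeFiles.size() >= compactionThreshold) {
        compact();
      }
    }
  }

  private void compact() {
    List<String> merged = new ArrayList<>();
    storeFiles.forEach(merged::addAll); // aggregate all files into one
    storeFiles.clear();
    storeFiles.add(merged);
  }
}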
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/77a552c4/devapidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html b/devapidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html index 790c1b9..2e92f44 100644 --- a/devapidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html +++ b/devapidocs/org/apache/hadoop/hbase/client/TableDescriptorBuilder.html @@ -18,7 +18,7 @@ catch(err) { } //--> -var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":10,"i6":10,"i7":9,"i8":9,"i9":9,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":42,"i23":42,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":9}; +var methods = {"i0":10,"i1":10,"i2":10,"i3":10,"i4":10,"i5":9,"i6":10,"i7":9,"i8":9,"i9":9,"i10":10,"i11":10,"i12":10,"i13":10,"i14":10,"i15":10,"i16":10,"i17":10,"i18":10,"i19":10,"i20":10,"i21":10,"i22":42,"i23":42,"i24":10,"i25":10,"i26":10,"i27":10,"i28":10,"i29":10,"i30":10,"i31":9}; var tabs = {65535:["t0","All Methods"],1:["t1","Static Methods"],2:["t2","Instance Methods"],8:["t4","Concrete Methods"],32:["t6","Deprecated Methods"]}; var altColor = "altColor"; var rowColor = "rowColor"; @@ -110,7 +110,7 @@ var activeTableTab = "activeTableTab"; @InterfaceAudience.Public -public class TableDescriptorBuilder +public class TableDescriptorBuilder extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html?is-external=true; title="class or interface in java.lang">Object @@ -227,13 +227,13 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? DURABILITY_KEY -private static Bytes -FALSE - - static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String FLUSH_POLICY + +private static Bytes +FLUSH_POLICY_KEY + static http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String IS_META @@ -246,7 +246,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? IS_META_KEY -private static org.apache.commons.logging.Log +static org.apache.commons.logging.Log LOG @@ -361,7 +361,7 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? private static Bytes -TRUE +SPLIT_POLICY_KEY @@ -403,70 +403,70 @@ extends http://docs.oracle.com/javase/8/docs/api/java/lang/Object.html? 
TableDescriptorBuilder -addCoprocessor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringclassName) +addColumnFamily(ColumnFamilyDescriptorfamily) TableDescriptorBuilder +addCoprocessor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringclassName) + + +TableDescriptorBuilder addCoprocessor(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringclassName, org.apache.hadoop.fs.PathjarFilePath, intpriority, http://docs.oracle.com/javase/8/docs/api/java/util/Map.html?is-external=true; title="class or interface in java.util">Maphttp://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">String,http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">Stringkvs) - -TableDescriptorBuilder -addCoprocessorWithSpec(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringspecStr) - TableDescriptorBuilder -addFamily(HColumnDescriptorfamily) +addCoprocessorWithSpec(http://docs.oracle.com/javase/8/docs/api/java/lang/String.html?is-external=true; title="class or interface in java.lang">StringspecStr) -HTableDescriptor +TableDescriptor build() -private TableDescriptor -doBuild() +static TableDescriptor +copy(TableDescriptordesc) TableDescriptorBuilder -modifyFamily(HColumnDescriptorfamily) +modifyColumnFamily(ColumnFamilyDescriptorfamily) static TableDescriptorBuilder -newBuilder(byte[]pbBytes) -The input should be created by toByteArray(org.apache.hadoop.hbase.client.TableDescriptor). - - - -static TableDescriptorBuilder newBuilder(TableDescriptordesc) Copy all configuration, values, families, and name from the input. - + static TableDescriptorBuilder newBuilder(TableNamename) + +static TableDescriptor +parseFrom(byte[]pbBytes) +The input should be created by toByteArray(org.apache.hadoop.hbase.client.TableDescriptor). + + TableDescriptorBuilder -remove(byte[]key) +removeColumnFamily(byte[]name)
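The method table above tracks the builder's rename from HColumnDescriptor-based addFamily/modifyFamily to ColumnFamilyDescriptor-based addColumnFamily/modifyColumnFamily, with build() now returning TableDescriptor. A minimal sketch of the renamed API follows; ColumnFamilyDescriptorBuilder.of is an assumption from the same release line, not shown in this diff.

// Hypothetical usage of the renamed builder API recorded in the diff above.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; // assumed companion builder
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

class BuilderExample {
  static TableDescriptor describe() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .addColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) // was addFamily(HColumnDescriptor)
        .build();                                                // now returns TableDescriptor
  }
}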
http://git-wip-us.apache.org/repos/asf/hbase-site/blob/b44796ef/devapidocs/org/apache/hadoop/hbase/backup/impl/class-use/BackupCommands.ProgressCommand.html -- diff --git a/devapidocs/org/apache/hadoop/hbase/backup/impl/class-use/BackupCommands.ProgressCommand.html b/devapidocs/org/apache/hadoop/hbase/backup/impl/class-use/BackupCommands.ProgressCommand.html index 288e5e8..34f842a 100644 --- a/devapidocs/org/apache/hadoop/hbase/backup/impl/class-use/BackupCommands.ProgressCommand.html +++ b/devapidocs/org/apache/hadoop/hbase/backup/impl/class-use/BackupCommands.ProgressCommand.html @@ -4,7 +4,7 @@ -Uses of Class org.apache.hadoop.hbase.backup.impl.BackupCommands.ProgressCommand (Apache HBase 2.0.0-SNAPSHOT API) +Uses of Class org.apache.hadoop.hbase.backup.impl.BackupCommands.ProgressCommand (Apache HBase 3.0.0-SNAPSHOT API) @@ -12,7 +12,7 @@ -Uses of Class org.apache.hadoop.hbase.backup.impl.BackupCommands.RepairCommand (Apache HBase 2.0.0-SNAPSHOT API) +Uses of Class org.apache.hadoop.hbase.backup.impl.BackupCommands.RepairCommand (Apache HBase 3.0.0-SNAPSHOT API)