This is an automated email from the ASF dual-hosted git repository.
zhangduo pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hbase.git
The following commit(s) were added to refs/heads/branch-2 by this push:
new 68bc533f711 HBASE-28255 Correcting spelling errors or annotations with non-standard spelling (#5577)
68bc533f711 is described below
commit 68bc533f7116cedc681704b82319e5793b827621
Author: Sigma <[email protected]>
AuthorDate: Thu Jan 25 17:07:30 2024 +0800
HBASE-28255 Correcting spelling errors or annotations with non-standard spelling (#5577)
Co-authored-by: mazhengxuan <[email protected]>
Co-authored-by: Duo Zhang <[email protected]>
Signed-off-by: Duo Zhang <[email protected]>
(cherry picked from commit caa6aed287c92abd1b630f56e02a2953172373a8)
---
.../java/org/apache/hadoop/hbase/Abortable.java | 2 +-
.../apache/hadoop/hbase/ipc/AbstractRpcClient.java | 2 +-
.../example/ZooKeeperScanPolicyObserver.java | 2 +-
.../procedure2/AbstractProcedureScheduler.java | 2 +-
.../master/replication/AbstractPeerProcedure.java | 2 +-
.../hbase/regionserver/AbstractMemStore.java | 2 +-
.../regionserver/AbstractMultiFileWriter.java | 2 +-
.../hbase/regionserver/wal/AbstractFSWAL.java | 31 +++++++++++-----------
.../wal/AbstractProtobufWALReader.java | 12 ++++-----
.../hadoop/hbase/regionserver/wal/AsyncFSWAL.java | 16 +++++------
10 files changed, 36 insertions(+), 37 deletions(-)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java
index b9736d57345..b0a5a86d50b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java
@@ -37,7 +37,7 @@ public interface Abortable {
void abort(String why, Throwable e);
/**
- * It just call another abort method and the Throwable parameter is null.
+ * It just calls another abort method and the Throwable parameter is null.
* @param why Why we're aborting.
* @see Abortable#abort(String, Throwable)
*/
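For readers skimming this hunk: the javadoc above describes a plain delegation. A minimal sketch of that pattern, assuming the single-argument overload is a default method (illustrative, not quoted from the interface):

    default void abort(String why) {
      // delegate to the two-argument overload with no cause
      abort(why, null);
    }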
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
index fcded9f5b69..5926539d067 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java
@@ -209,7 +209,7 @@ public abstract class AbstractRpcClient<T extends RpcConnection> implements RpcC
for (T conn : connections.values()) {
// Remove connection if it has not been chosen by anyone for more than maxIdleTime, and the
// connection itself has already shutdown. The latter check is because we may still
- // have some pending calls on connection so we should not shutdown the connection outside.
+ // have some pending calls on connection, so we should not shut down the connection outside.
// The connection itself will disconnect if there is no pending call for maxIdleTime.
if (conn.getLastTouched() < closeBeforeTime && !conn.isActive()) {
if (LOG.isTraceEnabled()) {
diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
index 53288be872e..fa7ccf73736 100644
--- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
+++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/coprocessor/example/ZooKeeperScanPolicyObserver.java
@@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.yetus.audience.InterfaceAudience;
/**
- * This is an example showing how a RegionObserver could configured via ZooKeeper in order to
+ * This is an example showing how a RegionObserver could be configured via ZooKeeper in order to
* control a Region compaction, flush, and scan policy. This also demonstrated the use of shared
* {@link org.apache.hadoop.hbase.coprocessor.RegionObserver} state. See
* {@link RegionCoprocessorEnvironment#getSharedData()}.
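A short sketch of the shared-state idiom this javadoc points at; the map key "scanPolicy" is a made-up example here, but RegionCoprocessorEnvironment#getSharedData() does return a per-coprocessor ConcurrentMap shared across regions on a region server:

    ConcurrentMap<String, Object> shared = env.getSharedData();
    // publish once (e.g. from a ZooKeeper watcher), then read it from any
    // region's observer callbacks on this region server
    shared.putIfAbsent("scanPolicy", policyFromZooKeeper);
    Object policy = shared.get("scanPolicy");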
diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
index 34fc1594702..29f5730bc66 100644
--- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
+++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java
@@ -236,7 +236,7 @@ public abstract class AbstractProcedureScheduler implements ProcedureScheduler {
// ==========================================================================
/**
- * Wake up all of the given events. Note that we first take scheduler lock and then wakeInternal()
+ * Wake up all the given events. Note that we first take scheduler lock and then wakeInternal()
* synchronizes on the event. Access should remain package-private. Use ProcedureEvent class to
* wake/suspend events.
* @param events the list of events to wake
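The locking order that javadoc describes (scheduler lock first, then the per-event monitor) looks roughly like this; the method and field names are paraphrased for illustration, not the exact body:

    void wakeEvents(ProcedureEvent[] events) {
      schedLock();                    // 1. take the scheduler-wide lock first
      try {
        for (ProcedureEvent event : events) {
          synchronized (event) {      // 2. then synchronize on the individual event
            event.wakeInternal(this); // hypothetical signature, for illustration
          }
        }
      } finally {
        schedUnlock();
      }
    }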
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
index 64896cae497..ff73284feca 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/replication/AbstractPeerProcedure.java
@@ -36,7 +36,7 @@ public abstract class AbstractPeerProcedure<TState>
protected String peerId;
- // used to keep compatible with old client where we can only returns after updateStorage.
+ // used to keep compatible with old client where we can only return after updateStorage.
protected ProcedurePrepareLatch latch;
protected AbstractPeerProcedure() {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
index 5cd3a92e5b6..62ff6f9a92f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMemStore.java
@@ -157,7 +157,7 @@ public abstract class AbstractMemStore implements MemStore {
Cell toAdd = maybeCloneWithAllocator(currentActive, cell, false);
boolean mslabUsed = (toAdd != cell);
// This cell data is backed by the same byte[] where we read request in RPC(See
- // HBASE-15180). By default MSLAB is ON and we might have copied cell to MSLAB area. If
+ // HBASE-15180). By default, MSLAB is ON and we might have copied cell to MSLAB area. If
// not we must do below deep copy. Or else we will keep referring to the bigger chunk of
// memory and prevent it from getting GCed.
// Copy to MSLAB would not have happened if
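The decision this comment documents, sketched below (a paraphrase of the surrounding method, not the verbatim branch-2 code):

    Cell toAdd = maybeCloneWithAllocator(currentActive, cell, false);
    boolean mslabUsed = (toAdd != cell); // a clone means the cell now lives in MSLAB
    if (!mslabUsed) {
      // no MSLAB copy was made: deep-copy so we stop referencing the large
      // RPC byte[] and allow it to be GC'd
      toAdd = deepCopyIfNeeded(toAdd);
    }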
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
index a02b05f66ba..6370d6a79cc 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/AbstractMultiFileWriter.java
@@ -64,7 +64,7 @@ public abstract class AbstractMultiFileWriter implements CellSink, ShipperListen
* Commit all writers.
* <p>
* Notice that here we use the same <code>maxSeqId</code> for all output files since we haven't
- * find an easy to find enough sequence ids for different output files in some corner cases. See
+ * found an easy to find enough sequence ids for different output files in some corner cases. See
* comments in HBASE-15400 for more details.
*/
public List<Path> commitWriters(long maxSeqId, boolean majorCompaction) throws IOException {
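As that javadoc says, every output file gets the same maxSeqId. The commit step looks roughly like the sketch below (the real method also does pre- and post-commit bookkeeping not shown here):

    List<Path> paths = new ArrayList<>();
    for (StoreFileWriter writer : writers()) {
      // the same maxSeqId is stamped into every file; see HBASE-15400
      writer.appendMetadata(maxSeqId, majorCompaction);
      writer.close();
      paths.add(writer.getPath());
    }
    return paths;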
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index 0beffbd1dd7..dc244d7abac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -225,8 +225,8 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
protected final long blocksize;
/*
- * If more than this many logs, force flush of oldest region to oldest edit goes to disk. If too
- * many and we crash, then will take forever replaying. Keep the number of logs tidy.
+ * If more than this many logs, force flush of oldest region to the oldest edit goes to disk. If
+ * too many and we crash, then will take forever replaying. Keep the number of logs tidy.
*/
protected final int maxLogs;
@@ -300,12 +300,12 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
/**
* The log file size. Notice that the size may not be accurate if we do asynchronous close in
- * sub classes.
+ * subclasses.
*/
private final long logSize;
/**
- * If we do asynchronous close in sub classes, it is possible that when adding WALProps to the
+ * If we do asynchronous close in subclasses, it is possible that when adding WALProps to the
* rolled map, the file is not closed yet, so in cleanOldLogs we should not archive this file,
* for safety.
*/
@@ -684,7 +684,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
* If the number of un-archived WAL files ('live' WALs) is greater than maximum allowed, check the
* first (oldest) WAL, and return those regions which should be flushed so that it can be
* let-go/'archived'.
- * @return stores of regions (encodedRegionNames) to flush in order to archive oldest WAL file.
+ * @return stores of regions (encodedRegionNames) to flush in order to archive the oldest WAL file
*/
Map<byte[], List<byte[]>> findRegionsToForceFlush() throws IOException {
Map<byte[], List<byte[]>> regions = null;
@@ -735,7 +735,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
*/
private synchronized void cleanOldLogs() {
List<Pair<Path, Long>> logsToArchive = null;
- // For each log file, look at its Map of regions to highest sequence id; if all sequence ids
+ // For each log file, look at its Map of regions to the highest sequence id; if all sequence ids
// are older than what is currently in memory, the WAL can be GC'd.
for (Map.Entry<Path, WALProps> e : this.walFile2Props.entrySet()) {
if (!e.getValue().closed) {
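The GC rule in that comment, as a simplified sketch (allSequenceIdsFlushed is a hypothetical helper standing in for the real sequence-id accounting):

    for (Map.Entry<Path, WALProps> e : this.walFile2Props.entrySet()) {
      if (!e.getValue().closed) {
        continue; // may still be closing asynchronously; do not archive it yet
      }
      // archive only if every region's highest sequence id recorded in this
      // file is older than what is currently in memory (already flushed)
      if (allSequenceIdsFlushed(e.getKey())) {
        if (logsToArchive == null) {
          logsToArchive = new ArrayList<>();
        }
        logsToArchive.add(Pair.newPair(e.getKey(), e.getValue().logSize));
      }
    }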
@@ -906,7 +906,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
try {
Path oldPath = getOldPath();
Path newPath = getNewPath();
- // Any exception from here on is catastrophic, non-recoverable so we currently abort.
+ // Any exception from here on is catastrophic, non-recoverable, so we currently abort.
W nextWriter = this.createWriterInstance(newPath);
tellListenersAboutPreLogRoll(oldPath, newPath);
// NewPath could be equal to oldPath if replaceWriter fails.
@@ -925,7 +925,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
regionsToFlush = findRegionsToForceFlush();
}
} catch (CommonFSUtils.StreamLacksCapabilityException exception) {
- // If the underlying FileSystem can't do what we ask, treat as IO failure so
+ // If the underlying FileSystem can't do what we ask, treat as IO failure, so
// we'll abort.
throw new IOException(
"Underlying FileSystem can't meet stream requirements. See RS log " + "for details.",
@@ -1013,10 +1013,9 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
throw new IOException(e.getCause());
}
} finally {
- // in shutdown we may call cleanOldLogs so shutdown this executor in the end.
- // In sync replication implementation, we may shutdown a WAL without shutting down the whole
- // region server, if we shutdown this executor earlier we may get reject execution exception
- // and abort the region server
+ // in shutdown, we may call cleanOldLogs so shutdown this executor in the end.
+ // In sync replication implementation, we may shut down a WAL without shutting down the whole
+ // region server, if we shut down this executor earlier we may get reject execution exception
logArchiveExecutor.shutdown();
}
// we also need to wait logArchive to finish if we want to a graceful shutdown as we may still
@@ -1270,12 +1269,12 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
* have its region edit/sequence id assigned else it messes up our unification of mvcc and
* sequenceid. On return <code>key</code> will have the region edit/sequence id filled in.
* <p/>
- * NOTE: This append, at a time that is usually after this call returns, starts an mvcc
+ * NOTE: This appends, at a time that is usually after this call returns, starts a mvcc
* transaction by calling 'begin' wherein which we assign this update a sequenceid. At assignment
* time, we stamp all the passed in Cells inside WALEdit with their sequenceId. You must
* 'complete' the transaction this mvcc transaction by calling
* MultiVersionConcurrencyControl#complete(...) or a variant otherwise mvcc will get stuck. Do it
- * in the finally of a try/finally block within which this append lives and any subsequent
+ * in the finally of a try/finally block within which this appends lives and any subsequent
* operations like sync or update of memstore, etc. Get the WriteEntry to pass mvcc out of the
* passed in WALKey <code>walKey</code> parameter. Be warned that the WriteEntry is not
* immediately available on return from this method. It WILL be available subsequent to a sync of
@@ -1305,7 +1304,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
* Notice that you need to clear the {@link #rollRequested} flag in this method, as the new writer
* will begin to work before returning from this method. If we clear the flag after returning from
* this call, we may miss a roll request. The implementation class should choose a proper place to
- * clear the {@link #rollRequested} flag so we do not miss a roll request, typically before you
+ * clear the {@link #rollRequested} flag, so we do not miss a roll request, typically before you
* start writing to the new writer.
*/
protected abstract void doReplaceWriter(Path oldPath, Path newPath, W nextWriter)
@@ -1411,7 +1410,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
}
/**
- * Pass one or more log file names and it will either dump out a text version on
+ * Pass one or more log file names, and it will either dump out a text version on
* <code>stdout</code> or split the specified log files.
*/
public static void main(String[] args) throws IOException {
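The append/mvcc contract described in the @@ -1270 hunk above boils down to a try/finally discipline like the following (a sketch with simplified call sites, not code quoted from this class):

    long txid = wal.appendData(regionInfo, walKey, walEdit); // starts the mvcc transaction
    try {
      wal.sync(txid); // the WriteEntry becomes available after a sync
    } finally {
      MultiVersionConcurrencyControl.WriteEntry we = walKey.getWriteEntry();
      if (we != null) {
        mvcc.complete(we); // must complete, or mvcc gets stuck
      }
    }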
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufWALReader.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufWALReader.java
index f5e65e08c84..5d51750ba5f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufWALReader.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractProtobufWALReader.java
@@ -121,7 +121,7 @@ public abstract class AbstractProtobufWALReader
* Get or create the input stream used by cell decoder.
* <p/>
* For implementing replication, we may need to limit the bytes we can read, so here we provide a
- * method so sub classes can wrap the original input stream.
+ * method so subclasses can wrap the original input stream.
*/
protected abstract InputStream getCellCodecInputStream(FSDataInputStream stream);
@@ -366,7 +366,7 @@ public abstract class AbstractProtobufWALReader
this.fileLength = stat.getLen();
this.walEditsStopOffset = this.fileLength;
long currentPos = stream.getPos();
- // we will reset walEditsStopOffset if trailer if available
+ // we will reset walEditsStopOffset if trailer is available
trailerPresent = setTrailerIfPresent(stream);
if (currentPos != stream.getPos()) {
// seek back
@@ -509,18 +509,18 @@ public abstract class AbstractProtobufWALReader
* This is used to determine whether we have already reached the WALTrailer. As the size and magic
* are at the end of the WAL file, it is possible that these two options are missing while
* writing, so we will consider there is no trailer. And when we actually reach the WALTrailer, we
- * will try to decode it as WALKey and we will fail but the error could be vary as it is parsing
+ * will try to decode it as WALKey and we will fail but the error could be varied as it is parsing
* WALTrailer actually.
* @return whether this is a WALTrailer and we should throw EOF to upper layer the file is done
*/
protected final boolean isWALTrailer(long startPosition) throws IOException {
- // We have nothing in the WALTrailer PB message now so its size is just a int length size and a
+ // We have nothing in the WALTrailer PB message now so its size is just an int length size and a
// magic at the end
int trailerSize = PB_WAL_COMPLETE_MAGIC.length + Bytes.SIZEOF_INT;
if (fileLength - startPosition >= trailerSize) {
// We still have more than trailerSize bytes before reaching the EOF so this is not a trailer.
// We also test for == here because if this is a valid trailer, we can read it while opening
- // the reader so we should not reach here
+ // the reader, so we should not reach here
return false;
}
inputStream.seek(startPosition);
@@ -548,7 +548,7 @@ public abstract class AbstractProtobufWALReader
return false;
}
}
- // in fact we should not reach here, as this means the trailer bytes are all matched and
+ // in fact, we should not reach here, as this means the trailer bytes are all matched and
// complete, then we should not call this method...
return true;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index c1e6c1b6907..69d5cba244d 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -157,9 +157,9 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
private final Supplier<Boolean> hasConsumerTask;
private static final int MAX_EPOCH = 0x3FFFFFFF;
- // the lowest bit is waitingRoll, which means new writer is created and we are waiting for old
+ // the lowest bit is waitingRoll, which means new writer is created, and we are waiting for old
// writer to be closed.
- // the second lowest bit is writerBroken which means the current writer is broken and rollWriter
+ // the second-lowest bit is writerBroken which means the current writer is broken and rollWriter
// is needed.
// all other bits are the epoch number of the current writer, this is used to detect whether the
// writer is still the one when you issue the sync.
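The bit layout those comments describe can be decoded as below (a sketch that mirrors the description; the accessor names are illustrative):

    // epochAndState packs three things into one int:
    //   bit 0        -> waitingRoll
    //   bit 1        -> writerBroken
    //   bits 2..31   -> epoch of the current writer (hence MAX_EPOCH = 0x3FFFFFFF)
    static boolean waitingRoll(int epochAndState) {
      return (epochAndState & 0x1) != 0;
    }
    static boolean writerBroken(int epochAndState) {
      return (epochAndState & 0x2) != 0;
    }
    static int epoch(int epochAndState) {
      return epochAndState >>> 2;
    }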
@@ -281,8 +281,8 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
// return whether we have successfully set readyForRolling to true.
private boolean trySetReadyForRolling() {
// Check without holding lock first. Usually we will just return here.
- // waitingRoll is volatile and unacedEntries is only accessed inside event loop so it is safe to
- // check them outside the consumeLock.
+ // waitingRoll is volatile and unacedEntries is only accessed inside event loop, so it is safe
+ // to check them outside the consumeLock.
if (!waitingRoll(epochAndState) || !unackedAppends.isEmpty()) {
return false;
}
@@ -343,13 +343,13 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
// changed, i.e, we have already rolled the writer, or the writer is already broken, we should
// just skip here, to avoid mess up the state or accidentally release some WAL entries and
// cause data corruption.
- // The syncCompleted call is on the critical write path so we should try our best to make it
+ // The syncCompleted call is on the critical write path, so we should try our best to make it
// fast. So here we do not hold consumeLock, for increasing performance. It is safe because
// there are only 3 possible situations:
// 1. For normal case, the only place where we change epochAndState is when rolling the writer.
// Before rolling actually happen, we will only change the state to waitingRoll which is another
// bit than writerBroken, and when we actually change the epoch, we can make sure that there is
- // no out going sync request. So we will always pass the check here and there is no problem.
+ // no outgoing sync request. So we will always pass the check here and there is no problem.
// 2. The writer is broken, but we have not called syncFailed yet. In this case, since
// syncFailed and syncCompleted are executed in the same thread, we will just face the same
// situation with #1.
@@ -541,7 +541,7 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
}
if (writer.getLength() == fileLengthAtLastSync) {
// we haven't written anything out, just advance the highestSyncedSequence since we may only
- // stamped some region sequence id.
+ // stamp some region sequence id.
if (unackedAppends.isEmpty()) {
highestSyncedTxid.set(highestProcessedAppendTxid);
finishSync();
@@ -549,7 +549,7 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
}
return;
}
- // reach here means that we have some unsynced data but haven't reached the batch size yet
+ // reach here means that we have some unsynced data but haven't reached the batch size yet,
// but we will not issue a sync directly here even if there are sync requests because we may
// have some new data in the ringbuffer, so let's just return here and delay the decision of
// whether to issue a sync in the caller method.