swamirishi commented on code in PR #8774:
URL: https://github.com/apache/ozone/pull/8774#discussion_r2212778747
##########
hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java:
##########
@@ -186,68 +444,65 @@ void clear() {
final boolean warn = !isCommit && batchSize > 0;
String details = warn ? summary() : null;
- for (Object value : ops.values()) {
- if (value instanceof CodecBuffer) {
- ((CodecBuffer) value).release(); // the key will also be released
- }
- }
- ops.clear();
+ IOUtils.close(LOG, batchOps.values());
+ batchOps.clear();
if (warn) {
LOG.warn("discarding changes {}", details);
}
}
- void putOrDelete(Bytes key, int keyLen, Object val, int valLen) {
- Preconditions.checkState(!isCommit, "%s is already committed.", this);
- batchSize += keyLen + valLen;
+ private void deleteIfExist(Bytes key, boolean removeFromIndexMap) {
// remove previous first in order to call release()
- final Object previous = ops.remove(key);
- if (previous != null) {
- final boolean isPut = previous != Op.DELETE;
- final int preLen;
- if (!isPut) {
- preLen = 0;
- } else if (previous instanceof CodecBuffer) {
- final CodecBuffer previousValue = (CodecBuffer) previous;
- preLen = previousValue.readableBytes();
- previousValue.release(); // key will also be released
- } else if (previous instanceof byte[]) {
- preLen = ((byte[]) previous).length;
- } else {
- throw new IllegalStateException("Unexpected previous: " + previous
- + ", class=" + previous.getClass().getSimpleName());
- }
- discardedSize += keyLen + preLen;
+ if (opsKeys.containsKey(key)) {
+ int previousIndex = removeFromIndexMap ? opsKeys.remove(key) :
opsKeys.get(key);
+ final Operation previous = batchOps.remove(previousIndex);
+ previous.close();
+ discardedSize += previous.totalLength();
discardedCount++;
- debug(() -> String.format("%s overwriting a previous %s", this,
- isPut ? "put (value: " + byteSize2String(preLen) + ")" : "del"));
+ debug(() -> String.format("%s overwriting a previous %s[valLen =>
%s]", this, previous.getOpType(),
+ previous.valLen()));
}
- final Object overwritten = ops.put(key, val);
- Preconditions.checkState(overwritten == null);
+ }
+ int overWriteOpIfExist(Bytes key, Operation operation) {
+ Preconditions.checkState(!isCommit, "%s is already committed.", this);
+ deleteIfExist(key, true);
+ batchSize += operation.totalLength();
+ int newIndex = opIndex.getAndIncrement();
+ final Integer overwritten = opsKeys.put(key, newIndex);
+ batchOps.put(newIndex, operation);
+ Preconditions.checkState(overwritten == null ||
!batchOps.containsKey(overwritten));
debug(() -> String.format("%s %s, %s; key=%s", this,
- valLen == 0 ? delString(keyLen) : putString(keyLen, valLen),
+ Op.DELETE == operation.getOpType() ?
delString(operation.totalLength()) : putString(operation.keyLen(),
+ operation.valLen()),
batchSizeDiscardedString(), key));
+ return newIndex;
}
void put(CodecBuffer key, CodecBuffer value) {
putCount++;
// always release the key with the value
- value.getReleaseFuture().thenAccept(v -> key.release());
- putOrDelete(new Bytes(key), key.readableBytes(),
- value, value.readableBytes());
+ Bytes keyBytes = new Bytes(key);
+ overWriteOpIfExist(keyBytes, new CodecBufferPutOperation(key, value,
keyBytes));
}
void put(byte[] key, byte[] value) {
putCount++;
- putOrDelete(new Bytes(key), key.length, value, value.length);
+ Bytes keyBytes = new Bytes(key);
+ overWriteOpIfExist(keyBytes, new ByteArrayPutOperation(key, value,
keyBytes));
}
void delete(byte[] key) {
delCount++;
- putOrDelete(new Bytes(key), key.length, Op.DELETE, 0);
+ Bytes keyBytes = new Bytes(key);
+ overWriteOpIfExist(keyBytes, new DeleteOperation(key, keyBytes));
+ }
+
+ void deleteRange(byte[] startKey, byte[] endKey) {
+ delRangeCount++;
Review Comment:
It would make the deleteRange operation not performant. The initial patch used a
TreeMap, which performs deleteRange efficiently in O(log(N)) as a BST. But because
of that, puts and deletes would suffer: O(1) in a HashMap vs O(log(N)) in a
TreeMap. Instead of optimizing deleteRange, which is going to be a less frequent
operation than individual puts and deletes, I made it a HashMap, and I am just
taking a hit on commit, where I sort the keys on the basis of sequence numbers —
which is going to be much faster, at the cost of some extra memory.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]