This is an automated email from the ASF dual-hosted git repository.
sammichen pushed a commit to branch HDDS-13513_Event_Notification_FeatureBranch
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to
refs/heads/HDDS-13513_Event_Notification_FeatureBranch by this push:
new 01b2b2a7101 HDDS-14003. EventNotification: Added protobufs and entity
class (#9363)
01b2b2a7101 is described below
commit 01b2b2a71012cae73a84228c4974172a4c6798d5
Author: gardenia <[email protected]>
AuthorDate: Wed Feb 25 06:28:00 2026 +0000
HDDS-14003. EventNotification: Added protobufs and entity class (#9363)
Co-authored-by: Colm Dougan <[email protected]>
---
.../ozone/om/helpers/OmCompletedRequestInfo.java | 464 +++++++++++++++++++++
.../src/main/proto/OmClientProtocol.proto | 29 ++
.../apache/hadoop/ozone/om/OMMetadataManager.java | 3 +
.../hadoop/ozone/om/OmMetadataManagerImpl.java | 9 +
.../hadoop/ozone/om/codec/OMDBDefinition.java | 11 +-
.../ozone/om/TestOmCompletedRequestInfo.java | 220 ++++++++++
.../hadoop/ozone/om/TestOmMetadataManager.java | 4 +-
7 files changed, 738 insertions(+), 2 deletions(-)
diff --git
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmCompletedRequestInfo.java
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmCompletedRequestInfo.java
new file mode 100644
index 00000000000..0b07d7a9250
--- /dev/null
+++
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmCompletedRequestInfo.java
@@ -0,0 +1,464 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.helpers;
+
+import java.util.Objects;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.hdds.utils.db.CopyObject;
+import org.apache.hadoop.hdds.utils.db.DelegatedCodec;
+import org.apache.hadoop.hdds.utils.db.Proto2Codec;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * This class is used for storing info related to completed operations.
+ * These are the subset of operations which mutate the filesystem such
+ * as create volume/bucket/key/file/dir and associated delete/rename
+ * events where appropriate.
+ *
+ * The initial use case for this data is to create an event notification
+ * feed which users can subscribe to and get a history of such changes
+ * but the data is designed to be provided in a generic means that could
+ * be suitable for other use cases.
+ *
+ * Each successfully completed operation has an associated
+ * OmCompletedRequestInfo entry containing the trxLogIndex, cmdType,
+ * volumeName, bucketName, keyName, creationTime and operationArgs.
+ */
+public final class OmCompletedRequestInfo implements
CopyObject<OmCompletedRequestInfo> {
+ public static final Logger LOG =
+ LoggerFactory.getLogger(OmCompletedRequestInfo.class);
+
+ private static final Codec<OmCompletedRequestInfo> CODEC = new
DelegatedCodec<>(
+
Proto2Codec.get(OzoneManagerProtocolProtos.CompletedRequestInfo.getDefaultInstance()),
+ OmCompletedRequestInfo::getFromProtobuf,
+ OmCompletedRequestInfo::getProtobuf,
+ OmCompletedRequestInfo.class);
+
+ private long trxLogIndex;
+ private final Type cmdType;
+ private final String volumeName;
+ private final String bucketName;
+ private final String keyName;
+ private final long creationTime;
+ private final OperationArgs opArgs;
+
+ /**
+ * Private constructor, constructed via builder.
+ * @param trxLogIndex - trxLogIndex.
+ * @param cmdType - command type.
+ * @param volumeName - volume name.
+ * @param bucketName - bucket name.
+ * @param keyName - key name.
+ * @param creationTime - creation time.
+ * @param opArgs - operation specific arguments.
+ */
+ @SuppressWarnings("checkstyle:ParameterNumber")
+ private OmCompletedRequestInfo(Builder builder) {
+ this.trxLogIndex = builder.trxLogIndex;
+ this.cmdType = builder.cmdType;
+ this.volumeName = builder.volumeName;
+ this.bucketName = builder.bucketName;
+ this.keyName = builder.keyName;
+ this.creationTime = builder.creationTime;
+ this.opArgs = builder.opArgs;
+ }
+
+ public static Codec<OmCompletedRequestInfo> getCodec() {
+ return CODEC;
+ }
+
+ public void setTrxLogIndex(long trxLogIndex) {
+ this.trxLogIndex = trxLogIndex;
+ }
+
+ public long getTrxLogIndex() {
+ return trxLogIndex;
+ }
+
+ public Type getCmdType() {
+ return cmdType;
+ }
+
+ public String getVolumeName() {
+ return volumeName;
+ }
+
+ public String getBucketName() {
+ return bucketName;
+ }
+
+ public String getKeyName() {
+ return keyName;
+ }
+
+ public long getCreationTime() {
+ return creationTime;
+ }
+
+ public OperationArgs getOpArgs() {
+ return opArgs;
+ }
+
+ public static
org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo.Builder
+ newBuilder() {
+ return new
org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo.Builder();
+ }
+
+ public OmCompletedRequestInfo.Builder toBuilder() {
+ return new Builder()
+ .setTrxLogIndex(trxLogIndex)
+ .setCmdType(cmdType)
+ .setVolumeName(volumeName)
+ .setBucketName(bucketName)
+ .setKeyName(keyName)
+ .setCreationTime(creationTime)
+ .setOpArgs(opArgs);
+ }
+
+ /**
+ * Builder of OmCompletedRequestInfo.
+ */
+ public static class Builder {
+ private long trxLogIndex;
+ private Type cmdType;
+ private String volumeName;
+ private String bucketName;
+ private String keyName;
+ private long creationTime;
+ private OperationArgs opArgs;
+
+ public Builder() {
+ // default values
+ }
+
+ public Builder setTrxLogIndex(long trxLogIndex) {
+ this.trxLogIndex = trxLogIndex;
+ return this;
+ }
+
+ public Builder setCmdType(Type cmdType) {
+ this.cmdType = cmdType;
+ return this;
+ }
+
+ public Builder setVolumeName(String volumeName) {
+ this.volumeName = volumeName;
+ return this;
+ }
+
+ public Builder setBucketName(String bucketName) {
+ this.bucketName = bucketName;
+ return this;
+ }
+
+ public Builder setKeyName(String keyName) {
+ this.keyName = keyName;
+ return this;
+ }
+
+ public Builder setCreationTime(long crTime) {
+ this.creationTime = crTime;
+ return this;
+ }
+
+ public Builder setOpArgs(OperationArgs opArgs) {
+ this.opArgs = opArgs;
+ return this;
+ }
+
+ public OmCompletedRequestInfo build() {
+ Objects.requireNonNull(trxLogIndex, "trxLogIndex == null");
+ Objects.requireNonNull(cmdType, "cmdType == null");
+ Objects.requireNonNull(volumeName, "volumeName == null");
+ Objects.requireNonNull(creationTime, "creationTime == null");
+ Objects.requireNonNull(opArgs, "opArgs == null");
+
+ return new OmCompletedRequestInfo(this);
+ }
+ }
+
+ /**
+ * Creates OmCompletedRequestInfo protobuf from OmCompletedRequestInfo.
+ */
+ public OzoneManagerProtocolProtos.CompletedRequestInfo getProtobuf() {
+ OzoneManagerProtocolProtos.CompletedRequestInfo.Builder sib =
+ OzoneManagerProtocolProtos.CompletedRequestInfo.newBuilder()
+ .setTrxLogIndex(trxLogIndex)
+ .setCmdType(cmdType)
+ .setVolumeName(volumeName)
+ .setCreationTime(creationTime);
+
+ // can be null e.g. CreateVolume
+ if (bucketName != null) {
+ sib.setBucketName(bucketName);
+ }
+
+ // can be null e.g. CreateBucket
+ if (keyName != null) {
+ sib.setKeyName(keyName);
+ }
+
+ switch (cmdType) {
+ case RenameKey:
+
sib.setRenameKeyArgs(OzoneManagerProtocolProtos.RenameKeyOperationArgs.newBuilder()
+ .setToKeyName(((OperationArgs.RenameKeyArgs) opArgs).getToKeyName())
+ .build());
+ break;
+ case CreateFile:
+
sib.setCreateFileArgs(OzoneManagerProtocolProtos.CreateFileOperationArgs.newBuilder()
+ .setIsRecursive(((OperationArgs.CreateFileArgs)
opArgs).isRecursive())
+ .setIsOverwrite(((OperationArgs.CreateFileArgs)
opArgs).isOverwrite())
+ .build());
+ break;
+ case CreateVolume:
+ case DeleteVolume:
+ case CreateBucket:
+ case DeleteBucket:
+ case CreateKey:
+ case DeleteKey:
+ case CommitKey:
+ case CreateDirectory:
+ break;
+ default:
+ LOG.error("Unexpected cmdType={}", cmdType);
+ break;
+ }
+
+ return sib.build();
+ }
+
+ /**
+ * Parses OmCompletedRequestInfo protobuf and creates OmCompletedRequestInfo.
+ * @param completedRequestInfoProto protobuf
+ * @return instance of OmCompletedRequestInfo
+ */
+ public static OmCompletedRequestInfo getFromProtobuf(
+ OzoneManagerProtocolProtos.CompletedRequestInfo
completedRequestInfoProto) {
+
+ OmCompletedRequestInfo.Builder osib = OmCompletedRequestInfo.newBuilder()
+ .setTrxLogIndex(completedRequestInfoProto.getTrxLogIndex())
+ .setCmdType(completedRequestInfoProto.getCmdType())
+ .setVolumeName(completedRequestInfoProto.getVolumeName())
+ .setBucketName(completedRequestInfoProto.getBucketName())
+ .setKeyName(completedRequestInfoProto.getKeyName())
+ .setCreationTime(completedRequestInfoProto.getCreationTime());
+
+ switch (completedRequestInfoProto.getCmdType()) {
+ case RenameKey:
+ OzoneManagerProtocolProtos.RenameKeyOperationArgs renameArgs
+ = (OzoneManagerProtocolProtos.RenameKeyOperationArgs)
completedRequestInfoProto.getRenameKeyArgs();
+
+ osib.setOpArgs(new
OperationArgs.RenameKeyArgs(renameArgs.getToKeyName()));
+ break;
+ case CreateFile:
+ OzoneManagerProtocolProtos.CreateFileOperationArgs createFileArgs
+ = (OzoneManagerProtocolProtos.CreateFileOperationArgs)
completedRequestInfoProto.getCreateFileArgs();
+
+ osib.setOpArgs(new
OperationArgs.CreateFileArgs(createFileArgs.getIsOverwrite(),
+
createFileArgs.getIsRecursive()));
+ break;
+ case CreateVolume:
+ case DeleteVolume:
+ case CreateBucket:
+ case DeleteBucket:
+ case CreateKey:
+ case DeleteKey:
+ case CommitKey:
+ case CreateDirectory:
+ osib.setOpArgs(new OperationArgs.NoArgs());
+ break;
+ default:
+ LOG.error("Unexpected cmdType={}",
completedRequestInfoProto.getCmdType());
+ break;
+ }
+ return osib.build();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ OmCompletedRequestInfo that = (OmCompletedRequestInfo) o;
+ return trxLogIndex == that.trxLogIndex &&
+ cmdType == that.cmdType &&
+ creationTime == that.creationTime &&
+ Objects.equals(volumeName, that.volumeName) &&
+ Objects.equals(bucketName, that.bucketName) &&
+ Objects.equals(keyName, that.keyName) &&
+ opArgs.equals(that.opArgs);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(trxLogIndex, cmdType, volumeName, bucketName,
+ keyName, creationTime, opArgs);
+ }
+
+ /**
+ * Return a new copy of the object.
+ */
+ @Override
+ public OmCompletedRequestInfo copyObject() {
+ return new Builder()
+ .setTrxLogIndex(trxLogIndex)
+ .setCmdType(cmdType)
+ .setVolumeName(volumeName)
+ .setBucketName(bucketName)
+ .setKeyName(keyName)
+ .setCreationTime(creationTime)
+ .setOpArgs(opArgs)
+ .build();
+ }
+
+ @Override
+ public String toString() {
+ return "OmCompletedRequestInfo{" +
+ "trxLogIndex: '" + trxLogIndex + '\'' +
+ ", cmdType: '" + cmdType + '\'' +
+ ", volumeName: '" + volumeName + '\'' +
+ ", bucketName: '" + bucketName + '\'' +
+ ", keyName: '" + keyName + '\'' +
+ ", creationTime: '" + creationTime + '\'' +
+ ", opArgs : '" + opArgs + '\'' +
+ '}';
+ }
+
+ /**
+ * OperationArgs - common base class for operations specific
+ * parameters.
+ */
+ public abstract static class OperationArgs {
+
+ /**
+ * RenameKeyArgs.
+ */
+ public static class RenameKeyArgs extends OperationArgs {
+ private final String toKeyName;
+
+ public RenameKeyArgs(String toKeyName) {
+ this.toKeyName = toKeyName;
+ }
+
+ public String getToKeyName() {
+ return toKeyName;
+ }
+
+ @Override
+ public String toString() {
+ return "RenameKeyArgs{" +
+ "toKeyName: '" + toKeyName + '\'' +
+ '}';
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ RenameKeyArgs that = (RenameKeyArgs) o;
+ return Objects.equals(toKeyName, that.toKeyName);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(toKeyName);
+ }
+ }
+
+ /**
+ * CreateFileArgs.
+ */
+ public static class CreateFileArgs extends OperationArgs {
+ // hsync?
+ private final boolean recursive;
+ private final boolean overwrite;
+
+ public CreateFileArgs(boolean recursive, boolean overwrite) {
+ this.recursive = recursive;
+ this.overwrite = overwrite;
+ }
+
+ public boolean isRecursive() {
+ return recursive;
+ }
+
+ public boolean isOverwrite() {
+ return overwrite;
+ }
+
+ @Override
+ public String toString() {
+ return "CreateFileArgs{" +
+ "recursive: '" + recursive + '\'' +
+ ", overwrite: '" + overwrite + '\'' +
+ '}';
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ CreateFileArgs that = (CreateFileArgs) o;
+ return recursive == that.recursive &&
+ overwrite == that.overwrite;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(recursive, overwrite);
+ }
+ }
+
+ /**
+ * NoArgs.
+ */
+ public static class NoArgs extends OperationArgs {
+ @Override
+ public String toString() {
+ return "NoArgs{}";
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ return o != null && getClass() == o.getClass();
+ }
+
+ @Override
+ public int hashCode() {
+ return 42;
+ }
+ }
+ }
+}
diff --git
a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index bdb3cc3cee3..12708817c31 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -917,6 +917,35 @@ message SnapshotDiffJobProto {
optional double keysProcessedPct = 13;
}
+/**
+ * Per request type entities to hold arguments
+ * captured for CompletedRequestInfo
+ */
+message RenameKeyOperationArgs {
+ optional string toKeyName = 1;
+}
+
+message CreateFileOperationArgs {
+ optional bool isRecursive = 1;
+ optional bool isOverwrite = 2;
+}
+
+/**
+ * CompletedRequestInfo table entry
+ */
+message CompletedRequestInfo {
+
+ optional int64 trxLogIndex = 1;
+ optional Type cmdType = 2; // Type of the command
+ optional string volumeName = 3;
+ optional string bucketName = 4;
+ optional string keyName = 5;
+ optional uint64 creationTime = 6;
+
+ optional RenameKeyOperationArgs renameKeyArgs = 7;
+ optional CreateFileOperationArgs createFileArgs = 8;
+}
+
message OzoneObj {
enum ObjectType {
VOLUME = 1;
diff --git
a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index 461324d0022..abd63f09c0e 100644
---
a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++
b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -42,6 +42,7 @@
import org.apache.hadoop.ozone.om.helpers.ListKeysResult;
import org.apache.hadoop.ozone.om.helpers.ListOpenFilesResult;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo;
import org.apache.hadoop.ozone.om.helpers.OmDBAccessIdInfo;
import org.apache.hadoop.ozone.om.helpers.OmDBTenantState;
import org.apache.hadoop.ozone.om.helpers.OmDBUserPrincipalInfo;
@@ -486,6 +487,8 @@ String getMultipartKeyFSO(String volume, String bucket,
String key, String
Table<String, CompactionLogEntry> getCompactionLogTable();
+ Table<Long, OmCompletedRequestInfo> getCompletedRequestInfoTable();
+
/**
* Gets the OM Meta table.
* @return meta table reference.
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 9dc01e54f99..df3af67bc3b 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -101,6 +101,7 @@
import org.apache.hadoop.ozone.om.helpers.ListKeysResult;
import org.apache.hadoop.ozone.om.helpers.ListOpenFilesResult;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo;
import org.apache.hadoop.ozone.om.helpers.OmDBAccessIdInfo;
import org.apache.hadoop.ozone.om.helpers.OmDBTenantState;
import org.apache.hadoop.ozone.om.helpers.OmDBUserPrincipalInfo;
@@ -181,6 +182,7 @@ public class OmMetadataManagerImpl implements
OMMetadataManager,
private Table<String, SnapshotInfo> snapshotInfoTable;
private Table<String, String> snapshotRenamedTable;
private Table<String, CompactionLogEntry> compactionLogTable;
+ private Table<Long, OmCompletedRequestInfo> completedRequestInfoTable;
private OzoneManager ozoneManager;
@@ -498,6 +500,8 @@ protected void initializeOmTables(CacheType cacheType,
// TODO: [SNAPSHOT] Initialize table lock for snapshotRenamedTable.
compactionLogTable =
initializer.get(OMDBDefinition.COMPACTION_LOG_TABLE_DEF);
+
+ completedRequestInfoTable =
initializer.get(OMDBDefinition.COMPLETED_REQUEST_INFO_TABLE_DEF);
}
/**
@@ -1700,6 +1704,11 @@ public Table<String, CompactionLogEntry>
getCompactionLogTable() {
return compactionLogTable;
}
+ @Override
+ public Table<Long, OmCompletedRequestInfo> getCompletedRequestInfoTable() {
+ return completedRequestInfoTable;
+ }
+
/**
* Get Snapshot Chain Manager.
*
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
index 9894e8f5d6b..810cd68a4b6 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OMDBDefinition.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo;
import org.apache.hadoop.ozone.om.helpers.OmDBAccessIdInfo;
import org.apache.hadoop.ozone.om.helpers.OmDBTenantState;
import org.apache.hadoop.ozone.om.helpers.OmDBUserPrincipalInfo;
@@ -315,6 +316,13 @@ public final class OMDBDefinition extends
DBDefinition.WithMap {
StringCodec.get(),
CompactionLogEntry.getCodec());
+ public static final String COMPLETED_REQUEST_INFO_TABLE =
"completedRequestInfoTable";
+ /** completedRequestInfoTable: txId :- OmCompletedRequestInfo. */
+ public static final DBColumnFamilyDefinition<Long, OmCompletedRequestInfo>
COMPLETED_REQUEST_INFO_TABLE_DEF
+ = new DBColumnFamilyDefinition<>(COMPLETED_REQUEST_INFO_TABLE,
+ LongCodec.get(),
+ OmCompletedRequestInfo.getCodec());
+
//---------------------------------------------------------------------------
private static final Map<String, DBColumnFamilyDefinition<?, ?>>
COLUMN_FAMILIES
= DBColumnFamilyDefinition.newUnmodifiableMap(
@@ -339,7 +347,8 @@ public final class OMDBDefinition extends
DBDefinition.WithMap {
TENANT_STATE_TABLE_DEF,
TRANSACTION_INFO_TABLE_DEF,
USER_TABLE_DEF,
- VOLUME_TABLE_DEF);
+ VOLUME_TABLE_DEF,
+ COMPLETED_REQUEST_INFO_TABLE_DEF);
private static final OMDBDefinition INSTANCE = new OMDBDefinition();
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmCompletedRequestInfo.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmCompletedRequestInfo.java
new file mode 100644
index 00000000000..d1f85ef1bcc
--- /dev/null
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmCompletedRequestInfo.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_DB_DIRS;
+
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo;
+import org.apache.hadoop.ozone.om.helpers.OmCompletedRequestInfo.OperationArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.apache.hadoop.util.Time;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.io.TempDir;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Tests the OmCompletedRequestInfo OM database table for Ozone object
+ * storage operations.
+ */
+public class TestOmCompletedRequestInfo {
+ public static final Logger LOG =
+ LoggerFactory.getLogger(TestOmCompletedRequestInfo.class);
+
+ private OMMetadataManager omMetadataManager;
+ private static final long EXPECTED_OPERATION_ID = 123L;
+
+ private static final String VOLUME_NAME = "vol1";
+ private static final String BUCKET_NAME = "bucket1";
+ private static final String KEY_NAME = "bucket1";
+
+ @TempDir
+ private Path folder;
+
+ @BeforeEach
+ public void setup() throws Exception {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.set(OZONE_OM_DB_DIRS,
+ folder.toAbsolutePath().toString());
+ omMetadataManager = new OmMetadataManagerImpl(conf, null);
+ }
+
+ private OmCompletedRequestInfo createRequestInfo(long trxLogIndex) {
+ return createRequestInfo(trxLogIndex, Time.now());
+ }
+
+ private OmCompletedRequestInfo createRequestInfo(long trxLogIndex, long
creationTime) {
+ return new OmCompletedRequestInfo.Builder()
+ .setTrxLogIndex(trxLogIndex)
+ .setCmdType(Type.CreateKey)
+ .setVolumeName(VOLUME_NAME)
+ .setBucketName(BUCKET_NAME)
+ .setKeyName(KEY_NAME)
+ .setCreationTime(creationTime)
+ .setOpArgs(new OperationArgs.NoArgs())
+ .build();
+ }
+
+ @Test
+ public void testTableExists() throws Exception {
+ Table<Long, OmCompletedRequestInfo> requestInfoTable =
+ omMetadataManager.getCompletedRequestInfoTable();
+ Assertions.assertTrue(requestInfoTable.isEmpty());
+ }
+
+ @Test
+ public void testAddNewOperation() throws Exception {
+ Table<Long, OmCompletedRequestInfo> requestInfoTable =
+ omMetadataManager.getCompletedRequestInfoTable();
+ requestInfoTable.put(EXPECTED_OPERATION_ID,
createRequestInfo(EXPECTED_OPERATION_ID));
+ Assertions.assertEquals(EXPECTED_OPERATION_ID,
+ requestInfoTable.get(EXPECTED_OPERATION_ID).getTrxLogIndex());
+ }
+
+ @Test
+ public void testDeleteOmCompletedRequestInfo() throws Exception {
+ Table<Long, OmCompletedRequestInfo> requestInfoTable =
+ omMetadataManager.getCompletedRequestInfoTable();
+
+ Assertions.assertFalse(requestInfoTable.isExist(EXPECTED_OPERATION_ID));
+ requestInfoTable.put(EXPECTED_OPERATION_ID,
createRequestInfo(EXPECTED_OPERATION_ID));
+ Assertions.assertTrue(requestInfoTable.isExist(EXPECTED_OPERATION_ID));
+ requestInfoTable.delete(EXPECTED_OPERATION_ID);
+ Assertions.assertFalse(requestInfoTable.isExist(EXPECTED_OPERATION_ID));
+ }
+
+ @Test
+ // This is a little outside of the scope of a unit test for this
+ // entity class, but it is fundamental to this entity's storage that
+ // iteration order follows the transaction log index
+ public void testIterationOrderIsBasedOnTransationIndex() throws Exception {
+ Table<Long, OmCompletedRequestInfo> requestInfoTable =
+ omMetadataManager.getCompletedRequestInfoTable();
+
+ List<Long> idsToInsert = Arrays.asList(
+ 1L, 19L, 8L, 7L, 111L, 72L, 992L, 11L, 42L);
+ List<Long> expectedIdIterationOrder = Arrays.asList(
+ 1L, 7L, 8L, 11L, 19L, 42L, 72L, 111L, 992L);
+
+ for (Long trxLogIndex : idsToInsert) {
+ OmCompletedRequestInfo requestInfo = createRequestInfo(trxLogIndex);
+ requestInfoTable.put(trxLogIndex, requestInfo);
+ }
+
+ List<Long> iteratedIds = new ArrayList<>();
+ Table.KeyValue<Long, OmCompletedRequestInfo> requestInfoRow;
+
+ try (TableIterator<Long, ? extends Table.KeyValue<Long,
OmCompletedRequestInfo>>
+ tableIterator = requestInfoTable.iterator()) {
+
+ while (tableIterator.hasNext()) {
+ requestInfoRow = tableIterator.next();
+ iteratedIds.add(requestInfoRow.getValue().getTrxLogIndex());
+ }
+ }
+
+ Assertions.assertEquals(iteratedIds, expectedIdIterationOrder);
+ }
+
+ @Test
+ public void testEquals() {
+ long now = Time.now();
+ OmCompletedRequestInfo requestInfo =
createRequestInfo(EXPECTED_OPERATION_ID, now);
+ OmCompletedRequestInfo requestInfoDupe =
createRequestInfo(EXPECTED_OPERATION_ID, now);
+ OmCompletedRequestInfo requestInfoWithDifference =
createRequestInfo(EXPECTED_OPERATION_ID + 1);
+
+ Assertions.assertEquals(requestInfo, requestInfoDupe);
+ Assertions.assertEquals(requestInfoDupe, requestInfo);
+ Assertions.assertNotEquals(requestInfo, requestInfoWithDifference);
+ Assertions.assertNotEquals(requestInfoWithDifference, requestInfo);
+ }
+
+ @Test
+ public void testEqualsWithNullBucketName() {
+ OmCompletedRequestInfo requestInfo =
createRequestInfo(EXPECTED_OPERATION_ID);
+
+ OmCompletedRequestInfo requestInfoWithNullBucketName = new
OmCompletedRequestInfo.Builder()
+ .setTrxLogIndex(321)
+ .setCmdType(Type.CreateVolume)
+ .setVolumeName(VOLUME_NAME)
+ .setCreationTime(Time.now())
+ .setOpArgs(new OperationArgs.NoArgs())
+ .build();
+
+ Assertions.assertEquals(requestInfoWithNullBucketName,
requestInfoWithNullBucketName);
+ Assertions.assertNotEquals(requestInfoWithNullBucketName, requestInfo);
+ Assertions.assertNotEquals(requestInfo, requestInfoWithNullBucketName);
+ }
+
+ @Test
+ public void testEqualsWithNullKeyName() {
+ OmCompletedRequestInfo requestInfo =
createRequestInfo(EXPECTED_OPERATION_ID);
+
+ OmCompletedRequestInfo requestInfoWithNullKeyName = new
OmCompletedRequestInfo.Builder()
+ .setTrxLogIndex(321)
+ .setCmdType(Type.CreateKey)
+ .setVolumeName(VOLUME_NAME)
+ .setBucketName(BUCKET_NAME)
+ .setKeyName(null)
+ .setCreationTime(Time.now())
+ .setOpArgs(new OperationArgs.NoArgs())
+ .build();
+
+ Assertions.assertEquals(requestInfoWithNullKeyName,
requestInfoWithNullKeyName);
+ Assertions.assertNotEquals(requestInfoWithNullKeyName, requestInfo);
+ Assertions.assertNotEquals(requestInfo, requestInfoWithNullKeyName);
+ }
+
+ @Test
+ public void testEqualsWithDifferentArguments() {
+ long now = Time.now();
+
+ OmCompletedRequestInfo renameRequestInfo = new
OmCompletedRequestInfo.Builder()
+ .setTrxLogIndex(321)
+ .setCmdType(Type.RenameKey)
+ .setVolumeName(VOLUME_NAME)
+ .setBucketName(BUCKET_NAME)
+ .setKeyName(KEY_NAME)
+ .setCreationTime(now)
+ .setOpArgs(new OperationArgs.RenameKeyArgs(KEY_NAME + "_renamed"))
+ .build();
+
+ OmCompletedRequestInfo renameRequestInfoWithDifferentArgs = new
OmCompletedRequestInfo.Builder()
+ .setTrxLogIndex(321)
+ .setCmdType(Type.RenameKey)
+ .setVolumeName(VOLUME_NAME)
+ .setBucketName(BUCKET_NAME)
+ .setKeyName(KEY_NAME)
+ .setCreationTime(now)
+ .setOpArgs(new OperationArgs.RenameKeyArgs(KEY_NAME +
"_renamed_AGAIN"))
+ .build();
+
+ Assertions.assertEquals(renameRequestInfo, renameRequestInfo);
+ Assertions.assertEquals(renameRequestInfoWithDifferentArgs,
renameRequestInfoWithDifferentArgs);
+ Assertions.assertNotEquals(renameRequestInfo,
renameRequestInfoWithDifferentArgs);
+ Assertions.assertNotEquals(renameRequestInfoWithDifferentArgs,
renameRequestInfo);
+ }
+}
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
index d905fb23461..a1a5c0c5077 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java
@@ -26,6 +26,7 @@
import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_EXPIRE_THRESHOLD_DEFAULT;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.BUCKET_TABLE;
import static
org.apache.hadoop.ozone.om.codec.OMDBDefinition.COMPACTION_LOG_TABLE;
+import static
org.apache.hadoop.ozone.om.codec.OMDBDefinition.COMPLETED_REQUEST_INFO_TABLE;
import static
org.apache.hadoop.ozone.om.codec.OMDBDefinition.DELEGATION_TOKEN_TABLE;
import static
org.apache.hadoop.ozone.om.codec.OMDBDefinition.DELETED_DIR_TABLE;
import static org.apache.hadoop.ozone.om.codec.OMDBDefinition.DELETED_TABLE;
@@ -136,7 +137,8 @@ public class TestOmMetadataManager {
TENANT_STATE_TABLE,
SNAPSHOT_INFO_TABLE,
SNAPSHOT_RENAMED_TABLE,
- COMPACTION_LOG_TABLE
+ COMPACTION_LOG_TABLE,
+ COMPLETED_REQUEST_INFO_TABLE
};
private OMMetadataManager omMetadataManager;
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]