tkalkirill commented on code in PR #3488:
URL: https://github.com/apache/ignite-3/pull/3488#discussion_r1540536990


##########
modules/storage-rocksdb/src/main/java/org/apache/ignite/internal/storage/rocksdb/RocksDbDataRegion.java:
##########
@@ -34,7 +33,7 @@
  */
 public class RocksDbDataRegion {
     /** Region configuration. */
-    private final RocksDbDataRegionConfiguration cfg;
+    private final RocksDbDataRegionView cfg;

Review Comment:
   I think the name is not very suitable — maybe `configView`?



##########
modules/storage-rocksdb/src/main/java/org/apache/ignite/internal/storage/rocksdb/RocksDbDataRegion.java:
##########
@@ -47,33 +46,31 @@ public class RocksDbDataRegion {
      *
      * @param cfg Data region configuration.
      */
-    public RocksDbDataRegion(RocksDbDataRegionConfiguration cfg) {
+    public RocksDbDataRegion(RocksDbDataRegionView cfg) {
         this.cfg = cfg;
     }
 
     /**
      * Start the rocksDb data region.
      */
     public void start() {
-        RocksDbDataRegionView dataRegionView = cfg.value();
+        long writeBufferSize = cfg.writeBufferSize();
 
-        long writeBufferSize = dataRegionView.writeBufferSize();
+        long totalCacheSize = cfg.size() + writeBufferSize;
 
-        long totalCacheSize = dataRegionView.size() + writeBufferSize;
-
-        switch (dataRegionView.cache().toLowerCase(Locale.ROOT)) {
+        switch (cfg.cache().toLowerCase(Locale.ROOT)) {
             case ROCKSDB_CLOCK_CACHE:
-                cache = new ClockCache(totalCacheSize, 
dataRegionView.numShardBits(), false);
+                cache = new ClockCache(totalCacheSize, cfg.numShardBits(), 
false);
 
                 break;
 
             case ROCKSDB_LRU_CACHE:
-                cache = new LRUCache(totalCacheSize, 
dataRegionView.numShardBits(), false);
+                cache = new LRUCache(totalCacheSize, cfg.numShardBits(), 
false);
 
                 break;
 
             default:
-                assert false : dataRegionView.cache();
+                assert false : cfg.cache();

Review Comment:
   Maybe `throw new AssertionError(String.format("Unknown data region cache type: 
[name=%s, cacheType=%s]", cfg.name(), cfg.cache()));` ?



##########
modules/storage-rocksdb/src/main/java/org/apache/ignite/internal/storage/rocksdb/Index.java:
##########
@@ -85,7 +87,7 @@ void transitionToDestroyedState() {
         try {
             
IgniteUtils.closeAll(storageByPartitionId.values().stream().map(index -> 
index::transitionToDestroyedState));
         } catch (Exception e) {
-            throw new StorageException("Failed to transition index storages to 
the DESTROYED state: " + indexId, e);
+            throw new StorageException("Failed to transition index storages to 
the DESTROYED state: " + indexColumnFamily.indexId(), e);

Review Comment:
   Same



##########
modules/storage-rocksdb/src/main/java/org/apache/ignite/internal/storage/rocksdb/RocksDbStorageEngine.java:
##########
@@ -177,34 +198,23 @@ public RocksDbTableStorage createMvTable(
             StorageTableDescriptor tableDescriptor,
             StorageIndexDescriptorSupplier indexDescriptorSupplier
     ) throws StorageException {
-        RocksDbDataRegion dataRegion = 
regions.get(tableDescriptor.getDataRegion());
+        String regionName = tableDescriptor.getDataRegion();
 
-        int tableId = tableDescriptor.getId();
+        RocksDbStorage storage = storageByRegionName.get(regionName);
 
-        assert dataRegion != null : "tableId=" + tableId + ", dataRegion=" + 
tableDescriptor.getDataRegion();
+        assert storage != null : "No instances exist for region " + regionName;

Review Comment:
   I would add information about the tableId to the error message.



##########
modules/storage-rocksdb/src/main/java/org/apache/ignite/internal/storage/rocksdb/RocksDbIndexes.java:
##########
@@ -69,41 +69,32 @@ void recoverIndexes(StorageIndexDescriptorSupplier 
indexDescriptorSupplier) thro
                 if (descriptor == null) {
                     deleteByPrefix(writeBatch, rocksDb.hashIndexCf(), 
indexPrefix(tableId, indexId));
                 } else {
-                    hashIndices.put(indexId, new HashIndex(rocksDb, tableId, 
descriptor, rocksDb.meta));
+                    hashIndices.put(indexId, new HashIndex(tableId, 
rocksDb.hashIndexCf(), descriptor, rocksDb.meta));
                 }
             }
 
-            var indexCfsToDestroy = new ArrayList<Map.Entry<Integer, 
ColumnFamily>>();
+            var indexCfsToDestroy = new ArrayList<IndexColumnFamily>();

Review Comment:
   Meow



##########
modules/storage-rocksdb/src/main/java/org/apache/ignite/internal/storage/rocksdb/Index.java:
##########
@@ -74,7 +76,7 @@ final void close() {
         try {
             
IgniteUtils.closeAll(storageByPartitionId.values().stream().map(index -> 
index::close));
         } catch (Exception e) {
-            throw new StorageException("Failed to close index storages: " + 
indexId, e);
+            throw new StorageException("Failed to close index storages: " + 
indexColumnFamily.indexId(), e);

Review Comment:
   You can use 
`org.apache.ignite.internal.storage.StorageException#StorageException(int, 
java.lang.String, java.lang.Throwable, java.lang.Object...)`.
   
   Maybe create a method `Index#id()` to simplify this?



##########
modules/storage-rocksdb/src/main/java/org/apache/ignite/internal/storage/rocksdb/instance/SharedRocksDbInstance.java:
##########
@@ -306,14 +320,56 @@ public void destroySortedIndexCfIfNeeded(byte[] cfName, 
int indexId) {
         }
     }
 
+    /**
+     * Removes all data associated with the given table ID in this storage.
+     */
+    public void destroyTable(int tableId) {
+        try (WriteBatch writeBatch = new WriteBatch()) {
+            byte[] tableIdBytes = ByteBuffer.allocate(Integer.BYTES)
+                    .order(KEY_BYTE_ORDER)
+                    .putInt(tableId)
+                    .array();
+
+            deleteByPrefix(writeBatch, partitionCf, tableIdBytes);
+            deleteByPrefix(writeBatch, gcQueueCf, tableIdBytes);
+            deleteByPrefix(writeBatch, hashIndexCf, tableIdBytes);
+
+            List<IndexColumnFamily> sortedIndexCfs = sortedIndexes(tableId);
+
+            for (IndexColumnFamily indexColumnFamily : sortedIndexCfs) {
+                deleteByPrefix(writeBatch, indexColumnFamily.columnFamily(), 
tableIdBytes);
+            }
+
+            deleteByPrefix(writeBatch, meta.columnFamily(), 
metaPrefix(PARTITION_META_PREFIX, tableIdBytes));
+            deleteByPrefix(writeBatch, meta.columnFamily(), 
metaPrefix(PARTITION_CONF_PREFIX, tableIdBytes));
+            deleteByPrefix(writeBatch, meta.columnFamily(), 
metaPrefix(INDEX_ROW_ID_PREFIX, tableIdBytes));
+
+            db.write(DFLT_WRITE_OPTS, writeBatch);
+
+            if (!sortedIndexCfs.isEmpty()) {
+                scheduleIndexCfsDestroy(sortedIndexCfs);
+            }
+        } catch (RocksDBException e) {
+            throw new StorageException("Failed to destroy table data. 
[tableId={}]", e, tableId);
+        }
+    }
+
+    private static byte[] metaPrefix(byte[] metaPrefix, byte[] tableIdBytes) {
+        return ByteBuffer.allocate(metaPrefix.length + tableIdBytes.length)
+                .order(KEY_BYTE_ORDER)
+                .put(metaPrefix)
+                .put(tableIdBytes)
+                .array();
+    }
+
     private ColumnFamily createSortedIndexCf(byte[] cfName) {
         ColumnFamilyDescriptor cfDescriptor = new 
ColumnFamilyDescriptor(cfName, sortedIndexCfOptions(cfName));
 
         ColumnFamily columnFamily;
         try {
             columnFamily = ColumnFamily.create(db, cfDescriptor);
         } catch (RocksDBException e) {
-            throw new StorageException("Failed to create new RocksDB column 
family: " + toStringName(cfDescriptor.getName()), e);
+            throw new StorageException("Failed to create new RocksDB column 
family: " + toStringName(cfName), e);

Review Comment:
   You can use 
`org.apache.ignite.internal.storage.StorageException#StorageException(int, 
java.lang.String, java.lang.Throwable, java.lang.Object...)`



##########
modules/storage-rocksdb/src/main/java/org/apache/ignite/internal/storage/rocksdb/RocksDbStorageEngine.java:
##########
@@ -125,40 +137,49 @@ public String name() {
 
     @Override
     public void start() throws StorageException {
-        registerDataRegion(DEFAULT_DATA_REGION_NAME);
+        registerDataRegion(engineConfig.defaultRegion().value());
 
         // TODO: IGNITE-17066 Add handling deleting/updating data regions 
configuration
         engineConfig.regions().listenElements(new 
ConfigurationNamedListListener<>() {
             @Override
             public CompletableFuture<?> 
onCreate(ConfigurationNotificationEvent<RocksDbDataRegionView> ctx) {
-                registerDataRegion(ctx.newName(RocksDbDataRegionView.class));
+                RocksDbDataRegionView newValue = ctx.newValue();
+
+                assert newValue != null;

Review Comment:
   I think this has already been tested in the configuration module and 
checking is not needed.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to