This is an automated email from the ASF dual-hosted git repository.
cshannon pushed a commit to branch no-chop-merge
in repository https://gitbox.apache.org/repos/asf/accumulo.git
The following commit(s) were added to refs/heads/no-chop-merge by this push:
new c88b9d7944 Update stored file metadata to include a range (#3480)
c88b9d7944 is described below
commit c88b9d7944735f71a958d3c418752c15dbb4a07f
Author: Christopher L. Shannon <[email protected]>
AuthorDate: Fri Jul 14 11:16:09 2023 -0400
Update stored file metadata to include a range (#3480)
Update stored file metadata to include a range
File metadata is now serialized as Json and contains a range along with
the path for a file. TabletFiles now also take a Range object which can
be used to fence off by specific rows in the range.
---
.../org/apache/accumulo/core/gc/Reference.java | 3 +-
.../accumulo/core/gc/ReferenceDirectory.java | 8 +-
.../org/apache/accumulo/core/gc/ReferenceFile.java | 38 +++--
.../accumulo/core/metadata/AbstractTabletFile.java | 8 +
.../core/metadata/CompactableFileImpl.java | 3 +-
.../core/metadata/ReferencedTabletFile.java | 21 +--
.../accumulo/core/metadata/StoredTabletFile.java | 156 +++++++++++++++++--
.../accumulo/core/metadata/ValidationUtil.java | 2 +-
.../schema/ExternalCompactionMetadata.java | 10 +-
.../metadata/schema/ReferencedTabletFileTest.java | 20 ++-
.../core/metadata/schema/TabletMetadataTest.java | 23 ++-
.../accumulo/server/compaction/CompactionInfo.java | 7 +-
.../accumulo/server/compaction/FileCompactor.java | 4 +-
.../server/constraints/MetadataConstraints.java | 31 +++-
.../org/apache/accumulo/server/fs/VolumeUtil.java | 20 +--
.../accumulo/server/gc/AllVolumesDirectory.java | 4 +-
.../accumulo/server/init/ZooKeeperInitializer.java | 5 +-
.../accumulo/server/metadata/ServerAmpleImpl.java | 9 +-
.../server/metadata/TabletMutatorBase.java | 16 +-
.../accumulo/server/util/MetadataTableUtil.java | 8 +-
.../server/util/RemoveEntriesForMissingFiles.java | 4 +-
.../constraints/MetadataConstraintsTest.java | 172 +++++++++++++++++----
.../apache/accumulo/server/fs/VolumeUtilTest.java | 107 ++++++-------
.../accumulo/server/util/TableDiskUsageTest.java | 10 +-
.../main/java/org/apache/accumulo/gc/GCRun.java | 6 +-
.../accumulo/gc/GarbageCollectionAlgorithm.java | 4 +-
.../apache/accumulo/gc/GarbageCollectionTest.java | 3 +-
.../accumulo/gc/SimpleGarbageCollectorTest.java | 8 +-
.../accumulo/manager/TabletGroupWatcher.java | 2 +-
.../accumulo/manager/recovery/RecoveryManager.java | 2 +-
.../tableOps/bulkVer2/CleanUpBulkImport.java | 2 +-
.../manager/tableOps/bulkVer2/LoadFiles.java | 8 +-
.../tableOps/tableExport/WriteExportFiles.java | 5 +-
.../tableImport/PopulateMetadataTable.java | 9 +-
.../tserver/compactions/ExternalCompactionJob.java | 2 +-
.../accumulo/tserver/tablet/CompactableUtils.java | 6 +-
.../tserver/tablet/MinorCompactionTask.java | 2 +-
.../accumulo/tserver/tablet/MinorCompactor.java | 2 +-
.../org/apache/accumulo/tserver/tablet/Tablet.java | 2 +-
.../tserver/compaction/CompactableUtilsTest.java | 2 +-
.../tablet/CompactableImplFileManagerTest.java | 3 +-
.../java/org/apache/accumulo/test/CloneIT.java | 41 +++--
.../accumulo/test/ScanServerMetadataEntriesIT.java | 4 +-
.../java/org/apache/accumulo/test/VolumeIT.java | 16 +-
.../accumulo/test/functional/BulkFailureIT.java | 3 +-
.../apache/accumulo/test/functional/BulkNewIT.java | 2 +-
.../accumulo/test/functional/CleanTmpIT.java | 3 +-
.../accumulo/test/functional/CloneTestIT.java | 3 +-
.../test/functional/FileNormalizationIT.java | 6 +-
.../test/functional/FunctionalTestUtils.java | 14 +-
.../test/functional/GarbageCollectorIT.java | 6 +-
.../accumulo/test/functional/ReadWriteIT.java | 3 +-
.../test/functional/RecoveryWithEmptyRFileIT.java | 3 +-
53 files changed, 594 insertions(+), 267 deletions(-)
diff --git a/core/src/main/java/org/apache/accumulo/core/gc/Reference.java
b/core/src/main/java/org/apache/accumulo/core/gc/Reference.java
index cdffbd7eeb..e094fa2bed 100644
--- a/core/src/main/java/org/apache/accumulo/core/gc/Reference.java
+++ b/core/src/main/java/org/apache/accumulo/core/gc/Reference.java
@@ -43,5 +43,6 @@ public interface Reference {
* A directory will be read from the "srv:dir" column family:
* {@link
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily}
*/
- String getMetadataEntry();
+ String getMetadataPath();
+
}
diff --git
a/core/src/main/java/org/apache/accumulo/core/gc/ReferenceDirectory.java
b/core/src/main/java/org/apache/accumulo/core/gc/ReferenceDirectory.java
index b9a6589d9f..f37bc8379e 100644
--- a/core/src/main/java/org/apache/accumulo/core/gc/ReferenceDirectory.java
+++ b/core/src/main/java/org/apache/accumulo/core/gc/ReferenceDirectory.java
@@ -46,11 +46,11 @@ public class ReferenceDirectory extends ReferenceFile {
* A Tablet directory should have a metadata entry equal to the dirName.
*/
@Override
- public String getMetadataEntry() {
- if (!tabletDir.equals(metadataEntry)) {
+ public String getMetadataPath() {
+ if (!tabletDir.equals(metadataPath)) {
throw new IllegalStateException(
- "Tablet dir " + tabletDir + " is not equal to metadataEntry: " +
metadataEntry);
+ "Tablet dir " + tabletDir + " is not equal to metadataPath: " +
metadataPath);
}
- return metadataEntry;
+ return metadataPath;
}
}
diff --git a/core/src/main/java/org/apache/accumulo/core/gc/ReferenceFile.java
b/core/src/main/java/org/apache/accumulo/core/gc/ReferenceFile.java
index 7f796e8de9..c8a98f745a 100644
--- a/core/src/main/java/org/apache/accumulo/core/gc/ReferenceFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/gc/ReferenceFile.java
@@ -21,6 +21,9 @@ package org.apache.accumulo.core.gc;
import java.util.Objects;
import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.metadata.ScanServerRefTabletFile;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
+import org.apache.hadoop.fs.Path;
/**
* A GC reference used for streaming and delete markers. This type is a file.
Subclass is a
@@ -30,12 +33,27 @@ public class ReferenceFile implements Reference,
Comparable<ReferenceFile> {
// parts of an absolute URI, like "hdfs://1.2.3.4/accumulo/tables/2a/t-0003"
public final TableId tableId; // 2a
- // the exact string that is stored in the metadata
- protected final String metadataEntry;
+ // the exact path from the file reference string that is stored in the
metadata
+ protected final String metadataPath;
- public ReferenceFile(TableId tableId, String metadataEntry) {
+ protected ReferenceFile(TableId tableId, String metadataPath) {
this.tableId = Objects.requireNonNull(tableId);
- this.metadataEntry = Objects.requireNonNull(metadataEntry);
+ this.metadataPath = Objects.requireNonNull(metadataPath);
+ }
+
+ public ReferenceFile(TableId tableId, Path metadataPathPath) {
+ this.tableId = Objects.requireNonNull(tableId);
+ this.metadataPath = Objects.requireNonNull(metadataPathPath.toString());
+ }
+
+ public ReferenceFile(TableId tableId, ScanServerRefTabletFile tabletFile) {
+ this.tableId = Objects.requireNonNull(tableId);
+ this.metadataPath =
Objects.requireNonNull(tabletFile.getNormalizedPathStr());
+ }
+
+ public ReferenceFile(TableId tableId, StoredTabletFile tabletFile) {
+ this.tableId = Objects.requireNonNull(tableId);
+ this.metadataPath = Objects.requireNonNull(tabletFile.getMetadataPath());
}
@Override
@@ -49,8 +67,8 @@ public class ReferenceFile implements Reference,
Comparable<ReferenceFile> {
}
@Override
- public String getMetadataEntry() {
- return metadataEntry;
+ public String getMetadataPath() {
+ return metadataPath;
}
@Override
@@ -58,7 +76,7 @@ public class ReferenceFile implements Reference,
Comparable<ReferenceFile> {
if (equals(that)) {
return 0;
} else {
- return this.metadataEntry.compareTo(that.metadataEntry);
+ return this.metadataPath.compareTo(that.metadataPath);
}
}
@@ -74,17 +92,17 @@ public class ReferenceFile implements Reference,
Comparable<ReferenceFile> {
return false;
}
ReferenceFile other = (ReferenceFile) obj;
- return metadataEntry.equals(other.metadataEntry);
+ return metadataPath.equals(other.metadataPath);
}
@Override
public int hashCode() {
- return this.metadataEntry.hashCode();
+ return this.metadataPath.hashCode();
}
@Override
public String toString() {
- return "Reference [id=" + tableId + ", ref=" + metadataEntry + "]";
+ return "Reference [id=" + tableId + ", ref=" + metadataPath + "]";
}
}
diff --git
a/core/src/main/java/org/apache/accumulo/core/metadata/AbstractTabletFile.java
b/core/src/main/java/org/apache/accumulo/core/metadata/AbstractTabletFile.java
index 51b8f6166f..89f4b4bc6b 100644
---
a/core/src/main/java/org/apache/accumulo/core/metadata/AbstractTabletFile.java
+++
b/core/src/main/java/org/apache/accumulo/core/metadata/AbstractTabletFile.java
@@ -23,6 +23,8 @@ import java.util.Objects;
import org.apache.accumulo.core.data.Range;
import org.apache.hadoop.fs.Path;
+import com.google.common.base.Preconditions;
+
/**
* A base class used to represent file references that are handled by code
that processes tablet
* files.
@@ -38,6 +40,11 @@ public abstract class AbstractTabletFile<T extends
AbstractTabletFile<T>>
protected AbstractTabletFile(Path path, Range range) {
this.path = Objects.requireNonNull(path);
this.range = Objects.requireNonNull(range);
+ // Ensure consistency by requiring ranges use true/false for inclusivity
+ // for start and end keys
+ Preconditions.checkArgument(
+ !hasRange() || (range.isStartKeyInclusive() &&
!range.isEndKeyInclusive()),
+ "The Range for a TabletFile must be startKeyInclusive=true and
endKeyInclusive=false");
}
@Override
@@ -54,4 +61,5 @@ public abstract class AbstractTabletFile<T extends
AbstractTabletFile<T>>
public boolean hasRange() {
return !range.isInfiniteStartKey() || !range.isInfiniteStopKey();
}
+
}
diff --git
a/core/src/main/java/org/apache/accumulo/core/metadata/CompactableFileImpl.java
b/core/src/main/java/org/apache/accumulo/core/metadata/CompactableFileImpl.java
index e8cf109060..31fdc55d16 100644
---
a/core/src/main/java/org/apache/accumulo/core/metadata/CompactableFileImpl.java
+++
b/core/src/main/java/org/apache/accumulo/core/metadata/CompactableFileImpl.java
@@ -22,6 +22,7 @@ import java.net.URI;
import java.util.Objects;
import org.apache.accumulo.core.client.admin.compaction.CompactableFile;
+import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.metadata.schema.DataFileValue;
public class CompactableFileImpl implements CompactableFile {
@@ -30,7 +31,7 @@ public class CompactableFileImpl implements CompactableFile {
private final DataFileValue dataFileValue;
public CompactableFileImpl(URI uri, long size, long entries) {
- this.storedTabletFile = new StoredTabletFile(uri.toString());
+ this.storedTabletFile = StoredTabletFile.of(uri, new Range());
this.dataFileValue = new DataFileValue(size, entries);
}
diff --git
a/core/src/main/java/org/apache/accumulo/core/metadata/ReferencedTabletFile.java
b/core/src/main/java/org/apache/accumulo/core/metadata/ReferencedTabletFile.java
index 3edfd1d41f..0300f0ee31 100644
---
a/core/src/main/java/org/apache/accumulo/core/metadata/ReferencedTabletFile.java
+++
b/core/src/main/java/org/apache/accumulo/core/metadata/ReferencedTabletFile.java
@@ -28,7 +28,6 @@ import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.Text;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -196,25 +195,11 @@ public class ReferencedTabletFile extends
AbstractTabletFile<ReferencedTabletFil
return parts.getNormalizedPath();
}
- /**
- * Return a string for inserting a new tablet file.
- */
- public String getMetaInsert() {
- return parts.getNormalizedPath();
- }
-
- /**
- * Return a new Text object of {@link #getMetaInsert()}
- */
- public Text getMetaInsertText() {
- return new Text(getMetaInsert());
- }
-
/**
* New file was written to metadata so return a StoredTabletFile
*/
public StoredTabletFile insert() {
- return new StoredTabletFile(parts.getNormalizedPath());
+ return StoredTabletFile.of(getPath(), getRange());
}
@Override
@@ -250,4 +235,8 @@ public class ReferencedTabletFile extends
AbstractTabletFile<ReferencedTabletFil
return new ReferencedTabletFile(path);
}
+ public static ReferencedTabletFile of(final Path path, Range range) {
+ return new ReferencedTabletFile(path, range);
+ }
+
}
diff --git
a/core/src/main/java/org/apache/accumulo/core/metadata/StoredTabletFile.java
b/core/src/main/java/org/apache/accumulo/core/metadata/StoredTabletFile.java
index 941d98fa88..e0c6f9f4a6 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/StoredTabletFile.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/StoredTabletFile.java
@@ -18,14 +18,23 @@
*/
package org.apache.accumulo.core.metadata;
+import java.io.IOException;
+import java.io.UncheckedIOException;
import java.net.URI;
+import java.util.Comparator;
import java.util.Objects;
+import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
+import org.apache.accumulo.core.util.json.ByteArrayToBase64TypeAdapter;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataInputBuffer;
+import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
+import com.google.gson.Gson;
+
/**
* Object representing a tablet file entry stored in the metadata table. Keeps
a string of the exact
* entry of what is in the metadata table, which is important for updating and
deleting metadata
@@ -40,17 +49,30 @@ import org.apache.hadoop.io.Text;
public class StoredTabletFile extends AbstractTabletFile<StoredTabletFile> {
private final String metadataEntry;
private final ReferencedTabletFile referencedTabletFile;
+ private final String metadataEntryPath;
+
+ // TODO: Comparing by metadata path (file path) and then range object,
+ // Is this ok or should we compare by the metadataEntry string in
lexicographical order?
+ private static final Comparator<StoredTabletFile> comparator = Comparator
+
.comparing(StoredTabletFile::getMetadataPath).thenComparing(StoredTabletFile::getRange);
/**
* Construct a tablet file using the string read from the metadata. Preserve
the exact string so
* the entry can be deleted.
*/
public StoredTabletFile(String metadataEntry) {
- // TODO: Future version of metadataEntry will contains the path
- // and the range so we will need to parse the string here
- super(new Path(URI.create(metadataEntry)), new Range());
- this.metadataEntry = metadataEntry;
- this.referencedTabletFile = ReferencedTabletFile.of(getPath());
+ this(metadataEntry, deserialize(metadataEntry));
+ }
+
+ private StoredTabletFile(TabletFileCq fileCq) {
+ this(serialize(fileCq), fileCq);
+ }
+
+ private StoredTabletFile(String metadataEntry, TabletFileCq fileCq) {
+ super(Objects.requireNonNull(fileCq).path, fileCq.range);
+ this.metadataEntry = Objects.requireNonNull(metadataEntry);
+ this.metadataEntryPath = fileCq.path.toString();
+ this.referencedTabletFile = ReferencedTabletFile.of(getPath(),
fileCq.range);
}
/**
@@ -58,15 +80,22 @@ public class StoredTabletFile extends
AbstractTabletFile<StoredTabletFile> {
* and deleting metadata entries. If the exact string is not used, erroneous
entries can pollute
* the metadata table.
*/
- public String getMetaUpdateDelete() {
+ public String getMetadata() {
return metadataEntry;
}
/**
- * Return a new Text object of {@link #getMetaUpdateDelete()}
+ * Returns just the Path portion of the metadata, not the full Json.
*/
- public Text getMetaUpdateDeleteText() {
- return new Text(getMetaUpdateDelete());
+ public String getMetadataPath() {
+ return metadataEntryPath;
+ }
+
+ /**
+ * Return a new Text object of {@link #getMetadata()}
+ */
+ public Text getMetadataText() {
+ return new Text(getMetadata());
}
public ReferencedTabletFile getTabletFile() {
@@ -91,7 +120,7 @@ public class StoredTabletFile extends
AbstractTabletFile<StoredTabletFile> {
if (equals(o)) {
return 0;
} else {
- return metadataEntry.compareTo(o.metadataEntry);
+ return comparator.compare(this, o);
}
}
@@ -121,11 +150,116 @@ public class StoredTabletFile extends
AbstractTabletFile<StoredTabletFile> {
* Validates that the provided metadata string for the StoredTabletFile is
valid.
*/
public static void validate(String metadataEntry) {
- ReferencedTabletFile.parsePath(new Path(URI.create(metadataEntry)));
+ ReferencedTabletFile.parsePath(deserialize(metadataEntry).path);
+ }
+
+ public static StoredTabletFile of(final Text metadataEntry) {
+ return new
StoredTabletFile(Objects.requireNonNull(metadataEntry).toString());
}
public static StoredTabletFile of(final String metadataEntry) {
return new StoredTabletFile(metadataEntry);
}
+ public static StoredTabletFile of(final URI path, Range range) {
+ return of(new Path(Objects.requireNonNull(path)), range);
+ }
+
+ public static StoredTabletFile of(final Path path, Range range) {
+ return new StoredTabletFile(new TabletFileCq(Objects.requireNonNull(path),
range));
+ }
+
+ private static final Gson gson =
ByteArrayToBase64TypeAdapter.createBase64Gson();
+
+ private static TabletFileCq deserialize(String json) {
+ final TabletFileCqMetadataGson metadata =
+ gson.fromJson(Objects.requireNonNull(json),
TabletFileCqMetadataGson.class);
+ // Recreate the exact Range that was originally stored in Metadata. Stored
ranges are originally
+ // constructed with inclusive/exclusive for the start and end key
inclusivity settings.
+ // (Except for Ranges with no start/endkey as then the inclusivity flags
do not matter)
+ //
+ // With this particular constructor, when setting the startRowInclusive to
true and
+ // endRowInclusive to false, both the start and end row values will be
taken as is
+ // and not modified and will recreate the original Range.
+ //
+ // This constructor will always set the resulting inclusivity of the Range
to be true for the
+ // start row and false for end row regardless of what the
startRowInclusive and endRowInclusive
+ // flags are set to.
+ return new TabletFileCq(new Path(URI.create(metadata.path)),
+ new Range(decodeRow(metadata.startRow), true,
decodeRow(metadata.endRow), false));
+ }
+
+ public static String serialize(String path) {
+ return serialize(path, new Range());
+ }
+
+ public static String serialize(String path, Range range) {
+ final TabletFileCqMetadataGson metadata = new TabletFileCqMetadataGson();
+ metadata.path = Objects.requireNonNull(path);
+
+ // TODO - Add validation on start/end rows exclusive/inclusive in a Range
if not null?
+ // If we can guarantee start is inclusive and end is exclusive then we
don't need to encode
+ // those boolean values or store them.
+ // Should we validate and enforce this when we serialize here or even
earlier when we create the
+ // TabletFile object with a range?
+ metadata.startRow = encodeRow(range.getStartKey());
+ metadata.endRow = encodeRow(range.getEndKey());
+
+ return gson.toJson(metadata);
+ }
+
+ private static String serialize(TabletFileCq tabletFileCq) {
+ return serialize(Objects.requireNonNull(tabletFileCq).path.toString(),
tabletFileCq.range);
+ }
+
+ /**
+ * Helper methods to encode and decode rows in a range to/from byte arrays.
Null rows will just be
+ * returned as an empty byte array
+ **/
+
+ private static byte[] encodeRow(final Key key) {
+ final Text row = key != null ? key.getRow() : null;
+ if (row != null) {
+ try (DataOutputBuffer buffer = new DataOutputBuffer()) {
+ row.write(buffer);
+ return buffer.getData();
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+ // Empty byte array means null row
+ return new byte[0];
+ }
+
+ private static Text decodeRow(byte[] serialized) {
+ // Empty byte array means null row
+ if (serialized.length == 0) {
+ return null;
+ }
+
+ try (DataInputBuffer buffer = new DataInputBuffer()) {
+ final Text row = new Text();
+ buffer.reset(serialized, serialized.length);
+ row.readFields(buffer);
+ return row;
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
+ }
+
+ private static class TabletFileCq {
+ public final Path path;
+ public final Range range;
+
+ public TabletFileCq(Path path, Range range) {
+ this.path = Objects.requireNonNull(path);
+ this.range = Objects.requireNonNull(range);
+ }
+ }
+
+ private static class TabletFileCqMetadataGson {
+ private String path;
+ private byte[] startRow;
+ private byte[] endRow;
+ }
}
diff --git
a/core/src/main/java/org/apache/accumulo/core/metadata/ValidationUtil.java
b/core/src/main/java/org/apache/accumulo/core/metadata/ValidationUtil.java
index db60a89200..26d49e5182 100644
--- a/core/src/main/java/org/apache/accumulo/core/metadata/ValidationUtil.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/ValidationUtil.java
@@ -43,7 +43,7 @@ public class ValidationUtil {
}
public static ReferenceFile validate(ReferenceFile reference) {
- validate(new Path(reference.getMetadataEntry()));
+ validate(new Path(reference.getMetadataPath()));
return reference;
}
diff --git
a/core/src/main/java/org/apache/accumulo/core/metadata/schema/ExternalCompactionMetadata.java
b/core/src/main/java/org/apache/accumulo/core/metadata/schema/ExternalCompactionMetadata.java
index 5eef8c3d5d..a5beddd7ad 100644
---
a/core/src/main/java/org/apache/accumulo/core/metadata/schema/ExternalCompactionMetadata.java
+++
b/core/src/main/java/org/apache/accumulo/core/metadata/schema/ExternalCompactionMetadata.java
@@ -31,7 +31,6 @@ import org.apache.accumulo.core.metadata.StoredTabletFile;
import org.apache.accumulo.core.spi.compaction.CompactionExecutorId;
import org.apache.accumulo.core.spi.compaction.CompactionKind;
import org.apache.accumulo.core.util.compaction.CompactionExecutorIdImpl;
-import org.apache.hadoop.fs.Path;
public class ExternalCompactionMetadata {
@@ -126,10 +125,9 @@ public class ExternalCompactionMetadata {
public String toJson() {
GSonData jData = new GSonData();
- jData.inputs =
jobFiles.stream().map(StoredTabletFile::getMetaUpdateDelete).collect(toList());
- jData.nextFiles =
-
nextFiles.stream().map(StoredTabletFile::getMetaUpdateDelete).collect(toList());
- jData.tmp = compactTmpName.getMetaInsert();
+ jData.inputs =
jobFiles.stream().map(StoredTabletFile::getMetadata).collect(toList());
+ jData.nextFiles =
nextFiles.stream().map(StoredTabletFile::getMetadata).collect(toList());
+ jData.tmp = compactTmpName.insert().getMetadata();
jData.compactor = compactorId;
jData.kind = kind.name();
jData.executorId = ((CompactionExecutorIdImpl) ceid).getExternalName();
@@ -146,7 +144,7 @@ public class ExternalCompactionMetadata {
return new ExternalCompactionMetadata(
jData.inputs.stream().map(StoredTabletFile::new).collect(toSet()),
jData.nextFiles.stream().map(StoredTabletFile::new).collect(toSet()),
- new ReferencedTabletFile(new Path(jData.tmp)), jData.compactor,
+ StoredTabletFile.of(jData.tmp).getTabletFile(), jData.compactor,
CompactionKind.valueOf(jData.kind), jData.priority,
CompactionExecutorIdImpl.externalId(jData.executorId), jData.propDels,
jData.selectedAll,
jData.compactionId);
diff --git
a/core/src/test/java/org/apache/accumulo/core/metadata/schema/ReferencedTabletFileTest.java
b/core/src/test/java/org/apache/accumulo/core/metadata/schema/ReferencedTabletFileTest.java
index f88ed74780..a71f26365c 100644
---
a/core/src/test/java/org/apache/accumulo/core/metadata/schema/ReferencedTabletFileTest.java
+++
b/core/src/test/java/org/apache/accumulo/core/metadata/schema/ReferencedTabletFileTest.java
@@ -20,7 +20,11 @@ package org.apache.accumulo.core.metadata.schema;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import java.net.URI;
+
+import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.metadata.ReferencedTabletFile;
import org.apache.accumulo.core.metadata.StoredTabletFile;
@@ -28,13 +32,16 @@ import org.junit.jupiter.api.Test;
public class ReferencedTabletFileTest {
- private ReferencedTabletFile test(String metadataEntry, String volume,
String tableId,
+ private ReferencedTabletFile test(String metadataFile, String volume, String
tableId,
String tabletDir, String fileName) {
- StoredTabletFile storedTabletFile = new StoredTabletFile(metadataEntry);
+ String metadataPath = StoredTabletFile.serialize(metadataFile);
+ StoredTabletFile storedTabletFile = new StoredTabletFile(metadataPath);
ReferencedTabletFile tabletFile = storedTabletFile.getTabletFile();
+ // Make sure original file name wasn't changed when serialized
+ assertTrue(metadataPath.contains(metadataFile));
assertEquals(volume, tabletFile.getVolume());
- assertEquals(metadataEntry, storedTabletFile.getMetaUpdateDelete());
+ assertEquals(metadataPath, storedTabletFile.getMetadata());
assertEquals(TableId.of(tableId), tabletFile.getTableId());
assertEquals(tabletDir, tabletFile.getTabletDir());
assertEquals(fileName, tabletFile.getFileName());
@@ -102,9 +109,10 @@ public class ReferencedTabletFileTest {
String metadataEntry = uglyVolume + "/tables/" + id + "/" + dir + "/" +
filename;
ReferencedTabletFile uglyFile =
test(metadataEntry, "hdfs://nn.somewhere.com:86753/accumulo", id, dir,
filename);
- ReferencedTabletFile niceFile = StoredTabletFile
- .of("hdfs://nn.somewhere.com:86753/accumulo/tables/" + id + "/" + dir
+ "/" + filename)
- .getTabletFile();
+ ReferencedTabletFile niceFile = StoredTabletFile.of(
+ URI.create(
+ "hdfs://nn.somewhere.com:86753/accumulo/tables/" + id + "/" + dir
+ "/" + filename),
+ new Range()).getTabletFile();
assertEquals(niceFile, uglyFile);
assertEquals(niceFile.hashCode(), uglyFile.hashCode());
}
diff --git
a/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java
b/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java
index 50c32acc62..3b856726be 100644
---
a/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java
+++
b/core/src/test/java/org/apache/accumulo/core/metadata/schema/TabletMetadataTest.java
@@ -19,6 +19,7 @@
package org.apache.accumulo.core.metadata.schema;
import static java.util.stream.Collectors.toSet;
+import static org.apache.accumulo.core.metadata.StoredTabletFile.serialize;
import static
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.COMPACT_COLUMN;
import static
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN;
import static
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily.FLUSH_COLUMN;
@@ -79,21 +80,19 @@ public class TabletMetadataTest {
FLUSH_COLUMN.put(mutation, new Value("6"));
TIME_COLUMN.put(mutation, new Value("M123456789"));
- String bf1 = "hdfs://nn1/acc/tables/1/t-0001/bf1";
- String bf2 = "hdfs://nn1/acc/tables/1/t-0001/bf2";
+ String bf1 = serialize("hdfs://nn1/acc/tables/1/t-0001/bf1");
+ String bf2 = serialize("hdfs://nn1/acc/tables/1/t-0001/bf2");
mutation.at().family(BulkFileColumnFamily.NAME).qualifier(bf1).put(FateTxId.formatTid(56));
mutation.at().family(BulkFileColumnFamily.NAME).qualifier(bf2).put(FateTxId.formatTid(59));
mutation.at().family(ClonedColumnFamily.NAME).qualifier("").put("OK");
DataFileValue dfv1 = new DataFileValue(555, 23);
- StoredTabletFile tf1 = new
StoredTabletFile("hdfs://nn1/acc/tables/1/t-0001/df1.rf");
- StoredTabletFile tf2 = new
StoredTabletFile("hdfs://nn1/acc/tables/1/t-0001/df2.rf");
-
mutation.at().family(DataFileColumnFamily.NAME).qualifier(tf1.getMetaUpdateDelete())
- .put(dfv1.encode());
+ StoredTabletFile tf1 = new
StoredTabletFile(serialize("hdfs://nn1/acc/tables/1/t-0001/df1.rf"));
+ StoredTabletFile tf2 = new
StoredTabletFile(serialize("hdfs://nn1/acc/tables/1/t-0001/df2.rf"));
+
mutation.at().family(DataFileColumnFamily.NAME).qualifier(tf1.getMetadata()).put(dfv1.encode());
DataFileValue dfv2 = new DataFileValue(234, 13);
-
mutation.at().family(DataFileColumnFamily.NAME).qualifier(tf2.getMetaUpdateDelete())
- .put(dfv2.encode());
+
mutation.at().family(DataFileColumnFamily.NAME).qualifier(tf2.getMetadata()).put(dfv2.encode());
mutation.at().family(CurrentLocationColumnFamily.NAME).qualifier("s001").put("server1:8555");
@@ -106,10 +105,10 @@ public class TabletMetadataTest {
mutation.at().family(le2.getColumnFamily()).qualifier(le2.getColumnQualifier())
.timestamp(le2.timestamp).put(le2.getValue());
- StoredTabletFile sf1 = new
StoredTabletFile("hdfs://nn1/acc/tables/1/t-0001/sf1.rf");
- StoredTabletFile sf2 = new
StoredTabletFile("hdfs://nn1/acc/tables/1/t-0001/sf2.rf");
-
mutation.at().family(ScanFileColumnFamily.NAME).qualifier(sf1.getMetaUpdateDelete()).put("");
-
mutation.at().family(ScanFileColumnFamily.NAME).qualifier(sf2.getMetaUpdateDelete()).put("");
+ StoredTabletFile sf1 = new
StoredTabletFile(serialize("hdfs://nn1/acc/tables/1/t-0001/sf1.rf"));
+ StoredTabletFile sf2 = new
StoredTabletFile(serialize("hdfs://nn1/acc/tables/1/t-0001/sf2.rf"));
+
mutation.at().family(ScanFileColumnFamily.NAME).qualifier(sf1.getMetadata()).put("");
+
mutation.at().family(ScanFileColumnFamily.NAME).qualifier(sf2.getMetadata()).put("");
SortedMap<Key,Value> rowMap = toRowMap(mutation);
diff --git
a/server/base/src/main/java/org/apache/accumulo/server/compaction/CompactionInfo.java
b/server/base/src/main/java/org/apache/accumulo/server/compaction/CompactionInfo.java
index 6b13fad689..57d4d24120 100644
---
a/server/base/src/main/java/org/apache/accumulo/server/compaction/CompactionInfo.java
+++
b/server/base/src/main/java/org/apache/accumulo/server/compaction/CompactionInfo.java
@@ -74,7 +74,7 @@ public class CompactionInfo {
return compactor.thread;
}
- public String getOutputFile() {
+ public StoredTabletFile getOutputFile() {
return compactor.getOutputFile();
}
@@ -105,7 +105,8 @@ public class CompactionInfo {
List<String> files = compactor.getFilesToCompact().stream()
.map(StoredTabletFile::getNormalizedPathStr).collect(Collectors.toList());
return new ActiveCompaction(compactor.extent.toThrift(),
- System.currentTimeMillis() - compactor.getStartTime(), files,
compactor.getOutputFile(),
- type, reason, localityGroup, entriesRead, entriesWritten, iiList,
iterOptions, timesPaused);
+ System.currentTimeMillis() - compactor.getStartTime(), files,
+ compactor.getOutputFile().getMetadataPath(), type, reason,
localityGroup, entriesRead,
+ entriesWritten, iiList, iterOptions, timesPaused);
}
}
diff --git
a/server/base/src/main/java/org/apache/accumulo/server/compaction/FileCompactor.java
b/server/base/src/main/java/org/apache/accumulo/server/compaction/FileCompactor.java
index 03091f2e86..1c5b35e81f 100644
---
a/server/base/src/main/java/org/apache/accumulo/server/compaction/FileCompactor.java
+++
b/server/base/src/main/java/org/apache/accumulo/server/compaction/FileCompactor.java
@@ -191,8 +191,8 @@ public class FileCompactor implements
Callable<CompactionStats> {
return extent;
}
- protected String getOutputFile() {
- return outputFile.getMetaInsert();
+ protected StoredTabletFile getOutputFile() {
+ return outputFile.insert();
}
protected Map<String,Set<ByteSequence>>
getLocalityGroups(AccumuloConfiguration acuTableConf)
diff --git
a/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
b/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
index 921639bda8..543cdff7c1 100644
---
a/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
+++
b/server/base/src/main/java/org/apache/accumulo/server/constraints/MetadataConstraints.java
@@ -233,8 +233,16 @@ public class MetadataConstraints implements Constraint {
validateDataFilePath(violations, new
String(columnUpdate.getColumnQualifier(), UTF_8));
} else if (columnFamily.equals(BulkFileColumnFamily.NAME)) {
if (!columnUpdate.isDeleted() && !checkedBulk) {
- violations = validateDataFilePath(violations,
- new String(columnUpdate.getColumnQualifier(), UTF_8));
+ /*
+ * TODO: This needs to be re-worked after Issue
+ * https://github.com/apache/accumulo/issues/3505 is done.
+ *
+ * That issue will reorganize this class and make things more
efficient so we are not
+ * looping over the same mutation more than once like in this case.
The below check is
+ * commented out for now because the violation check is already done
when creating
+ * StoredTabletFiles so it isn't needed here anymore: violations =
+ * validateDataFilePath(violations, new
String(columnUpdate.getColumnQualifier(), UTF_8));
+ */
// splits, which also write the time reference, are allowed to write
this reference even
// when
@@ -249,8 +257,8 @@ public class MetadataConstraints implements Constraint {
// See ACCUMULO-1230.
boolean isLocationMutation = false;
- HashSet<Text> dataFiles = new HashSet<>();
- HashSet<Text> loadedFiles = new HashSet<>();
+ HashSet<StoredTabletFile> dataFiles = new HashSet<>();
+ HashSet<StoredTabletFile> loadedFiles = new HashSet<>();
String tidString = new String(columnUpdate.getValue(), UTF_8);
int otherTidCount = 0;
@@ -262,9 +270,20 @@ public class MetadataConstraints implements Constraint {
.equals(CurrentLocationColumnFamily.NAME)) {
isLocationMutation = true;
} else if (new
Text(update.getColumnFamily()).equals(DataFileColumnFamily.NAME)) {
- dataFiles.add(new Text(update.getColumnQualifier()));
+ try {
+ // TODO: This actually validates for a second time as the loop
already validates
+ // if a DataFileColumnFamily, this will likely be fixed as
part of
+ // https://github.com/apache/accumulo/issues/3505
+ dataFiles.add(StoredTabletFile.of(new
Text(update.getColumnQualifier())));
+ } catch (RuntimeException e) {
+ violations = addViolation(violations, 9);
+ }
} else if (new
Text(update.getColumnFamily()).equals(BulkFileColumnFamily.NAME)) {
- loadedFiles.add(new Text(update.getColumnQualifier()));
+ try {
+ loadedFiles.add(StoredTabletFile.of(new
Text(update.getColumnQualifier())));
+ } catch (RuntimeException e) {
+ violations = addViolation(violations, 9);
+ }
if (!new String(update.getValue(), UTF_8).equals(tidString)) {
otherTidCount++;
diff --git
a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
index 2dbdbf8568..735ce2eeef 100644
--- a/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/fs/VolumeUtil.java
@@ -21,6 +21,7 @@ package org.apache.accumulo.server.fs;
import java.util.ArrayList;
import java.util.List;
import java.util.Map.Entry;
+import java.util.Objects;
import java.util.SortedMap;
import java.util.TreeMap;
@@ -53,29 +54,29 @@ public class VolumeUtil {
}
public static Path removeTrailingSlash(Path path) {
- String pathStr = path.toString();
+ String pathStr = Objects.requireNonNull(path).toString();
if (pathStr.endsWith("/")) {
return new Path(removeTrailingSlash(pathStr));
}
return path;
}
- public static Path switchVolume(String path, FileType ft,
List<Pair<Path,Path>> replacements) {
+ public static Path switchVolume(Path path, FileType ft,
List<Pair<Path,Path>> replacements) {
if (replacements.isEmpty()) {
log.trace("Not switching volume because there are no replacements");
return null;
}
- Path p = new Path(path);
// removing slash because new Path("hdfs://nn1").equals(new
Path("hdfs://nn1/")) evaluates to
// false
- Path volume = removeTrailingSlash(ft.getVolume(p));
+ Path volume =
removeTrailingSlash(ft.getVolume(Objects.requireNonNull(path)));
for (Pair<Path,Path> pair : replacements) {
Path key = removeTrailingSlash(pair.getFirst());
if (key.equals(volume)) {
- Path replacement = new Path(pair.getSecond(), ft.removeVolume(p));
+ Path replacement =
+ new Path(pair.getSecond(),
Objects.requireNonNull(ft.removeVolume(path)));
log.trace("Replacing {} with {}", path, replacement);
return replacement;
}
@@ -87,7 +88,7 @@ public class VolumeUtil {
}
private static LogEntry switchVolumes(LogEntry le, List<Pair<Path,Path>>
replacements) {
- Path switchedPath = switchVolume(le.filename, FileType.WAL, replacements);
+ Path switchedPath = switchVolume(new Path(le.filename), FileType.WAL,
replacements);
String switchedString;
int numSwitched = 0;
if (switchedPath != null) {
@@ -162,11 +163,12 @@ public class VolumeUtil {
}
for (Entry<StoredTabletFile,DataFileValue> entry :
tabletFiles.datafiles.entrySet()) {
- String metaPath = entry.getKey().getMetaUpdateDelete();
- Path switchedPath = switchVolume(metaPath, FileType.TABLE, replacements);
+ String metaPath = entry.getKey().getMetadata();
+ Path switchedPath = switchVolume(entry.getKey().getPath(),
FileType.TABLE, replacements);
if (switchedPath != null) {
filesToRemove.add(entry.getKey());
- ReferencedTabletFile switchedFile = new
ReferencedTabletFile(switchedPath);
+ ReferencedTabletFile switchedFile =
+ new ReferencedTabletFile(switchedPath, entry.getKey().getRange());
filesToAdd.put(switchedFile, entry.getValue());
ret.datafiles.put(switchedFile.insert(), entry.getValue());
log.debug("Replacing volume {} : {} -> {}", extent, metaPath,
switchedPath);
diff --git
a/server/base/src/main/java/org/apache/accumulo/server/gc/AllVolumesDirectory.java
b/server/base/src/main/java/org/apache/accumulo/server/gc/AllVolumesDirectory.java
index 2dbc1705f3..7d96cef6f4 100644
---
a/server/base/src/main/java/org/apache/accumulo/server/gc/AllVolumesDirectory.java
+++
b/server/base/src/main/java/org/apache/accumulo/server/gc/AllVolumesDirectory.java
@@ -42,8 +42,8 @@ public class AllVolumesDirectory extends ReferenceFile {
}
@Override
- public String getMetadataEntry() {
- return metadataEntry;
+ public String getMetadataPath() {
+ return metadataPath;
}
@Override
diff --git
a/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java
b/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java
index a939d237c1..70782ad631 100644
---
a/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java
+++
b/server/base/src/main/java/org/apache/accumulo/server/init/ZooKeeperInitializer.java
@@ -34,6 +34,7 @@ import
org.apache.accumulo.core.manager.state.tables.TableState;
import org.apache.accumulo.core.manager.thrift.ManagerGoalState;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
import org.apache.accumulo.core.metadata.schema.DataFileValue;
import org.apache.accumulo.core.metadata.schema.MetadataSchema;
import org.apache.accumulo.core.metadata.schema.MetadataTime;
@@ -174,8 +175,8 @@ public class ZooKeeperInitializer {
MetadataSchema.TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(mutation,
new Value(dirName));
- mutation.put(MetadataSchema.TabletsSection.DataFileColumnFamily.STR_NAME,
file,
- new DataFileValue(0, 0).encodeAsValue());
+ mutation.put(MetadataSchema.TabletsSection.DataFileColumnFamily.STR_NAME,
+ StoredTabletFile.serialize(file), new DataFileValue(0,
0).encodeAsValue());
MetadataSchema.TabletsSection.ServerColumnFamily.TIME_COLUMN.put(mutation,
new Value(new MetadataTime(0, TimeType.LOGICAL).encode()));
diff --git
a/server/base/src/main/java/org/apache/accumulo/server/metadata/ServerAmpleImpl.java
b/server/base/src/main/java/org/apache/accumulo/server/metadata/ServerAmpleImpl.java
index 049ccc800d..7ff258b591 100644
---
a/server/base/src/main/java/org/apache/accumulo/server/metadata/ServerAmpleImpl.java
+++
b/server/base/src/main/java/org/apache/accumulo/server/metadata/ServerAmpleImpl.java
@@ -22,6 +22,7 @@ import static java.nio.charset.StandardCharsets.UTF_8;
import static
org.apache.accumulo.core.metadata.RootTable.ZROOT_TABLET_GC_CANDIDATES;
import static org.apache.accumulo.server.util.MetadataTableUtil.EMPTY_TEXT;
+import java.net.URI;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
@@ -139,8 +140,8 @@ public class ServerAmpleImpl extends AmpleImpl implements
Ample {
if (DataLevel.of(tableId) == DataLevel.ROOT) {
// Directories are unexpected for the root tablet, so convert to stored
tablet file
- mutateRootGcCandidates(rgcc -> rgcc.add(candidates.stream()
- .map(reference -> new
StoredTabletFile(reference.getMetadataEntry()))));
+ mutateRootGcCandidates(rgcc -> rgcc.add(candidates.stream().map(
+ reference ->
StoredTabletFile.of(URI.create(reference.getMetadataPath()), new Range()))));
return;
}
@@ -259,11 +260,11 @@ public class ServerAmpleImpl extends AmpleImpl implements
Ample {
@Override
public Mutation createDeleteMutation(ReferenceFile tabletFilePathToRemove) {
- return
createDelMutation(ValidationUtil.validate(tabletFilePathToRemove).getMetadataEntry());
+ return
createDelMutation(ValidationUtil.validate(tabletFilePathToRemove).getMetadataPath());
}
public Mutation createDeleteMutation(StoredTabletFile pathToRemove) {
- return createDelMutation(pathToRemove.getMetaUpdateDelete());
+ return createDelMutation(pathToRemove.getMetadataPath());
}
private Mutation createDelMutation(String path) {
diff --git
a/server/base/src/main/java/org/apache/accumulo/server/metadata/TabletMutatorBase.java
b/server/base/src/main/java/org/apache/accumulo/server/metadata/TabletMutatorBase.java
index d10b4fe665..90a6217bc1 100644
---
a/server/base/src/main/java/org/apache/accumulo/server/metadata/TabletMutatorBase.java
+++
b/server/base/src/main/java/org/apache/accumulo/server/metadata/TabletMutatorBase.java
@@ -83,36 +83,36 @@ public abstract class TabletMutatorBase implements
Ample.TabletMutator {
@Override
public Ample.TabletMutator putFile(ReferencedTabletFile path, DataFileValue
dfv) {
Preconditions.checkState(updatesEnabled, "Cannot make updates after
calling mutate.");
- mutation.put(DataFileColumnFamily.NAME, path.getMetaInsertText(), new
Value(dfv.encode()));
+ mutation.put(DataFileColumnFamily.NAME, path.insert().getMetadataText(),
+ new Value(dfv.encode()));
return this;
}
@Override
public Ample.TabletMutator putFile(StoredTabletFile path, DataFileValue dfv)
{
Preconditions.checkState(updatesEnabled, "Cannot make updates after
calling mutate.");
- mutation.put(DataFileColumnFamily.NAME, path.getMetaUpdateDeleteText(),
- new Value(dfv.encode()));
+ mutation.put(DataFileColumnFamily.NAME, path.getMetadataText(), new
Value(dfv.encode()));
return this;
}
@Override
public Ample.TabletMutator deleteFile(StoredTabletFile path) {
Preconditions.checkState(updatesEnabled, "Cannot make updates after
calling mutate.");
- mutation.putDelete(DataFileColumnFamily.NAME,
path.getMetaUpdateDeleteText());
+ mutation.putDelete(DataFileColumnFamily.NAME, path.getMetadataText());
return this;
}
@Override
public Ample.TabletMutator putScan(StoredTabletFile path) {
Preconditions.checkState(updatesEnabled, "Cannot make updates after
calling mutate.");
- mutation.put(ScanFileColumnFamily.NAME, path.getMetaUpdateDeleteText(),
new Value());
+ mutation.put(ScanFileColumnFamily.NAME, path.getMetadataText(), new
Value());
return this;
}
@Override
public Ample.TabletMutator deleteScan(StoredTabletFile path) {
Preconditions.checkState(updatesEnabled, "Cannot make updates after
calling mutate.");
- mutation.putDelete(ScanFileColumnFamily.NAME,
path.getMetaUpdateDeleteText());
+ mutation.putDelete(ScanFileColumnFamily.NAME, path.getMetadataText());
return this;
}
@@ -197,7 +197,7 @@ public abstract class TabletMutatorBase implements
Ample.TabletMutator {
@Override
public Ample.TabletMutator putBulkFile(ReferencedTabletFile bulkref, long
tid) {
Preconditions.checkState(updatesEnabled, "Cannot make updates after
calling mutate.");
- mutation.put(BulkFileColumnFamily.NAME, bulkref.getMetaInsertText(),
+ mutation.put(BulkFileColumnFamily.NAME, bulkref.insert().getMetadataText(),
new Value(FateTxId.formatTid(tid)));
return this;
}
@@ -205,7 +205,7 @@ public abstract class TabletMutatorBase implements
Ample.TabletMutator {
@Override
public Ample.TabletMutator deleteBulkFile(StoredTabletFile bulkref) {
Preconditions.checkState(updatesEnabled, "Cannot make updates after
calling mutate.");
- mutation.putDelete(BulkFileColumnFamily.NAME,
bulkref.getMetaUpdateDeleteText());
+ mutation.putDelete(BulkFileColumnFamily.NAME, bulkref.getMetadataText());
return this;
}
diff --git
a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
index 2acaba8a1c..eee7cf3657 100644
---
a/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
+++
b/server/base/src/main/java/org/apache/accumulo/server/util/MetadataTableUtil.java
@@ -259,12 +259,12 @@ public class MetadataTableUtil {
ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
for (Entry<StoredTabletFile,DataFileValue> entry :
datafileSizes.entrySet()) {
- m.put(DataFileColumnFamily.NAME,
entry.getKey().getMetaUpdateDeleteText(),
+ m.put(DataFileColumnFamily.NAME, entry.getKey().getMetadataText(),
new Value(entry.getValue().encode()));
}
for (StoredTabletFile pathToRemove : highDatafilesToRemove) {
- m.putDelete(DataFileColumnFamily.NAME,
pathToRemove.getMetaUpdateDeleteText());
+ m.putDelete(DataFileColumnFamily.NAME, pathToRemove.getMetadataText());
}
update(context, zooLock, m, KeyExtent.fromMetaRow(metadataEntry));
@@ -358,8 +358,7 @@ public class MetadataTableUtil {
if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
StoredTabletFile stf = new
StoredTabletFile(key.getColumnQualifierData().toString());
- bw.addMutation(
- ample.createDeleteMutation(new ReferenceFile(tableId,
stf.getMetaUpdateDelete())));
+ bw.addMutation(ample.createDeleteMutation(new
ReferenceFile(tableId, stf)));
}
if (ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
@@ -437,6 +436,7 @@ public class MetadataTableUtil {
if (!cf.startsWith("../") && !cf.contains(":")) {
cf = "../" + srcTableId + entry.getKey().getColumnQualifier();
}
+ // TODO: Fix this?
m.put(entry.getKey().getColumnFamily(), new Text(cf),
entry.getValue());
} else if
(entry.getKey().getColumnFamily().equals(CurrentLocationColumnFamily.NAME)) {
m.put(LastLocationColumnFamily.NAME,
entry.getKey().getColumnQualifier(), entry.getValue());
diff --git
a/server/base/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
b/server/base/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
index 2f6e0a1e82..e30c98b22f 100644
---
a/server/base/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
+++
b/server/base/src/main/java/org/apache/accumulo/server/util/RemoveEntriesForMissingFiles.java
@@ -37,7 +37,7 @@ import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.RootTable;
-import org.apache.accumulo.core.metadata.ValidationUtil;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import org.apache.accumulo.core.security.Authorizations;
@@ -146,7 +146,7 @@ public class RemoveEntriesForMissingFiles {
count++;
Key key = entry.getKey();
- Path map = new
Path(ValidationUtil.validate(key.getColumnQualifierData().toString()));
+ Path map =
StoredTabletFile.of(key.getColumnQualifierData().toString()).getPath();
synchronized (processing) {
while (processing.size() >= 64 || processing.contains(map)) {
diff --git
a/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
b/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
index be66aa1d66..1aeadb6736 100644
---
a/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
+++
b/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
@@ -22,11 +22,14 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
+import java.net.URI;
import java.util.List;
import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
import org.apache.accumulo.core.metadata.schema.DataFileValue;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.BulkFileColumnFamily;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.CurrentLocationColumnFamily;
@@ -154,9 +157,15 @@ public class MetadataConstraintsTest {
// inactive txid
m = new Mutation(new Text("0;foo"));
- m.put(BulkFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"),
+ m.put(BulkFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new Value("12345"));
- m.put(DataFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"),
+ m.put(DataFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new DataFileValue(1, 1).encodeAsValue());
violations = mc.check(createEnv(), m);
assertNotNull(violations);
@@ -165,9 +174,15 @@ public class MetadataConstraintsTest {
// txid that throws exception
m = new Mutation(new Text("0;foo"));
- m.put(BulkFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"),
+ m.put(BulkFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new Value("9"));
- m.put(DataFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"),
+ m.put(DataFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new DataFileValue(1, 1).encodeAsValue());
violations = mc.check(createEnv(), m);
assertNotNull(violations);
@@ -176,16 +191,25 @@ public class MetadataConstraintsTest {
// active txid w/ file
m = new Mutation(new Text("0;foo"));
- m.put(BulkFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"),
+ m.put(BulkFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new Value("5"));
- m.put(DataFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"),
+ m.put(DataFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new DataFileValue(1, 1).encodeAsValue());
violations = mc.check(createEnv(), m);
assertNull(violations);
// active txid w/o file
m = new Mutation(new Text("0;foo"));
- m.put(BulkFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"),
+ m.put(BulkFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new Value("5"));
violations = mc.check(createEnv(), m);
assertNotNull(violations);
@@ -194,13 +218,25 @@ public class MetadataConstraintsTest {
// two active txids w/ files
m = new Mutation(new Text("0;foo"));
- m.put(BulkFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"),
+ m.put(BulkFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new Value("5"));
- m.put(DataFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"),
+ m.put(DataFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new DataFileValue(1, 1).encodeAsValue());
- m.put(BulkFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile2"),
+ m.put(BulkFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2"), new
Range())
+ .getMetadataText(),
new Value("7"));
- m.put(DataFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile2"),
+ m.put(DataFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2"), new
Range())
+ .getMetadataText(),
new DataFileValue(1, 1).encodeAsValue());
violations = mc.check(createEnv(), m);
assertNotNull(violations);
@@ -209,24 +245,45 @@ public class MetadataConstraintsTest {
// two files w/ one active txid
m = new Mutation(new Text("0;foo"));
- m.put(BulkFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"),
+ m.put(BulkFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new Value("5"));
- m.put(DataFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"),
+ m.put(DataFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new DataFileValue(1, 1).encodeAsValue());
- m.put(BulkFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile2"),
+ m.put(BulkFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2"), new
Range())
+ .getMetadataText(),
new Value("5"));
- m.put(DataFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile2"),
+ m.put(DataFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2"), new
Range())
+ .getMetadataText(),
new DataFileValue(1, 1).encodeAsValue());
violations = mc.check(createEnv(), m);
assertNull(violations);
// two loaded w/ one active txid and one file
m = new Mutation(new Text("0;foo"));
- m.put(BulkFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"),
+ m.put(BulkFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new Value("5"));
- m.put(DataFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"),
+ m.put(DataFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new DataFileValue(1, 1).encodeAsValue());
- m.put(BulkFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile2"),
+ m.put(BulkFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile2"), new
Range())
+ .getMetadataText(),
new Value("5"));
violations = mc.check(createEnv(), m);
assertNotNull(violations);
@@ -235,7 +292,10 @@ public class MetadataConstraintsTest {
// active txid, mutation that looks like split
m = new Mutation(new Text("0;foo"));
- m.put(BulkFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"),
+ m.put(BulkFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new Value("5"));
ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t1"));
violations = mc.check(createEnv(), m);
@@ -243,7 +303,10 @@ public class MetadataConstraintsTest {
// inactive txid, mutation that looks like split
m = new Mutation(new Text("0;foo"));
- m.put(BulkFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"),
+ m.put(BulkFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new Value("12345"));
ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value("/t1"));
violations = mc.check(createEnv(), m);
@@ -251,7 +314,10 @@ public class MetadataConstraintsTest {
// active txid, mutation that looks like a load
m = new Mutation(new Text("0;foo"));
- m.put(BulkFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"),
+ m.put(BulkFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new Value("5"));
m.put(CurrentLocationColumnFamily.NAME, new Text("789"), new
Value("127.0.0.1:9997"));
violations = mc.check(createEnv(), m);
@@ -259,7 +325,10 @@ public class MetadataConstraintsTest {
// inactive txid, mutation that looks like a load
m = new Mutation(new Text("0;foo"));
- m.put(BulkFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"),
+ m.put(BulkFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new Value("12345"));
m.put(CurrentLocationColumnFamily.NAME, new Text("789"), new
Value("127.0.0.1:9997"));
violations = mc.check(createEnv(), m);
@@ -268,31 +337,49 @@ public class MetadataConstraintsTest {
// deleting a load flag
m = new Mutation(new Text("0;foo"));
m.putDelete(BulkFileColumnFamily.NAME,
- new Text("hdfs://nn1/a/accumulo/tables/2b/t-001/someFile"));
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText());
violations = mc.check(createEnv(), m);
assertNull(violations);
// Missing beginning of path
m = new Mutation(new Text("0;foo"));
- m.put(BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5"));
+ m.put(BulkFileColumnFamily.NAME, new Text(StoredTabletFile
+ .of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"),
new Range())
+
.getMetadata().replace("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile",
"/someFile")),
+ new Value("5"));
violations = mc.check(createEnv(), m);
assertNotNull(violations);
- assertEquals(2, violations.size());
+ assertEquals(1, violations.size());
assertEquals(Short.valueOf((short) 9), violations.get(0));
assertNotNull(mc.getViolationDescription(violations.get(0)));
- // No DataFileColumnFamily included
- assertEquals(Short.valueOf((short) 8), violations.get(1));
// Missing tables directory in path
m = new Mutation(new Text("0;foo"));
- m.put(BulkFileColumnFamily.NAME, new
Text("hdfs://nn1/a/accumulo/2b/t-001/C00.rf"),
+ m.put(BulkFileColumnFamily.NAME,
+ new Text(StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+
.getMetadata().replace("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile",
+ "hdfs://1.2.3.4/accumulo/2a/t-0003/someFile")),
new Value("5"));
violations = mc.check(createEnv(), m);
assertNotNull(violations);
- assertEquals(2, violations.size());
+ assertEquals(1, violations.size());
assertEquals(Short.valueOf((short) 9), violations.get(0));
+
// No DataFileColumnFamily included
- assertEquals(Short.valueOf((short) 8), violations.get(1));
+ m = new Mutation(new Text("0;foo"));
+ m.put(BulkFileColumnFamily.NAME,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
+ new Value("5"));
+ violations = mc.check(createEnv(), m);
+ assertNotNull(violations);
+ assertEquals(1, violations.size());
+ assertEquals(Short.valueOf((short) 8), violations.get(0));
+ assertNotNull(mc.getViolationDescription(violations.get(0)));
}
@@ -313,7 +400,19 @@ public class MetadataConstraintsTest {
// Missing beginning of path
m = new Mutation(new Text("0;foo"));
- m.put(columnFamily, new Text("/someFile"), value);
+ m.put(columnFamily, new Text(StoredTabletFile
+ .of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"),
new Range())
+
.getMetadata().replace("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile",
"/someFile")),
+ value);
+ violations = mc.check(createEnv(), m);
+ assertNotNull(violations);
+ assertEquals(1, violations.size());
+ assertEquals(Short.valueOf((short) 9), violations.get(0));
+ assertNotNull(mc.getViolationDescription(violations.get(0)));
+
+ // Bad Json - only path so should fail parsing
+ m = new Mutation(new Text("0;foo"));
+ m.put(columnFamily, new
Text("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), value);
violations = mc.check(createEnv(), m);
assertNotNull(violations);
assertEquals(1, violations.size());
@@ -322,7 +421,11 @@ public class MetadataConstraintsTest {
// Missing tables directory in path
m = new Mutation(new Text("0;foo"));
- m.put(columnFamily, new Text("hdfs://nn1/a/accumulo/2b/t-001/C00.rf"),
+ m.put(columnFamily,
+ new Text(StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+
.getMetadata().replace("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile",
+ "hdfs://1.2.3.4/accumulo/2a/t-0003/someFile")),
new DataFileValue(1, 1).encodeAsValue());
violations = mc.check(createEnv(), m);
assertNotNull(violations);
@@ -331,7 +434,10 @@ public class MetadataConstraintsTest {
// Should pass validation
m = new Mutation(new Text("0;foo"));
- m.put(columnFamily, new
Text("hdfs://nn1/a/accumulo/tables/2b/t-001/C00.rf"),
+ m.put(columnFamily,
+ StoredTabletFile
+
.of(URI.create("hdfs://1.2.3.4/accumulo/tables/2a/t-0003/someFile"), new
Range())
+ .getMetadataText(),
new DataFileValue(1, 1).encodeAsValue());
violations = mc.check(createEnv(), m);
assertNull(violations);
diff --git
a/server/base/src/test/java/org/apache/accumulo/server/fs/VolumeUtilTest.java
b/server/base/src/test/java/org/apache/accumulo/server/fs/VolumeUtilTest.java
index 5222801b23..a725b4459b 100644
---
a/server/base/src/test/java/org/apache/accumulo/server/fs/VolumeUtilTest.java
+++
b/server/base/src/test/java/org/apache/accumulo/server/fs/VolumeUtilTest.java
@@ -42,15 +42,15 @@ public class VolumeUtilTest {
.add(new Pair<>(new Path("hdfs://nn1:9000/accumulo"), new
Path("viewfs:/a/accumulo")));
replacements.add(new Pair<>(new Path("hdfs://nn2/accumulo"), new
Path("viewfs:/b/accumulo")));
- assertEquals(new Path("viewfs:/a/accumulo/tables/t-00000/C000.rf"),
VolumeUtil
- .switchVolume("hdfs://nn1/accumulo/tables/t-00000/C000.rf",
FileType.TABLE, replacements));
assertEquals(new Path("viewfs:/a/accumulo/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
- "hdfs://nn1:9000/accumulo/tables/t-00000/C000.rf", FileType.TABLE,
replacements));
- assertEquals(new Path("viewfs:/b/accumulo/tables/t-00000/C000.rf"),
VolumeUtil
- .switchVolume("hdfs://nn2/accumulo/tables/t-00000/C000.rf",
FileType.TABLE, replacements));
-
assertNull(VolumeUtil.switchVolume("viewfs:/a/accumulo/tables/t-00000/C000.rf",
FileType.TABLE,
- replacements));
-
assertNull(VolumeUtil.switchVolume("file:/nn1/a/accumulo/tables/t-00000/C000.rf",
+ new Path("hdfs://nn1/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
+ assertEquals(new Path("viewfs:/a/accumulo/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
+ new Path("hdfs://nn1:9000/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
+ assertEquals(new Path("viewfs:/b/accumulo/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
+ new Path("hdfs://nn2/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
+ assertNull(VolumeUtil.switchVolume(new
Path("viewfs:/a/accumulo/tables/t-00000/C000.rf"),
+ FileType.TABLE, replacements));
+ assertNull(VolumeUtil.switchVolume(new
Path("file:/nn1/a/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
replacements.clear();
@@ -62,17 +62,18 @@ public class VolumeUtilTest {
.add(new Pair<>(new Path("hdfs://nn2/d2/accumulo"), new
Path("viewfs:/b/accumulo")));
assertEquals(new Path("viewfs:/a/accumulo/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
- "hdfs://nn1/d1/accumulo/tables/t-00000/C000.rf", FileType.TABLE,
replacements));
- assertEquals(new Path("viewfs:/a/accumulo/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
- "hdfs://nn1:9000/d1/accumulo/tables/t-00000/C000.rf", FileType.TABLE,
replacements));
+ new Path("hdfs://nn1/d1/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
+ assertEquals(new Path("viewfs:/a/accumulo/tables/t-00000/C000.rf"),
+ VolumeUtil.switchVolume(new
Path("hdfs://nn1:9000/d1/accumulo/tables/t-00000/C000.rf"),
+ FileType.TABLE, replacements));
assertEquals(new Path("viewfs:/b/accumulo/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
- "hdfs://nn2/d2/accumulo/tables/t-00000/C000.rf", FileType.TABLE,
replacements));
-
assertNull(VolumeUtil.switchVolume("viewfs:/a/accumulo/tables/t-00000/C000.rf",
FileType.TABLE,
- replacements));
-
assertNull(VolumeUtil.switchVolume("file:/nn1/a/accumulo/tables/t-00000/C000.rf",
+ new Path("hdfs://nn2/d2/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
+ assertNull(VolumeUtil.switchVolume(new
Path("viewfs:/a/accumulo/tables/t-00000/C000.rf"),
+ FileType.TABLE, replacements));
+ assertNull(VolumeUtil.switchVolume(new
Path("file:/nn1/a/accumulo/tables/t-00000/C000.rf"),
+ FileType.TABLE, replacements));
+ assertNull(VolumeUtil.switchVolume(new
Path("hdfs://nn1/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
-
assertNull(VolumeUtil.switchVolume("hdfs://nn1/accumulo/tables/t-00000/C000.rf",
FileType.TABLE,
- replacements));
}
@Test
@@ -82,15 +83,15 @@ public class VolumeUtilTest {
replacements.add(new Pair<>(new Path("hdfs://nn1:9000/accumulo"), new
Path("viewfs:/a")));
replacements.add(new Pair<>(new Path("hdfs://nn2/accumulo"), new
Path("viewfs:/b")));
- assertEquals(new Path("viewfs:/a/tables/t-00000/C000.rf"), VolumeUtil
- .switchVolume("hdfs://nn1/accumulo/tables/t-00000/C000.rf",
FileType.TABLE, replacements));
assertEquals(new Path("viewfs:/a/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
- "hdfs://nn1:9000/accumulo/tables/t-00000/C000.rf", FileType.TABLE,
replacements));
- assertEquals(new Path("viewfs:/b/tables/t-00000/C000.rf"), VolumeUtil
- .switchVolume("hdfs://nn2/accumulo/tables/t-00000/C000.rf",
FileType.TABLE, replacements));
- assertNull(
- VolumeUtil.switchVolume("viewfs:/a/tables/t-00000/C000.rf",
FileType.TABLE, replacements));
-
assertNull(VolumeUtil.switchVolume("file:/nn1/a/accumulo/tables/t-00000/C000.rf",
+ new Path("hdfs://nn1/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
+ assertEquals(new Path("viewfs:/a/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
+ new Path("hdfs://nn1:9000/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
+ assertEquals(new Path("viewfs:/b/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
+ new Path("hdfs://nn2/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
+ assertNull(VolumeUtil.switchVolume(new
Path("viewfs:/a/tables/t-00000/C000.rf"), FileType.TABLE,
+ replacements));
+ assertNull(VolumeUtil.switchVolume(new
Path("file:/nn1/a/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
replacements.clear();
@@ -99,17 +100,18 @@ public class VolumeUtilTest {
replacements.add(new Pair<>(new Path("hdfs://nn2/d2/accumulo"), new
Path("viewfs:/b")));
assertEquals(new Path("viewfs:/a/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
- "hdfs://nn1/d1/accumulo/tables/t-00000/C000.rf", FileType.TABLE,
replacements));
- assertEquals(new Path("viewfs:/a/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
- "hdfs://nn1:9000/d1/accumulo/tables/t-00000/C000.rf", FileType.TABLE,
replacements));
+ new Path("hdfs://nn1/d1/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
+ assertEquals(new Path("viewfs:/a/tables/t-00000/C000.rf"),
+ VolumeUtil.switchVolume(new
Path("hdfs://nn1:9000/d1/accumulo/tables/t-00000/C000.rf"),
+ FileType.TABLE, replacements));
assertEquals(new Path("viewfs:/b/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
- "hdfs://nn2/d2/accumulo/tables/t-00000/C000.rf", FileType.TABLE,
replacements));
- assertNull(
- VolumeUtil.switchVolume("viewfs:/a/tables/t-00000/C000.rf",
FileType.TABLE, replacements));
-
assertNull(VolumeUtil.switchVolume("file:/nn1/a/accumulo/tables/t-00000/C000.rf",
- FileType.TABLE, replacements));
-
assertNull(VolumeUtil.switchVolume("hdfs://nn1/accumulo/tables/t-00000/C000.rf",
FileType.TABLE,
+ new Path("hdfs://nn2/d2/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
+ assertNull(VolumeUtil.switchVolume(new
Path("viewfs:/a/tables/t-00000/C000.rf"), FileType.TABLE,
replacements));
+ assertNull(VolumeUtil.switchVolume(new
Path("file:/nn1/a/accumulo/tables/t-00000/C000.rf"),
+ FileType.TABLE, replacements));
+ assertNull(VolumeUtil.switchVolume(new
Path("hdfs://nn1/accumulo/tables/t-00000/C000.rf"),
+ FileType.TABLE, replacements));
}
@Test
@@ -120,15 +122,15 @@ public class VolumeUtilTest {
.add(new Pair<>(new Path("hdfs://nn1:9000/accumulo"), new
Path("viewfs:/path1/path2")));
replacements.add(new Pair<>(new Path("hdfs://nn2/accumulo"), new
Path("viewfs:/path3")));
- assertEquals(new Path("viewfs:/path1/path2/tables/t-00000/C000.rf"),
VolumeUtil
- .switchVolume("hdfs://nn1/accumulo/tables/t-00000/C000.rf",
FileType.TABLE, replacements));
assertEquals(new Path("viewfs:/path1/path2/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
- "hdfs://nn1:9000/accumulo/tables/t-00000/C000.rf", FileType.TABLE,
replacements));
- assertEquals(new Path("viewfs:/path3/tables/t-00000/C000.rf"), VolumeUtil
- .switchVolume("hdfs://nn2/accumulo/tables/t-00000/C000.rf",
FileType.TABLE, replacements));
-
assertNull(VolumeUtil.switchVolume("viewfs:/path1/path2/tables/t-00000/C000.rf",
FileType.TABLE,
- replacements));
-
assertNull(VolumeUtil.switchVolume("file:/nn1/a/accumulo/tables/t-00000/C000.rf",
+ new Path("hdfs://nn1/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
+ assertEquals(new Path("viewfs:/path1/path2/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
+ new Path("hdfs://nn1:9000/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
+ assertEquals(new Path("viewfs:/path3/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
+ new Path("hdfs://nn2/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
+ assertNull(VolumeUtil.switchVolume(new
Path("viewfs:/path1/path2/tables/t-00000/C000.rf"),
+ FileType.TABLE, replacements));
+ assertNull(VolumeUtil.switchVolume(new
Path("file:/nn1/a/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
replacements.clear();
@@ -139,17 +141,18 @@ public class VolumeUtilTest {
replacements.add(new Pair<>(new Path("hdfs://nn2/d2/accumulo"), new
Path("viewfs:/path3")));
assertEquals(new Path("viewfs:/path1/path2/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
- "hdfs://nn1/d1/accumulo/tables/t-00000/C000.rf", FileType.TABLE,
replacements));
- assertEquals(new Path("viewfs:/path1/path2/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
- "hdfs://nn1:9000/d1/accumulo/tables/t-00000/C000.rf", FileType.TABLE,
replacements));
+ new Path("hdfs://nn1/d1/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
+ assertEquals(new Path("viewfs:/path1/path2/tables/t-00000/C000.rf"),
+ VolumeUtil.switchVolume(new
Path("hdfs://nn1:9000/d1/accumulo/tables/t-00000/C000.rf"),
+ FileType.TABLE, replacements));
assertEquals(new Path("viewfs:/path3/tables/t-00000/C000.rf"),
VolumeUtil.switchVolume(
- "hdfs://nn2/d2/accumulo/tables/t-00000/C000.rf", FileType.TABLE,
replacements));
-
assertNull(VolumeUtil.switchVolume("viewfs:/path1/path2/tables/t-00000/C000.rf",
FileType.TABLE,
- replacements));
-
assertNull(VolumeUtil.switchVolume("file:/nn1/a/accumulo/tables/t-00000/C000.rf",
+ new Path("hdfs://nn2/d2/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
+ assertNull(VolumeUtil.switchVolume(new
Path("viewfs:/path1/path2/tables/t-00000/C000.rf"),
+ FileType.TABLE, replacements));
+ assertNull(VolumeUtil.switchVolume(new
Path("file:/nn1/a/accumulo/tables/t-00000/C000.rf"),
+ FileType.TABLE, replacements));
+ assertNull(VolumeUtil.switchVolume(new
Path("hdfs://nn1/accumulo/tables/t-00000/C000.rf"),
FileType.TABLE, replacements));
-
assertNull(VolumeUtil.switchVolume("hdfs://nn1/accumulo/tables/t-00000/C000.rf",
FileType.TABLE,
- replacements));
}
@Test
@@ -161,6 +164,6 @@ public class VolumeUtilTest {
FileType ft = FileType.TABLE;
assertEquals(new Path("file:/foo/v8/tables/+r/root_tablet"),
- VolumeUtil.switchVolume("file:/foo/v1/tables/+r/root_tablet", ft,
replacements));
+ VolumeUtil.switchVolume(new
Path("file:/foo/v1/tables/+r/root_tablet"), ft, replacements));
}
}
diff --git
a/server/base/src/test/java/org/apache/accumulo/server/util/TableDiskUsageTest.java
b/server/base/src/test/java/org/apache/accumulo/server/util/TableDiskUsageTest.java
index 7938c029d1..3cd46f57f3 100644
---
a/server/base/src/test/java/org/apache/accumulo/server/util/TableDiskUsageTest.java
+++
b/server/base/src/test/java/org/apache/accumulo/server/util/TableDiskUsageTest.java
@@ -25,6 +25,7 @@ import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
+import java.util.Map.Entry;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
@@ -268,8 +269,8 @@ public class TableDiskUsageTest {
private static Long getTotalUsage(Map<SortedSet<String>,Long> result,
TableId tableId) {
return result.entrySet().stream()
- .filter(entry -> entry.getKey().contains(getTableName(tableId)))
- .mapToLong(entry -> entry.getValue()).sum();
+ .filter(entry ->
entry.getKey().contains(getTableName(tableId))).mapToLong(Entry::getValue)
+ .sum();
}
private static String getTableName(TableId tableId) {
@@ -278,9 +279,8 @@ public class TableDiskUsageTest {
private static void appendFileMetadata(Map<Key,Value> tableEntries,
ReferencedTabletFile file,
long size) {
- tableEntries.put(
- new Key(new Text(file.getTableId() + "<"),
- MetadataSchema.TabletsSection.DataFileColumnFamily.NAME,
file.getMetaInsertText()),
+ tableEntries.put(new Key(new Text(file.getTableId() + "<"),
+ MetadataSchema.TabletsSection.DataFileColumnFamily.NAME,
file.insert().getMetadataText()),
new DataFileValue(size, 1).encodeAsValue());
}
diff --git a/server/gc/src/main/java/org/apache/accumulo/gc/GCRun.java
b/server/gc/src/main/java/org/apache/accumulo/gc/GCRun.java
index e9e58c89b3..bfaa78f63b 100644
--- a/server/gc/src/main/java/org/apache/accumulo/gc/GCRun.java
+++ b/server/gc/src/main/java/org/apache/accumulo/gc/GCRun.java
@@ -162,7 +162,7 @@ public class GCRun implements GarbageCollectionEnvironment {
fileStream = Stream.concat(fileStream, tmScans.stream());
}
// map the files to Reference objects
- var stream = fileStream.map(f -> new ReferenceFile(tm.getTableId(),
f.getMetaUpdateDelete()));
+ var stream = fileStream.map(f -> new ReferenceFile(tm.getTableId(), f));
// if dirName is populated then we have a tablet directory aka srv:dir
if (tm.getDirName() != null) {
// add the tablet directory to the stream
@@ -173,7 +173,7 @@ public class GCRun implements GarbageCollectionEnvironment {
});
var scanServerRefs = context.getAmple().getScanServerFileReferences()
- .map(sfr -> new ReferenceFile(sfr.getTableId(),
sfr.getNormalizedPathStr()));
+ .map(sfr -> new ReferenceFile(sfr.getTableId(), sfr));
return Stream.concat(tabletReferences, scanServerRefs);
}
@@ -255,7 +255,7 @@ public class GCRun implements GarbageCollectionEnvironment {
try {
Path fullPath;
Path switchedDelete =
- VolumeUtil.switchVolume(delete, VolumeManager.FileType.TABLE,
replacements);
+ VolumeUtil.switchVolume(new Path(delete),
VolumeManager.FileType.TABLE, replacements);
if (switchedDelete != null) {
// actually replacing the volumes in the metadata table would be
tricky because the
// entries would be different rows. So it could not be
diff --git
a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
index 164f8949a7..4b675a30bd 100644
---
a/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
+++
b/server/gc/src/main/java/org/apache/accumulo/gc/GarbageCollectionAlgorithm.java
@@ -156,11 +156,11 @@ public class GarbageCollectionAlgorithm {
log.debug("Candidate was still in use: {}", dir);
}
} else {
- String reference = ref.getMetadataEntry();
+ String reference = ref.getMetadataPath();
if (reference.startsWith("/")) {
log.debug("Candidate {} has a relative path, prepend tableId {}",
reference,
ref.getTableId());
- reference = "/" + ref.getTableId() + ref.getMetadataEntry();
+ reference = "/" + ref.getTableId() + ref.getMetadataPath();
} else if (!reference.contains(":") && !reference.startsWith("../")) {
throw new RuntimeException("Bad file reference " + reference);
}
diff --git
a/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectionTest.java
b/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectionTest.java
index 1ddcb32bc1..896cb9ca60 100644
--- a/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectionTest.java
+++ b/server/gc/src/test/java/org/apache/accumulo/gc/GarbageCollectionTest.java
@@ -44,6 +44,7 @@ import
org.apache.accumulo.core.manager.state.tables.TableState;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.RootTable;
import org.apache.accumulo.core.metadata.schema.Ample;
+import org.apache.hadoop.fs.Path;
import org.junit.jupiter.api.Test;
public class GarbageCollectionTest {
@@ -111,7 +112,7 @@ public class GarbageCollectionTest {
public void addFileReference(String tableId, String endRow, String file) {
TableId tid = TableId.of(tableId);
- references.put(tableId + ":" + endRow + ":" + file, new
ReferenceFile(tid, file));
+ references.put(tableId + ":" + endRow + ":" + file, new
ReferenceFile(tid, new Path(file)));
tableIds.add(tid);
}
diff --git
a/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorTest.java
b/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorTest.java
index 32fb1f0c75..83443b6f99 100644
---
a/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorTest.java
+++
b/server/gc/src/test/java/org/apache/accumulo/gc/SimpleGarbageCollectorTest.java
@@ -163,12 +163,12 @@ public class SimpleGarbageCollectorTest {
confirmed.put("5a/t-0001/F0002.rf",
"hdfs://nn1/accumulo/tables/5a/t-0001/F0002.rf");
confirmed.put("5a/t-0002/F0001.rf",
"hdfs://nn1/accumulo/tables/5a/t-0002/F0001.rf");
var allVolumesDirectory = new AllVolumesDirectory(TableId.of("5b"),
"t-0003");
- confirmed.put("5b/t-0003", allVolumesDirectory.getMetadataEntry());
+ confirmed.put("5b/t-0003", allVolumesDirectory.getMetadataPath());
confirmed.put("5b/t-0003/F0001.rf",
"hdfs://nn1/accumulo/tables/5b/t-0003/F0001.rf");
confirmed.put("5b/t-0003/F0002.rf",
"hdfs://nn2/accumulo/tables/5b/t-0003/F0002.rf");
confirmed.put("5b/t-0003/F0003.rf",
"hdfs://nn3/accumulo/tables/5b/t-0003/F0003.rf");
allVolumesDirectory = new AllVolumesDirectory(TableId.of("5b"), "t-0004");
- confirmed.put("5b/t-0004", allVolumesDirectory.getMetadataEntry());
+ confirmed.put("5b/t-0004", allVolumesDirectory.getMetadataPath());
confirmed.put("5b/t-0004/F0001.rf",
"hdfs://nn1/accumulo/tables/5b/t-0004/F0001.rf");
List<String> processedDeletes = new ArrayList<>();
@@ -179,10 +179,10 @@ public class SimpleGarbageCollectorTest {
expected.put("5a/t-0001", "hdfs://nn1/accumulo/tables/5a/t-0001");
expected.put("5a/t-0002/F0001.rf",
"hdfs://nn1/accumulo/tables/5a/t-0002/F0001.rf");
allVolumesDirectory = new AllVolumesDirectory(TableId.of("5b"), "t-0003");
- expected.put("5b/t-0003", allVolumesDirectory.getMetadataEntry());
+ expected.put("5b/t-0003", allVolumesDirectory.getMetadataPath());
expected.put("5b/t-0003/F0003.rf",
"hdfs://nn3/accumulo/tables/5b/t-0003/F0003.rf");
allVolumesDirectory = new AllVolumesDirectory(TableId.of("5b"), "t-0004");
- expected.put("5b/t-0004", allVolumesDirectory.getMetadataEntry());
+ expected.put("5b/t-0004", allVolumesDirectory.getMetadataPath());
assertEquals(expected, confirmed);
assertEquals(Arrays.asList("hdfs://nn1/accumulo/tables/5a/t-0001/F0001.rf",
diff --git
a/server/manager/src/main/java/org/apache/accumulo/manager/TabletGroupWatcher.java
b/server/manager/src/main/java/org/apache/accumulo/manager/TabletGroupWatcher.java
index 8f14c20614..ee54284a25 100644
---
a/server/manager/src/main/java/org/apache/accumulo/manager/TabletGroupWatcher.java
+++
b/server/manager/src/main/java/org/apache/accumulo/manager/TabletGroupWatcher.java
@@ -703,7 +703,7 @@ abstract class TabletGroupWatcher extends
AccumuloDaemonThread {
Key key = entry.getKey();
if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
var stf = new
StoredTabletFile(key.getColumnQualifierData().toString());
- datafilesAndDirs.add(new ReferenceFile(stf.getTableId(),
stf.getMetaUpdateDelete()));
+ datafilesAndDirs.add(new ReferenceFile(stf.getTableId(), stf));
if (datafilesAndDirs.size() > 1000) {
ample.putGcFileAndDirCandidates(extent.tableId(),
datafilesAndDirs);
datafilesAndDirs.clear();
diff --git
a/server/manager/src/main/java/org/apache/accumulo/manager/recovery/RecoveryManager.java
b/server/manager/src/main/java/org/apache/accumulo/manager/recovery/RecoveryManager.java
index 7bceb71e9c..86bc06930c 100644
---
a/server/manager/src/main/java/org/apache/accumulo/manager/recovery/RecoveryManager.java
+++
b/server/manager/src/main/java/org/apache/accumulo/manager/recovery/RecoveryManager.java
@@ -163,7 +163,7 @@ public class RecoveryManager {
for (Collection<String> logs : walogs) {
for (String walog : logs) {
- Path switchedWalog = VolumeUtil.switchVolume(walog, FileType.WAL,
+ Path switchedWalog = VolumeUtil.switchVolume(new Path(walog),
FileType.WAL,
manager.getContext().getVolumeReplacements());
if (switchedWalog != null) {
// replaces the volume used for sorting, but do not change entry in
metadata table. When
diff --git
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/bulkVer2/CleanUpBulkImport.java
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/bulkVer2/CleanUpBulkImport.java
index 4a1def08d7..e613d53f93 100644
---
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/bulkVer2/CleanUpBulkImport.java
+++
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/bulkVer2/CleanUpBulkImport.java
@@ -59,7 +59,7 @@ public class CleanUpBulkImport extends ManagerRepo {
ample.removeBulkLoadInProgressFlag(
"/" + bulkDir.getParent().getName() + "/" + bulkDir.getName());
ample.putGcFileAndDirCandidates(info.tableId,
- Collections.singleton(new ReferenceFile(info.tableId,
bulkDir.toString())));
+ Collections.singleton(new ReferenceFile(info.tableId, bulkDir)));
if (info.tableState == TableState.ONLINE) {
Text firstSplit = info.firstSplit == null ? null : new
Text(info.firstSplit);
diff --git
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/bulkVer2/LoadFiles.java
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/bulkVer2/LoadFiles.java
index 27d060ad00..0dcc48a728 100644
---
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/bulkVer2/LoadFiles.java
+++
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/bulkVer2/LoadFiles.java
@@ -41,6 +41,7 @@ import org.apache.accumulo.core.clientImpl.bulk.BulkSerialize;
import org.apache.accumulo.core.clientImpl.bulk.LoadMappingIterator;
import org.apache.accumulo.core.conf.Property;
import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.dataImpl.thrift.TKeyExtent;
@@ -280,7 +281,6 @@ class LoadFiles extends ManagerRepo {
@Override
void load(List<TabletMetadata> tablets, Files files) throws
MutationsRejectedException {
byte[] fam = TextUtil.getBytes(DataFileColumnFamily.NAME);
-
for (TabletMetadata tablet : tablets) {
if (tablet.getLocation() != null) {
unloadingTablets.increment(tablet.getLocation().getHostAndPort(),
1L);
@@ -290,10 +290,12 @@ class LoadFiles extends ManagerRepo {
Mutation mutation = new Mutation(tablet.getExtent().toMetaRow());
for (final Bulk.FileInfo fileInfo : files) {
- String fullPath = new Path(bulkDir,
fileInfo.getFileName()).toString();
+ // Todo: do we need to support importing with a range here?
+ StoredTabletFile fullPath =
+ StoredTabletFile.of(new Path(bulkDir,
fileInfo.getFileName()).toUri(), new Range());
byte[] val =
new DataFileValue(fileInfo.getEstFileSize(),
fileInfo.getEstNumEntries()).encode();
- mutation.put(fam, fullPath.getBytes(UTF_8), val);
+ mutation.put(fam, fullPath.getMetadata().getBytes(UTF_8), val);
}
bw.addMutation(mutation);
diff --git
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableExport/WriteExportFiles.java
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableExport/WriteExportFiles.java
index a245a95fb8..13ceeb0367 100644
---
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableExport/WriteExportFiles.java
+++
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableExport/WriteExportFiles.java
@@ -50,6 +50,7 @@ import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.fate.Repo;
import org.apache.accumulo.core.manager.state.tables.TableState;
import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
import org.apache.accumulo.core.metadata.ValidationUtil;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.CurrentLocationColumnFamily;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
@@ -237,7 +238,9 @@ class WriteExportFiles extends ManagerRepo {
entry.getValue().write(dataOut);
if (entry.getKey().getColumnFamily().equals(DataFileColumnFamily.NAME)) {
- String path =
ValidationUtil.validate(entry.getKey().getColumnQualifierData().toString());
+ // We need to get the actual path of the file to validate unique files
+ String path = ValidationUtil.validate(StoredTabletFile
+
.of(entry.getKey().getColumnQualifierData().toString()).getMetadataPath());
String[] tokens = path.split("/");
if (tokens.length < 1) {
throw new RuntimeException("Illegal path " + path);
diff --git
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableImport/PopulateMetadataTable.java
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableImport/PopulateMetadataTable.java
index 2a9ea557d7..9188eac1b8 100644
---
a/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableImport/PopulateMetadataTable.java
+++
b/server/manager/src/main/java/org/apache/accumulo/manager/tableOps/tableImport/PopulateMetadataTable.java
@@ -26,6 +26,7 @@ import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
+import java.net.URI;
import java.util.HashMap;
import java.util.Map;
import java.util.zip.ZipEntry;
@@ -42,6 +43,7 @@ import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.fate.Repo;
import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily;
@@ -127,7 +129,8 @@ class PopulateMetadataTable extends ManagerRepo {
Text cq;
if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
- String oldName = new
Path(key.getColumnQualifier().toString()).getName();
+ final StoredTabletFile oldTabletFile =
StoredTabletFile.of(key.getColumnQualifier());
+ String oldName = oldTabletFile.getFileName();
String newName = fileNameMappings.get(oldName);
if (newName == null) {
@@ -136,7 +139,9 @@ class PopulateMetadataTable extends ManagerRepo {
"File " + oldName + " does not exist in import dir");
}
- cq = new Text(newName);
+ // Copy over the range for the new file
+ cq = StoredTabletFile.of(URI.create(newName),
oldTabletFile.getRange())
+ .getMetadataText();
} else {
cq = key.getColumnQualifier();
}
diff --git
a/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/ExternalCompactionJob.java
b/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/ExternalCompactionJob.java
index fcee290c07..a06787da5c 100644
---
a/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/ExternalCompactionJob.java
+++
b/server/tserver/src/main/java/org/apache/accumulo/tserver/compactions/ExternalCompactionJob.java
@@ -76,7 +76,7 @@ public class ExternalCompactionJob {
List<InputFile> files = jobFiles.entrySet().stream().map(e -> {
var dfv = e.getValue();
- return new InputFile(e.getKey().getNormalizedPathStr(), dfv.getSize(),
dfv.getNumEntries(),
+ return new InputFile(e.getKey().getMetadata(), dfv.getSize(),
dfv.getNumEntries(),
dfv.getTime());
}).collect(Collectors.toList());
diff --git
a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactableUtils.java
b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactableUtils.java
index b56d8d2083..13263f3bda 100644
---
a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactableUtils.java
+++
b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/CompactableUtils.java
@@ -450,13 +450,13 @@ public class CompactableUtils {
}
public static ReferencedTabletFile
computeCompactionFileDest(ReferencedTabletFile tmpFile) {
- String newFilePath = tmpFile.getMetaInsert();
+ String newFilePath = tmpFile.getNormalizedPathStr();
int idx = newFilePath.indexOf("_tmp");
if (idx > 0) {
newFilePath = newFilePath.substring(0, idx);
} else {
- throw new IllegalArgumentException(
- "Expected compaction tmp file " + tmpFile.getMetaInsert() + " to
have suffix '_tmp'");
+ throw new IllegalArgumentException("Expected compaction tmp file "
+ + tmpFile.getNormalizedPathStr() + " to have suffix '_tmp'");
}
return new ReferencedTabletFile(new Path(newFilePath));
}
diff --git
a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactionTask.java
b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactionTask.java
index 349e36d43a..e4daed4f1a 100644
---
a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactionTask.java
+++
b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactionTask.java
@@ -90,7 +90,7 @@ class MinorCompactionTask implements Runnable {
* for the minor compaction
*/
tablet.getTabletServer().minorCompactionStarted(commitSession,
- commitSession.getWALogSeq() + 1, newFile.getMetaInsert());
+ commitSession.getWALogSeq() + 1,
newFile.insert().getMetadataPath());
break;
} catch (IOException e) {
// An IOException could have occurred while creating the new file
diff --git
a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java
b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java
index ab7f53e2a5..2a3dc01809 100644
---
a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java
+++
b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/MinorCompactor.java
@@ -79,7 +79,7 @@ public class MinorCompactor extends FileCompactor {
@Override
public CompactionStats call() {
- final String outputFileName = getOutputFile();
+ final String outputFileName = getOutputFile().getMetadataPath();
log.trace("Begin minor compaction {} {}", outputFileName, getExtent());
// output to new data file with a temporary name
diff --git
a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
index 2d910585c1..6a063ae31c 100644
---
a/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
+++
b/server/tserver/src/main/java/org/apache/accumulo/tserver/tablet/Tablet.java
@@ -262,7 +262,7 @@ public class Tablet extends TabletBase {
ReferencedTabletFile getNextDataFilenameForMajc(boolean propagateDeletes)
throws IOException {
String tmpFileName = getNextDataFilename(
!propagateDeletes ? FilePrefix.MAJOR_COMPACTION_ALL_FILES :
FilePrefix.MAJOR_COMPACTION)
- .getMetaInsert() + "_tmp";
+ .insert().getMetadataPath() + "_tmp";
return new ReferencedTabletFile(new Path(tmpFileName));
}
diff --git
a/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/CompactableUtilsTest.java
b/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/CompactableUtilsTest.java
index 371f4bb314..84229c7a21 100644
---
a/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/CompactableUtilsTest.java
+++
b/server/tserver/src/test/java/org/apache/accumulo/tserver/compaction/CompactableUtilsTest.java
@@ -32,7 +32,7 @@ public class CompactableUtilsTest {
ReferencedTabletFile expected = new ReferencedTabletFile(
new
Path("hdfs://localhost:8020/accumulo/tables/2a/default_tablet/F0000070.rf"));
ReferencedTabletFile tmpFile =
- new ReferencedTabletFile(new Path(expected.getMetaInsert() + "_tmp"));
+ new ReferencedTabletFile(new Path(expected.getNormalizedPathStr() +
"_tmp"));
ReferencedTabletFile dest =
CompactableUtils.computeCompactionFileDest(tmpFile);
assertEquals(expected, dest);
}
diff --git
a/server/tserver/src/test/java/org/apache/accumulo/tserver/tablet/CompactableImplFileManagerTest.java
b/server/tserver/src/test/java/org/apache/accumulo/tserver/tablet/CompactableImplFileManagerTest.java
index 5da7be7956..b834f6bd73 100644
---
a/server/tserver/src/test/java/org/apache/accumulo/tserver/tablet/CompactableImplFileManagerTest.java
+++
b/server/tserver/src/test/java/org/apache/accumulo/tserver/tablet/CompactableImplFileManagerTest.java
@@ -443,7 +443,8 @@ public class CompactableImplFileManagerTest {
}
static StoredTabletFile newFile(String f) {
- return new StoredTabletFile("hdfs://nn1/accumulo/tables/1/t-0001/" + f);
+ return new StoredTabletFile(
+ StoredTabletFile.serialize("hdfs://nn1/accumulo/tables/1/t-0001/" +
f));
}
static Set<StoredTabletFile> newFiles(String... strings) {
diff --git a/test/src/main/java/org/apache/accumulo/test/CloneIT.java
b/test/src/main/java/org/apache/accumulo/test/CloneIT.java
index e2b4b9c856..de1747818a 100644
--- a/test/src/main/java/org/apache/accumulo/test/CloneIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/CloneIT.java
@@ -22,6 +22,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
+import java.net.URI;
import java.util.HashSet;
import java.util.Map.Entry;
@@ -31,9 +32,11 @@ import org.apache.accumulo.core.client.BatchWriter;
import org.apache.accumulo.core.client.Scanner;
import org.apache.accumulo.core.data.Key;
import org.apache.accumulo.core.data.Mutation;
+import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.dataImpl.KeyExtent;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
import org.apache.accumulo.core.metadata.schema.DataFileValue;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
@@ -86,7 +89,8 @@ public class CloneIT extends AccumuloClusterHarness {
ServerColumnFamily.TIME_COLUMN.put(mut, new Value("M0"));
ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new
Value("/default_tablet"));
- mut.put(DataFileColumnFamily.NAME.toString(), filePrefix +
"/default_tablet/0_0.rf",
+ mut.put(DataFileColumnFamily.NAME.toString(),
+ getMetadata(filePrefix + "/default_tablet/0_0.rf"),
new DataFileValue(1, 200).encodeAsString());
try (BatchWriter bw1 = client.createBatchWriter(tableName);
@@ -98,8 +102,10 @@ public class CloneIT extends AccumuloClusterHarness {
MetadataTableUtil.initializeClone(tableName, TableId.of("0"),
TableId.of("1"), client, bw2);
Mutation mut2 = new Mutation(ke.toMetaRow());
- mut2.putDelete(DataFileColumnFamily.NAME.toString(), filePrefix +
"/default_tablet/0_0.rf");
- mut2.put(DataFileColumnFamily.NAME.toString(), filePrefix +
"/default_tablet/1_0.rf",
+ mut2.putDelete(DataFileColumnFamily.NAME.toString(),
+ getMetadata(filePrefix + "/default_tablet/0_0.rf"));
+ mut2.put(DataFileColumnFamily.NAME.toString(),
+ getMetadata(filePrefix + "/default_tablet/1_0.rf"),
new DataFileValue(2, 300).encodeAsString());
bw1.addMutation(mut2);
@@ -126,7 +132,7 @@ public class CloneIT extends AccumuloClusterHarness {
}
}
assertEquals(1, files.size());
- assertTrue(files.contains(filePrefix + "/default_tablet/1_0.rf"));
+ assertTrue(files.contains(getMetadata(filePrefix +
"/default_tablet/1_0.rf")));
}
}
@@ -174,7 +180,7 @@ public class CloneIT extends AccumuloClusterHarness {
}
assertEquals(1, count);
assertEquals(1, files.size());
- assertTrue(files.contains(filePrefix + "/default_tablet/0_0.rf"));
+ assertTrue(files.contains(getMetadata(filePrefix +
"/default_tablet/0_0.rf")));
}
}
@@ -198,7 +204,8 @@ public class CloneIT extends AccumuloClusterHarness {
bw1.addMutation(
createTablet("0", "m", null, "/default_tablet", filePrefix +
"/default_tablet/1_0.rf"));
Mutation mut3 = createTablet("0", null, "m", "/t-1", filePrefix +
"/default_tablet/1_0.rf");
- mut3.putDelete(DataFileColumnFamily.NAME.toString(), filePrefix +
"/default_tablet/0_0.rf");
+ mut3.putDelete(DataFileColumnFamily.NAME.toString(),
+ getMetadata(filePrefix + "/default_tablet/0_0.rf"));
bw1.addMutation(mut3);
bw1.flush();
@@ -226,7 +233,7 @@ public class CloneIT extends AccumuloClusterHarness {
}
assertEquals(1, files.size());
assertEquals(2, count);
- assertTrue(files.contains(filePrefix + "/default_tablet/1_0.rf"));
+ assertTrue(files.contains(getMetadata(filePrefix +
"/default_tablet/1_0.rf")));
}
}
@@ -237,7 +244,7 @@ public class CloneIT extends AccumuloClusterHarness {
TabletColumnFamily.PREV_ROW_COLUMN.putDelete(mut);
ServerColumnFamily.TIME_COLUMN.putDelete(mut);
ServerColumnFamily.DIRECTORY_COLUMN.putDelete(mut);
- mut.putDelete(DataFileColumnFamily.NAME.toString(), file);
+ mut.putDelete(DataFileColumnFamily.NAME.toString(), getMetadata(file));
return mut;
}
@@ -250,7 +257,7 @@ public class CloneIT extends AccumuloClusterHarness {
ServerColumnFamily.TIME_COLUMN.put(mut, new Value("M0"));
ServerColumnFamily.DIRECTORY_COLUMN.put(mut, new Value(dir));
- mut.put(DataFileColumnFamily.NAME.toString(), file,
+ mut.put(DataFileColumnFamily.NAME.toString(), getMetadata(file),
new DataFileValue(10, 200).encodeAsString());
return mut;
@@ -300,8 +307,8 @@ public class CloneIT extends AccumuloClusterHarness {
}
assertEquals(2, count);
assertEquals(2, files.size());
- assertTrue(files.contains(filePrefix + "/d1/file1.rf"));
- assertTrue(files.contains(filePrefix + "/d2/file2.rf"));
+ assertTrue(files.contains(getMetadata(filePrefix + "/d1/file1.rf")));
+ assertTrue(files.contains(getMetadata(filePrefix + "/d2/file2.rf")));
}
}
@@ -365,9 +372,9 @@ public class CloneIT extends AccumuloClusterHarness {
}
assertEquals(3, count);
assertEquals(3, files.size());
-    assertTrue(files.contains("hdfs://nn:8000/accumulo/tables/0/d1/file1.rf"));
-    assertTrue(files.contains("hdfs://nn:8000/accumulo/tables/0/d2/file3.rf"));
-    assertTrue(files.contains("hdfs://nn:8000/accumulo/tables/0/d4/file3.rf"));
+    assertTrue(files.contains(getMetadata("hdfs://nn:8000/accumulo/tables/0/d1/file1.rf")));
+    assertTrue(files.contains(getMetadata("hdfs://nn:8000/accumulo/tables/0/d2/file3.rf")));
+    assertTrue(files.contains(getMetadata("hdfs://nn:8000/accumulo/tables/0/d4/file3.rf")));
}
}
@@ -390,7 +397,7 @@ public class CloneIT extends AccumuloClusterHarness {
bw1.addMutation(deleteTablet("0", "m", null, filePrefix +
"/d1/file1.rf"));
Mutation mut = createTablet("0", null, null, "/d2", filePrefix +
"/d2/file2.rf");
- mut.put(DataFileColumnFamily.NAME.toString(), filePrefix +
"/d1/file1.rf",
+ mut.put(DataFileColumnFamily.NAME.toString(), getMetadata(filePrefix +
"/d1/file1.rf"),
new DataFileValue(10, 200).encodeAsString());
bw1.addMutation(mut);
@@ -401,4 +408,8 @@ public class CloneIT extends AccumuloClusterHarness {
}
}
}
+
+ private static String getMetadata(String file) {
+ return StoredTabletFile.of(URI.create(file), new Range()).getMetadata();
+ }
}
diff --git
a/test/src/main/java/org/apache/accumulo/test/ScanServerMetadataEntriesIT.java
b/test/src/main/java/org/apache/accumulo/test/ScanServerMetadataEntriesIT.java
index 4e1f3dafb4..de7413a898 100644
---
a/test/src/main/java/org/apache/accumulo/test/ScanServerMetadataEntriesIT.java
+++
b/test/src/main/java/org/apache/accumulo/test/ScanServerMetadataEntriesIT.java
@@ -256,7 +256,7 @@ public class ScanServerMetadataEntriesIT extends
SharedMiniClusterBase {
assertTrue(refs.size() > fileCount * 2);
List<Reference> tableRefs =
refs.stream().filter(r -> r.getTableId().equals(tid) &&
!r.isDirectory())
-        .peek(r -> assertTrue(metadataScanFileRefs.contains(r.getMetadataEntry())))
+        .peek(r -> assertTrue(metadataScanFileRefs.contains(r.getMetadataPath())))
.collect(Collectors.toList());
log.info("Reference List:{}", tableRefs);
// There should be 6 references here. 3 for the table file entries,
and 3 for the scan
@@ -264,7 +264,7 @@ public class ScanServerMetadataEntriesIT extends
SharedMiniClusterBase {
assertEquals(fileCount * 2, tableRefs.size());
Set<String> deduplicatedReferences =
-        tableRefs.stream().map(Reference::getMetadataEntry).collect(Collectors.toSet());
+        tableRefs.stream().map(Reference::getMetadataPath).collect(Collectors.toSet());
assertEquals(fileCount, deduplicatedReferences.size());
}
diff --git a/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
b/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
index 3c4aa4bbe0..4f231deb6a 100644
--- a/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/VolumeIT.java
@@ -148,8 +148,10 @@ public class VolumeIT extends ConfigurableMacBase {
int fileCount = 0;
for (Entry<Key,Value> entry : scanner) {
- boolean inV1 =
entry.getKey().getColumnQualifier().toString().contains(v1.toString());
- boolean inV2 =
entry.getKey().getColumnQualifier().toString().contains(v2.toString());
+ boolean inV1 =
StoredTabletFile.of(entry.getKey().getColumnQualifier()).getMetadataPath()
+ .contains(v1.toString());
+ boolean inV2 =
StoredTabletFile.of(entry.getKey().getColumnQualifier()).getMetadataPath()
+ .contains(v2.toString());
assertTrue(inV1 || inV2);
fileCount++;
}
@@ -297,10 +299,10 @@ public class VolumeIT extends ConfigurableMacBase {
int[] counts = new int[paths.length];
outer: for (Entry<Key,Value> entry : metaScanner) {
- String path = entry.getKey().getColumnQualifier().toString();
+ String path =
StoredTabletFile.of(entry.getKey().getColumnQualifier()).getMetadataPath();
for (int i = 0; i < paths.length; i++) {
- if (path.startsWith(paths[i].toString())) {
+ if (path.contains(paths[i].toString())) {
counts[i]++;
continue outer;
}
@@ -373,7 +375,7 @@ public class VolumeIT extends ConfigurableMacBase {
int count = 0;
for (StoredTabletFile file : ((ClientContext)
client).getAmple().readTablet(RootTable.EXTENT)
.getFiles()) {
- assertTrue(file.getMetaUpdateDelete().startsWith(v2.toString()));
+ assertTrue(file.getMetadataPath().startsWith(v2.toString()));
count++;
}
@@ -442,8 +444,8 @@ public class VolumeIT extends ConfigurableMacBase {
int count = 0;
for (StoredTabletFile file : ((ClientContext)
client).getAmple().readTablet(RootTable.EXTENT)
.getFiles()) {
- assertTrue(file.getMetaUpdateDelete().startsWith(v8.toString())
- || file.getMetaUpdateDelete().startsWith(v9.toString()));
+ assertTrue(file.getMetadataPath().startsWith(v8.toString())
+ || file.getMetadataPath().startsWith(v9.toString()));
count++;
}
diff --git
a/test/src/main/java/org/apache/accumulo/test/functional/BulkFailureIT.java
b/test/src/main/java/org/apache/accumulo/test/functional/BulkFailureIT.java
index fe8bd12b1b..dca515ebf0 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/BulkFailureIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BulkFailureIT.java
@@ -61,6 +61,7 @@ import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.file.FileOperations;
import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.BulkFileColumnFamily;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import org.apache.accumulo.core.rpc.ThriftUtil;
@@ -348,7 +349,7 @@ public class BulkFailureIT extends AccumuloClusterHarness {
scanner.fetchColumnFamily(fam);
for (Entry<Key,Value> entry : scanner) {
- files.add(new Path(entry.getKey().getColumnQualifierData().toString()));
+      files.add(StoredTabletFile.of(entry.getKey().getColumnQualifier()).getPath());
}
return files;
diff --git
a/test/src/main/java/org/apache/accumulo/test/functional/BulkNewIT.java
b/test/src/main/java/org/apache/accumulo/test/functional/BulkNewIT.java
index 0993e45863..5280d279dc 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/BulkNewIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/BulkNewIT.java
@@ -556,7 +556,7 @@ public class BulkNewIT extends SharedMiniClusterBase {
for (TabletMetadata tablet : tablets) {
assertTrue(tablet.getLoaded().isEmpty());
- Set<String> fileHashes = tablet.getFiles().stream().map(f ->
hash(f.getMetaUpdateDelete()))
+ Set<String> fileHashes = tablet.getFiles().stream().map(f ->
hash(f.getMetadataPath()))
.collect(Collectors.toSet());
String endRow = tablet.getEndRow() == null ? "null" :
tablet.getEndRow().toString();
diff --git
a/test/src/main/java/org/apache/accumulo/test/functional/CleanTmpIT.java
b/test/src/main/java/org/apache/accumulo/test/functional/CleanTmpIT.java
index 7c7a231269..95acef55b6 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/CleanTmpIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CleanTmpIT.java
@@ -36,6 +36,7 @@ import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
@@ -92,7 +93,7 @@ public class CleanTmpIT extends ConfigurableMacBase {
s.setRange(Range.prefix(id));
s.fetchColumnFamily(DataFileColumnFamily.NAME);
Entry<Key,Value> entry = getOnlyElement(s);
- file = new Path(entry.getKey().getColumnQualifier().toString());
+ file =
StoredTabletFile.of(entry.getKey().getColumnQualifier()).getPath();
}
FileSystem fs = getCluster().getFileSystem();
diff --git
a/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
b/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
index 6390a2ca1c..1ff618dfc0 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/CloneTestIT.java
@@ -58,6 +58,7 @@ import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.manager.state.tables.TableState;
import org.apache.accumulo.core.metadata.MetadataTable;
import org.apache.accumulo.core.metadata.RootTable;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
import org.apache.accumulo.core.security.Authorizations;
@@ -158,7 +159,7 @@ public class CloneTestIT extends AccumuloClusterHarness {
k.getColumnQualifier(cq);
if (cf.equals(DataFileColumnFamily.NAME)) {
- Path p = new Path(cq.toString());
+ Path p = StoredTabletFile.of(cq).getPath();
FileSystem fs = cluster.getFileSystem();
assertTrue(fs.exists(p), "File does not exist: " + p);
} else if
(cf.equals(ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily())) {
diff --git
a/test/src/main/java/org/apache/accumulo/test/functional/FileNormalizationIT.java
b/test/src/main/java/org/apache/accumulo/test/functional/FileNormalizationIT.java
index 25a54c97cb..789fd5804c 100644
---
a/test/src/main/java/org/apache/accumulo/test/functional/FileNormalizationIT.java
+++
b/test/src/main/java/org/apache/accumulo/test/functional/FileNormalizationIT.java
@@ -37,12 +37,12 @@ import org.apache.accumulo.core.data.Mutation;
import org.apache.accumulo.core.data.TableId;
import org.apache.accumulo.core.dataImpl.KeyExtent;
import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import org.apache.accumulo.core.security.TablePermission;
import org.apache.accumulo.harness.SharedMiniClusterBase;
import org.apache.accumulo.test.TestIngest;
import org.apache.accumulo.test.VerifyIngest;
-import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
@@ -97,7 +97,7 @@ public class FileNormalizationIT extends
SharedMiniClusterBase {
scanner.forEach((k, v) -> {
var row = k.getRowData().toString();
var qual = k.getColumnQualifierData().toString();
- var path = new Path(qual).toString();
+ var path =
StoredTabletFile.of(k.getColumnQualifierData().toString()).getMetadataPath();
var rowPath = row + "+" + path;
log.debug("split test, inspecting {} {} {}", row, qual, v);
@@ -132,7 +132,7 @@ public class FileNormalizationIT extends
SharedMiniClusterBase {
Set<String> filenames = new HashSet<>();
scanner.forEach((k, v) -> {
- var path = new Path(k.getColumnQualifierData().toString());
+ var path =
StoredTabletFile.of(k.getColumnQualifierData().toString()).getPath();
assertFalse(filenames.contains(path.getName()));
assertTrue(path.getName().startsWith("A"));
filenames.add(path.getName());
diff --git
a/test/src/main/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
b/test/src/main/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
index e5d34e446c..7995ba4d58 100644
---
a/test/src/main/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
+++
b/test/src/main/java/org/apache/accumulo/test/functional/FunctionalTestUtils.java
@@ -44,6 +44,7 @@ import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
import org.apache.accumulo.cluster.AccumuloCluster;
import org.apache.accumulo.core.Constants;
@@ -61,6 +62,7 @@ import org.apache.accumulo.core.fate.ZooStore;
import org.apache.accumulo.core.fate.zookeeper.ZooReaderWriter;
import org.apache.accumulo.core.lock.ServiceLock;
import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily;
@@ -89,14 +91,18 @@ public class FunctionalTestUtils {
}
public static List<String> getRFilePaths(AccumuloClient c, String tableName)
throws Exception {
- List<String> files = new ArrayList<>();
+ return getStoredTabletFiles(c,
tableName).stream().map(StoredTabletFile::getMetadataPath)
+ .collect(Collectors.toList());
+ }
+
+ public static List<StoredTabletFile> getStoredTabletFiles(AccumuloClient c,
String tableName)
+ throws Exception {
+ List<StoredTabletFile> files = new ArrayList<>();
try (Scanner scanner = c.createScanner(MetadataTable.NAME,
Authorizations.EMPTY)) {
TableId tableId =
TableId.of(c.tableOperations().tableIdMap().get(tableName));
scanner.setRange(TabletsSection.getRange(tableId));
scanner.fetchColumnFamily(DataFileColumnFamily.NAME);
- scanner.forEach(entry -> {
- files.add(entry.getKey().getColumnQualifier().toString());
- });
+ scanner.forEach(entry ->
files.add(StoredTabletFile.of(entry.getKey().getColumnQualifier())));
}
return files;
}
diff --git
a/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
b/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
index a5737501e8..0a40516999 100644
---
a/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
+++
b/test/src/main/java/org/apache/accumulo/test/functional/GarbageCollectorIT.java
@@ -27,6 +27,7 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import java.io.UncheckedIOException;
+import java.net.URI;
import java.time.Duration;
import java.util.Arrays;
import java.util.List;
@@ -318,8 +319,9 @@ public class GarbageCollectorIT extends ConfigurableMacBase
{
for (int i = 0; i < 100000; ++i) {
String longpath = "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeee"
+ "ffffffffffgggggggggghhhhhhhhhhiiiiiiiiiijjjjjjjjjj";
- var path = String.format("file:/%020d/%s", i, longpath);
- Mutation delFlag = ample.createDeleteMutation(new
ReferenceFile(TableId.of("1"), path));
+ var path = URI.create(String.format("file:/%020d/%s", i, longpath));
+ Mutation delFlag =
+ ample.createDeleteMutation(new ReferenceFile(TableId.of("1"), new
Path(path)));
bw.addMutation(delFlag);
}
}
diff --git
a/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
b/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
index a4e2589f9c..84aa32fa5f 100644
--- a/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
+++ b/test/src/main/java/org/apache/accumulo/test/functional/ReadWriteIT.java
@@ -78,6 +78,7 @@ import org.apache.accumulo.core.file.rfile.PrintInfo;
import org.apache.accumulo.core.lock.ServiceLock;
import org.apache.accumulo.core.lock.ServiceLockData;
import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.util.MonitorUtil;
@@ -398,7 +399,7 @@ public class ReadWriteIT extends AccumuloClusterHarness {
try (PrintStream newOut = new PrintStream(baos)) {
System.setOut(newOut);
List<String> args = new ArrayList<>();
- args.add(entry.getKey().getColumnQualifier().toString());
+
args.add(StoredTabletFile.of(entry.getKey().getColumnQualifier()).getMetadataPath());
args.add("--props");
args.add(getCluster().getAccumuloPropertiesPath());
if (getClusterType() == ClusterType.STANDALONE && saslEnabled()) {
diff --git
a/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
b/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
index 7291a4280d..6ee5fe5271 100644
---
a/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
+++
b/test/src/main/java/org/apache/accumulo/test/functional/RecoveryWithEmptyRFileIT.java
@@ -33,6 +33,7 @@ import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.data.Value;
import org.apache.accumulo.core.file.rfile.CreateEmpty;
import org.apache.accumulo.core.metadata.MetadataTable;
+import org.apache.accumulo.core.metadata.StoredTabletFile;
import
org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
@@ -90,7 +91,7 @@ public class RecoveryWithEmptyRFileIT extends
ConfigurableMacBase {
boolean foundFile = false;
for (Entry<Key,Value> entry : meta) {
foundFile = true;
- Path rfile = new
Path(entry.getKey().getColumnQualifier().toString());
+ Path rfile =
StoredTabletFile.of(entry.getKey().getColumnQualifier()).getPath();
log.debug("Removing rfile '{}'", rfile);
cluster.getFileSystem().delete(rfile, false);
Process processInfo = cluster.exec(CreateEmpty.class,
rfile.toString()).getProcess();