This is an automated email from the ASF dual-hosted git repository.

dlmarion pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/accumulo.git

commit a5cf1e622d8eb587fca71288507c7bd24956ad7e
Merge: 1c163b5505 88abe226fd
Author: Dave Marion <[email protected]>
AuthorDate: Fri Dec 5 16:42:02 2025 +0000

    Merge branch '2.1'

 .../org/apache/accumulo/core/conf/Property.java    | 17 +++++++--
 .../apache/accumulo/core/file/FileOperations.java  | 43 ++++++++++++++++------
 .../accumulo/core/file/rfile/RFileOperations.java  | 16 ++++++++
 .../accumulo/server/compaction/FileCompactor.java  |  7 ++--
 .../server/init/FileSystemInitializer.java         |  8 ++--
 .../accumulo/tserver/TabletClientHandler.java      | 11 +++++-
 6 files changed, 78 insertions(+), 24 deletions(-)

diff --cc core/src/main/java/org/apache/accumulo/core/conf/Property.java
index 8c9a3b1ac2,9cd510fbd0..141b02b273
--- a/core/src/main/java/org/apache/accumulo/core/conf/Property.java
+++ b/core/src/main/java/org/apache/accumulo/core/conf/Property.java
@@@ -1122,68 -1256,77 +1122,77 @@@ public enum Property 
    TABLE_BLOOM_ERRORRATE("table.bloom.error.rate", "0.5%", PropertyType.FRACTION,
        "Bloom filter error rate.", "1.3.5"),
    TABLE_BLOOM_KEY_FUNCTOR("table.bloom.key.functor",
 -      "org.apache.accumulo.core.file.keyfunctor.RowFunctor", PropertyType.CLASSNAME,
 -      "A function that can transform the key prior to insertion and check of"
 -          + " bloom filter. org.apache.accumulo.core.file.keyfunctor.RowFunctor,"
 -          + " org.apache.accumulo.core.file.keyfunctor.ColumnFamilyFunctor, and"
 -          + " org.apache.accumulo.core.file.keyfunctor.ColumnQualifierFunctor are"
 -          + " allowable values. One can extend any of the above mentioned classes to"
 -          + " perform specialized parsing of the key.",
 -      "1.3.5"),
 +      "org.apache.accumulo.core.file.keyfunctor.RowFunctor", PropertyType.CLASSNAME, """
 +          A function that can transform the key prior to insertion and check of \
 +          bloom filter. org.apache.accumulo.core.file.keyfunctor.RowFunctor, \
 +          org.apache.accumulo.core.file.keyfunctor.ColumnFamilyFunctor, and \
 +          org.apache.accumulo.core.file.keyfunctor.ColumnQualifierFunctor are \
 +          allowable values. One can extend any of the above mentioned classes to \
 +          perform specialized parsing of the key.
 +          """, "1.3.5"),
    TABLE_BLOOM_HASHTYPE("table.bloom.hash.type", "murmur", PropertyType.STRING,
        "The bloom filter hash type.", "1.3.5"),
 -  TABLE_BULK_MAX_TABLETS("table.bulk.max.tablets", "0", PropertyType.COUNT,
 -      "The maximum number of tablets allowed for one bulk import file. Value of 0 is Unlimited. "
 -          + "This property is only enforced in the new bulk import API.",
 -      "2.1.0"),
    TABLE_BULK_SKIP_THRESHOLD("table.bulk.metadata.skip.distance", "0", PropertyType.COUNT,
 -      "When performing bulk v2 imports to a table, the Manager iterates over the tables metadata"
 -          + " tablets sequentially. When importing files into a small table or into all or a majority"
 -          + " of tablets of a large table then the tablet metadata information for most tablets will be needed."
 -          + " However, when importing files into a small number of non-contiguous tablets in a large table, then"
 -          + " the Manager will look at each tablets metadata when it could be skipped. The value of this"
 -          + " property tells the Manager if, and when, it should set up a new scanner over the metadata"
 -          + " table instead of just iterating over tablet metadata to find the matching tablet. Setting up"
 -          + " a new scanner is analogous to performing a seek in an iterator, but it has a cost. A value of zero (default) disables"
 -          + " this feature. A non-zero value enables this feature and the Manager will setup a new scanner"
 -          + " when the tablet metadata distance is above the supplied value.",
 +      """
 +          When performing bulk v2 imports to a table, the Manager iterates over the tables metadata \
 +          tablets sequentially. When importing files into a small table or into all or a majority \
 +          of tablets of a large table then the tablet metadata information for most tablets will be needed. \
 +          However, when importing files into a small number of non-contiguous tablets in a large table, then \
 +          the Manager will look at each tablets metadata when it could be skipped. The value of this \
 +          property tells the Manager if, and when, it should set up a new scanner over the metadata \
 +          table instead of just iterating over tablet metadata to find the matching tablet. Setting up \
 +          a new scanner is analogous to performing a seek in an iterator, but it has a cost. A value of zero (default) disables \
 +          this feature. A non-zero value enables this feature and the Manager will setup a new scanner \
 +          when the tablet metadata distance is above the supplied value.
 +          """,
        "2.1.4"),
 -  TABLE_DURABILITY("table.durability", "sync", PropertyType.DURABILITY,
 -      "The durability of writes to tables includes ensuring that mutations written"
 -          + " by clients are persisted in the write-ahead log and that files written"
 -          + " during a compaction are persisted to disk successfully. This property only"
 -          + " configures the durability used to write to the write-ahead log. Legal"
 -          + " values are: none, which skips the write-ahead log; log, which sends the"
 -          + " data to the write-ahead log, but does nothing to make it durable; flush,"
 -          + " which pushes data out of the JVM (likely to page cache); and sync, which"
 -          + " ensures that each mutation is written to the physical disk. To configure"
 -          + " the durability of files written during minor and major compactions, set the"
 -          + " Hadoop property \"dfs.datanode.synconclose\" to \"true\". This will ensure"
 -          + " that the blocks of the files in HDFS are written to the physical disk as"
 -          + " the compaction output files are written (Note that this may only apply"
 -          + " to replicated files in HDFS).",
 -      "1.7.0"),
 +  TABLE_DURABILITY("table.durability", "sync", PropertyType.DURABILITY, """
-       The durability used to write to the write-ahead log. Legal values are: \
-       none, which skips the write-ahead log; log, which sends the data to the \
-       write-ahead log, but does nothing to make it durable; flush, which pushes \
-       data to the file system; and sync, which ensures the data is written to disk. \
++      The durability of writes to tables includes ensuring that mutations written \
++      by clients are persisted in the write-ahead log and that files written \
++      during a compaction are persisted to disk successfully. This property only \
++      configures the durability used to write to the write-ahead log. Legal \
++      values are: none, which skips the write-ahead log; log, which sends the \
++      data to the write-ahead log, but does nothing to make it durable; flush, \
++      which pushes data out of the JVM (likely to page cache); and sync, which \
++      ensures that each mutation is written to the physical disk. To configure \
++      the durability of files written during minor and major compactions, set the \
++      Hadoop property \"dfs.datanode.synconclose\" to \"true\". This will ensure \
++      that the blocks of the files in HDFS are written to the physical disk as \
++      the compaction output files are written (Note that this may only apply \
++      to replicated files in HDFS). \
 +      """, "1.7.0"),
  
 -  TABLE_FAILURES_IGNORE("table.failures.ignore", "false", PropertyType.BOOLEAN,
 -      "If you want queries for your table to hang or fail when data is missing"
 -          + " from the system, then set this to false. When this set to true missing"
 -          + " data will be reported but queries will still run possibly returning a"
 -          + " subset of the data.",
 -      "1.3.5"),
 +  TABLE_FAILURES_IGNORE("table.failures.ignore", "false", PropertyType.BOOLEAN, """
 +      If you want queries for your table to hang or fail when data is missing \
 +      from the system, then set this to false. When this set to true missing \
 +      data will be reported but queries will still run possibly returning a \
 +      subset of the data. \
 +      """, "1.3.5"),
    TABLE_DEFAULT_SCANTIME_VISIBILITY("table.security.scan.visibility.default", "",
 -      PropertyType.STRING,
 -      "The security label that will be assumed at scan time if an entry does"
 -          + " not have a visibility expression.\n"
 -          + "Note: An empty security label is displayed as []. The scan results"
 -          + " will show an empty visibility even if the visibility from this"
 -          + " setting is applied to the entry.\n"
 -          + "CAUTION: If a particular key has an empty security label AND its"
 -          + " table's default visibility is also empty, access will ALWAYS be"
 -          + " granted for users with permission to that table. Additionally, if this"
 -          + " field is changed, all existing data with an empty visibility label"
 -          + " will be interpreted with the new label on the next scan.",
 -      "1.3.5"),
 +      PropertyType.STRING, """
 +          The security label that will be assumed at scan time if an entry does \
 +          not have a visibility expression.
 +          Note: An empty security label is displayed as []. The scan results \
 +          will show an empty visibility even if the visibility from this \
 +          setting is applied to the entry.
 +          CAUTION: If a particular key has an empty security label AND its \
 +          table's default visibility is also empty, access will ALWAYS be \
 +          granted for users with permission to that table. Additionally, if this \
 +          field is changed, all existing data with an empty visibility label \
 +          will be interpreted with the new label on the next scan. \
 +          """, "1.3.5"),
    TABLE_LOCALITY_GROUPS("table.groups.enabled", "", PropertyType.STRING,
        "A comma separated list of locality group names to enable for this 
table.", "1.3.5"),
 -  TABLE_CONSTRAINT_PREFIX("table.constraint.", null, PropertyType.PREFIX,
 -      "Properties in this category are per-table properties that add"
 -          + " constraints to a table. These properties start with the category"
 -          + " prefix, followed by a number, and their values correspond to a fully"
 -          + " qualified Java class that implements the Constraint interface.\nFor example:\n"
 -          + "table.constraint.1 = org.apache.accumulo.core.constraints.MyCustomConstraint\n"
 -          + "and:\n table.constraint.2 = my.package.constraints.MySecondConstraint.",
 -      "1.3.5"),
 +  TABLE_CONSTRAINT_PREFIX("table.constraint.", null, PropertyType.PREFIX, """
 +      Properties in this category are per-table properties that add \
 +      constraints to a table. These properties start with the category \
 +      prefix, followed by a number, and their values correspond to a fully \
 +      qualified Java class that implements the Constraint interface.
 +      For example:
 +        table.constraint.1 = org.apache.accumulo.core.constraints.MyCustomConstraint
 +      and:
 +        table.constraint.2 = my.package.constraints.MySecondConstraint.
 +      """, "1.3.5"),
    TABLE_INDEXCACHE_ENABLED("table.cache.index.enable", "true", PropertyType.BOOLEAN,
        "Determines whether index block cache is enabled for a table.", "1.3.5"),
    TABLE_BLOCKCACHE_ENABLED("table.cache.block.enable", "false", PropertyType.BOOLEAN,
diff --cc core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
index 309b7b3cab,ce5c392fdc..ecbc595664
--- a/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/FileOperations.java
@@@ -29,11 -29,11 +29,12 @@@ import org.apache.accumulo.core.conf.Ac
  import org.apache.accumulo.core.conf.Property;
  import org.apache.accumulo.core.data.ByteSequence;
  import org.apache.accumulo.core.data.Range;
+ import org.apache.accumulo.core.data.TableId;
  import org.apache.accumulo.core.file.blockfile.impl.CacheProvider;
  import org.apache.accumulo.core.file.rfile.RFile;
 +import org.apache.accumulo.core.metadata.TabletFile;
 +import org.apache.accumulo.core.metadata.UnreferencedTabletFile;
  import org.apache.accumulo.core.spi.crypto.CryptoService;
 -import org.apache.accumulo.core.util.ratelimit.RateLimiter;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
@@@ -169,13 -169,15 +170,14 @@@ public abstract class FileOperations 
      return new ReaderBuilder();
    }
  
 -  public static class FileOptions {
 +  protected static class FileOptions {
      // objects used by all
      public final AccumuloConfiguration tableConfiguration;
 -    public final String filename;
 +    public final TabletFile file;
      public final FileSystem fs;
      public final Configuration fsConf;
 -    public final RateLimiter rateLimiter;
      // writer only objects
+     private final TableId tableId;
      public final String compression;
      public final FSDataOutputStream outputStream;
      public final boolean enableAccumuloStart;
@@@ -190,15 -192,17 +192,16 @@@
      public final boolean inclusive;
      public final boolean dropCacheBehind;
  
-     protected FileOptions(AccumuloConfiguration tableConfiguration, TabletFile file, FileSystem fs,
-         Configuration fsConf, String compression, FSDataOutputStream outputStream,
-         boolean enableAccumuloStart, CacheProvider cacheProvider, Cache<String,Long> fileLenCache,
-         boolean seekToBeginning, CryptoService cryptoService, Range range,
-         Set<ByteSequence> columnFamilies, boolean inclusive, boolean dropCacheBehind) {
 -    public FileOptions(TableId tableId, AccumuloConfiguration tableConfiguration, String filename,
 -        FileSystem fs, Configuration fsConf, RateLimiter rateLimiter, String compression,
++    protected FileOptions(TableId tableId, AccumuloConfiguration tableConfiguration,
++        TabletFile file, FileSystem fs, Configuration fsConf, String compression,
+         FSDataOutputStream outputStream, boolean enableAccumuloStart, CacheProvider cacheProvider,
+         Cache<String,Long> fileLenCache, boolean seekToBeginning, CryptoService cryptoService,
+         Range range, Set<ByteSequence> columnFamilies, boolean inclusive, boolean dropCacheBehind) {
+       this.tableId = tableId;
        this.tableConfiguration = tableConfiguration;
 -      this.filename = filename;
 +      this.file = Objects.requireNonNull(file);
        this.fs = fs;
        this.fsConf = fsConf;
 -      this.rateLimiter = rateLimiter;
        this.compression = compression;
        this.outputStream = outputStream;
        this.enableAccumuloStart = enableAccumuloStart;
@@@ -273,10 -285,12 +280,11 @@@
     * Helper class extended by both writers and readers.
     */
    public static class FileHelper {
+     private TableId tableId;
      private AccumuloConfiguration tableConfiguration;
 -    private String filename;
 +    private TabletFile file;
      private FileSystem fs;
      private Configuration fsConf;
 -    private RateLimiter rateLimiter;
      private CryptoService cryptoService;
      private boolean dropCacheBehind = false;
  
@@@ -312,27 -336,29 +325,28 @@@
  
      protected FileOptions toWriterBuilderOptions(String compression,
          FSDataOutputStream outputStream, boolean startEnabled) {
-       return new FileOptions(tableConfiguration, file, fs, fsConf, compression, outputStream,
-           startEnabled, NULL_PROVIDER, null, false, cryptoService, null, null, true,
 -      return new FileOptions(tableId, tableConfiguration, filename, fs, fsConf, rateLimiter,
 -          compression, outputStream, startEnabled, NULL_PROVIDER, null, false, cryptoService, null,
 -          null, true, dropCacheBehind);
++      return new FileOptions(tableId, tableConfiguration, file, fs, fsConf, compression,
++          outputStream, startEnabled, NULL_PROVIDER, null, false, cryptoService, null, null, true,
 +          dropCacheBehind);
      }
  
      protected FileOptions toReaderBuilderOptions(CacheProvider cacheProvider,
          Cache<String,Long> fileLenCache, boolean seekToBeginning) {
-       return new FileOptions(tableConfiguration, file, fs, fsConf, null, null, false,
 -      return new FileOptions(tableId, tableConfiguration, filename, fs, fsConf, rateLimiter, null,
 -          null, false, cacheProvider == null ? NULL_PROVIDER : cacheProvider, fileLenCache,
 -          seekToBeginning, cryptoService, null, null, true, dropCacheBehind);
++      return new FileOptions(tableId, tableConfiguration, file, fs, fsConf, null, null, false,
 +          cacheProvider == null ? NULL_PROVIDER : cacheProvider, fileLenCache, seekToBeginning,
 +          cryptoService, null, null, true, dropCacheBehind);
      }
  
      protected FileOptions toIndexReaderBuilderOptions(Cache<String,Long> fileLenCache) {
-       return new FileOptions(tableConfiguration, file, fs, fsConf, null, null, false, NULL_PROVIDER,
-           fileLenCache, false, cryptoService, null, null, true, dropCacheBehind);
 -      return new FileOptions(tableId, tableConfiguration, filename, fs, fsConf, rateLimiter, null,
 -          null, false, NULL_PROVIDER, fileLenCache, false, cryptoService, null, null, true,
 -          dropCacheBehind);
++      return new FileOptions(tableId, tableConfiguration, file, fs, fsConf, null, null, false,
++          NULL_PROVIDER, fileLenCache, false, cryptoService, null, null, true, dropCacheBehind);
      }
  
      protected FileOptions toScanReaderBuilderOptions(Range range, Set<ByteSequence> columnFamilies,
          boolean inclusive) {
-       return new FileOptions(tableConfiguration, file, fs, fsConf, null, null, false, NULL_PROVIDER,
-           null, false, cryptoService, range, columnFamilies, inclusive, dropCacheBehind);
 -      return new FileOptions(tableId, tableConfiguration, filename, fs, fsConf, rateLimiter, null,
 -          null, false, NULL_PROVIDER, null, false, cryptoService, range, columnFamilies, inclusive,
++      return new FileOptions(tableId, tableConfiguration, file, fs, fsConf, null, null, false,
++          NULL_PROVIDER, null, false, cryptoService, range, columnFamilies, inclusive,
+           dropCacheBehind);
      }
  
      protected AccumuloConfiguration getTableConfiguration() {
diff --cc core/src/main/java/org/apache/accumulo/core/file/rfile/RFileOperations.java
index 884a4f095a,c87b071642..0927b6b374
--- a/core/src/main/java/org/apache/accumulo/core/file/rfile/RFileOperations.java
+++ b/core/src/main/java/org/apache/accumulo/core/file/rfile/RFileOperations.java
@@@ -33,15 -35,17 +35,17 @@@ import org.apache.accumulo.core.file.Fi
  import org.apache.accumulo.core.file.FileSKVIterator;
  import org.apache.accumulo.core.file.FileSKVWriter;
  import org.apache.accumulo.core.file.blockfile.impl.CachableBlockFile.CachableBuilder;
 +import org.apache.accumulo.core.file.rfile.RFile.RFileSKVIterator;
  import org.apache.accumulo.core.file.rfile.bcfile.BCFile;
 -import org.apache.accumulo.core.metadata.MetadataTable;
 -import org.apache.accumulo.core.metadata.RootTable;
++import org.apache.accumulo.core.metadata.SystemTables;
 +import org.apache.accumulo.core.metadata.TabletFile;
  import org.apache.accumulo.core.sample.impl.SamplerConfigurationImpl;
  import org.apache.accumulo.core.sample.impl.SamplerFactory;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.CreateFlag;
  import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileSystem;
 -import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.fs.StreamCapabilities;
  import org.apache.hadoop.fs.permission.FsPermission;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.slf4j.Logger;
@@@ -54,13 -58,15 +58,14 @@@ public class RFileOperations extends Fi
    private static final Logger LOG = LoggerFactory.getLogger(RFileOperations.class);
 
    private static final Collection<ByteSequence> EMPTY_CF_SET = Collections.emptySet();
+   private static final AtomicBoolean SYNC_CAPABILITY_LOGGED = new AtomicBoolean(false);
  
 -  private static RFile.Reader getReader(FileOptions options) throws IOException {
 +  private static RFileSKVIterator getReader(FileOptions options) throws IOException {
      CachableBuilder cb = new CachableBuilder()
 -        .fsPath(options.getFileSystem(), new Path(options.getFilename()), options.dropCacheBehind)
 +        .fsPath(options.getFileSystem(), options.getFile().getPath(), options.dropCacheBehind)
          .conf(options.getConfiguration()).fileLen(options.getFileLenCache())
 -        .cacheProvider(options.cacheProvider).readLimiter(options.getRateLimiter())
 -        .cryptoService(options.getCryptoService());
 -    return new RFile.Reader(cb);
 +        .cacheProvider(options.cacheProvider).cryptoService(options.getCryptoService());
 +    return RFile.getReader(cb, options.getFile());
 +    return RFile.getReader(cb, options.getFile());
    }
  
    @Override
@@@ -194,9 -200,21 +199,20 @@@
                e.getMessage());
          }
        }
+ 
+       TableId tid = options.getTableId();
 -      if (tid != null && !SYNC_CAPABILITY_LOGGED.get()
 -          && (RootTable.ID.equals(tid) || MetadataTable.ID.equals(tid))) {
++      if (tid != null && !SYNC_CAPABILITY_LOGGED.get() && (SystemTables.ROOT.tableId().equals(tid)
++          || SystemTables.METADATA.tableId().equals(tid))) {
+         if (!outputStream.hasCapability(StreamCapabilities.HSYNC)) {
+           SYNC_CAPABILITY_LOGGED.set(true);
+           LOG.warn("File created for table {} does not support hsync. If dfs.datanode.synconclose"
+               + " is configured, then it may not work. dfs.datanode.synconclose is recommended for the"
+               + " root and metadata tables.", tid);
+         }
+       }
      }
  
 -    BCFile.Writer _cbw = new BCFile.Writer(outputStream, options.getRateLimiter(), compression,
 -        conf, options.cryptoService);
 +    BCFile.Writer _cbw = new BCFile.Writer(outputStream, compression, conf, options.cryptoService);
 
      return new RFile.Writer(_cbw, (int) blockSize, (int) indexBlockSize, samplerConfig, sampler);
    }
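A minimal sketch, not part of this commit, of the hsync capability probe that the warning
added above relies on. The Hadoop Configuration, default FileSystem, and the path
/tmp/hsync-probe are assumptions used only for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StreamCapabilities;

public class HsyncProbe {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path probe = new Path("/tmp/hsync-probe");  // hypothetical probe file
    try (FSDataOutputStream out = fs.create(probe)) {
      // Mirrors the check added in RFileOperations: warn when hsync is unavailable,
      // since dfs.datanode.synconclose may then have no effect on durability.
      if (!out.hasCapability(StreamCapabilities.HSYNC)) {
        System.out.println("Output stream does not support hsync");
      }
    } finally {
      fs.delete(probe, false);
    }
  }
}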
diff --cc server/base/src/main/java/org/apache/accumulo/server/compaction/FileCompactor.java
index 813744bee6,0d48f26799..9133fd1b0f
--- a/server/base/src/main/java/org/apache/accumulo/server/compaction/FileCompactor.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/compaction/FileCompactor.java
@@@ -355,15 -354,14 +355,16 @@@ public class FileCompactor implements C
  
        final boolean isMinC = env.getIteratorScope() == IteratorUtil.IteratorScope.minc;
  
 -      final boolean dropCacheBehindOutput = !RootTable.ID.equals(this.extent.tableId())
 -          && !MetadataTable.ID.equals(this.extent.tableId())
 -          && ((isMinC && acuTableConf.getBoolean(Property.TABLE_MINC_OUTPUT_DROP_CACHE))
 -              || (!isMinC && acuTableConf.getBoolean(Property.TABLE_MAJC_OUTPUT_DROP_CACHE)));
 +      final boolean dropCacheBehindOutput =
 +          !SystemTables.ROOT.tableId().equals(this.extent.tableId())
 +              && !SystemTables.METADATA.tableId().equals(this.extent.tableId())
 +              && ((isMinC && acuTableConf.getBoolean(Property.TABLE_MINC_OUTPUT_DROP_CACHE))
 +                  || (!isMinC && acuTableConf.getBoolean(Property.TABLE_MAJC_OUTPUT_DROP_CACHE)));
 
-       WriterBuilder outBuilder =
-           fileFactory.newWriterBuilder().forFile(outputFile, ns, ns.getConf(), cryptoService)
-               .withTableConfiguration(acuTableConf);
+       WriterBuilder outBuilder = fileFactory.newWriterBuilder().forTable(this.extent.tableId())
 -          .forFile(outputFile.getMetaInsert(), ns, ns.getConf(), cryptoService)
 -          .withTableConfiguration(acuTableConf).withRateLimiter(env.getWriteLimiter());
++          .forFile(outputFile, ns, ns.getConf(), cryptoService)
++          .withTableConfiguration(acuTableConf);
++
        if (dropCacheBehindOutput) {
          outBuilder.dropCachesBehind();
        }
diff --cc server/base/src/main/java/org/apache/accumulo/server/init/FileSystemInitializer.java
index 0c5319553c,873ea84347..a0ac9726a2
--- a/server/base/src/main/java/org/apache/accumulo/server/init/FileSystemInitializer.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/FileSystemInitializer.java
@@@ -155,29 -119,23 +155,29 @@@ public class FileSystemInitializer 
  
      // create table and default tablets directories
      createDirectories(fs, rootTabletDirUri, tableMetadataTabletDirUri, defaultMetadataTabletDirUri,
 -        replicationTableDefaultTabletDirUri);
 +        fateTableDefaultTabletDirUri, scanRefTableDefaultTabletDirUri);
  
 -    String ext = FileOperations.getNewFileExtension(DefaultConfiguration.getInstance());
 +    // For a new system mark the fate tablet and scan ref tablet as always mergeable.
 +    // Because this is a new system we can just use 0 for the time as that is what the Manager
 +    // will initialize with when starting
 +    var always = TabletMergeabilityMetadata.always(SteadyTime.from(Duration.ZERO));
 +    InitialTablet fateTablet = createFateRefTablet(context, always);
 +    InitialTablet scanRefTablet = createScanRefTablet(context, always);
  
 -    // populate the metadata tablet with info about the replication tablet
 +    // populate the metadata tablet with info about the fate and scan ref tablets
 +    String ext = FileOperations.getNewFileExtension(DefaultConfiguration.getInstance());
      String metadataFileName = tableMetadataTabletDirUri + Path.SEPARATOR + "0_1." + ext;
-     createMetadataFile(fs, metadataFileName, fateTablet, scanRefTablet);
 -    Tablet replicationTablet =
 -        new Tablet(REPL_TABLE_ID, replicationTableDefaultTabletDirName, null, null);
 -    createMetadataFile(fs, metadataFileName, siteConfig, REPL_TABLE_ID, replicationTablet);
++    createMetadataFile(fs, metadataFileName, scanRefTablet.tableId, fateTablet, scanRefTablet);
  
      // populate the root tablet with info about the metadata table's two initial tablets
 -    Tablet tablesTablet = new Tablet(MetadataTable.ID, tableMetadataTabletDirName, null, splitPoint,
 -        metadataFileName);
 -    Tablet defaultTablet =
 -        new Tablet(MetadataTable.ID, defaultMetadataTabletDirName, splitPoint, null);
 -    createMetadataFile(fs, rootTabletFileUri, siteConfig, MetadataTable.ID, tablesTablet,
 -        defaultTablet);
 +    // For the default tablet we want to make that mergeable, but don't make the TabletsSection
 +    // tablet mergeable. This will prevent tablets from each either from being auto merged
 +    InitialTablet tablesTablet = new InitialTablet(SystemTables.METADATA.tableId(),
 +        TABLE_TABLETS_TABLET_DIR, null, SPLIT_POINT, TabletMergeabilityMetadata.never(),
 +        StoredTabletFile.of(new Path(metadataFileName)).getMetadataPath());
 +    InitialTablet defaultTablet = new InitialTablet(SystemTables.METADATA.tableId(),
 +        defaultMetadataTabletDirName, SPLIT_POINT, null, always);
-     createMetadataFile(fs, rootTabletFileUri, tablesTablet, defaultTablet);
++    createMetadataFile(fs, rootTabletFileUri, tablesTablet.tableId, tablesTablet, defaultTablet);
    }
  
    private void createDirectories(VolumeManager fs, String... dirs) throws IOException {
@@@ -218,24 -177,21 +218,24 @@@
      }
    }
  
--  private void createMetadataFile(VolumeManager volmanager, String fileName,
 -      AccumuloConfiguration conf, TableId tid, Tablet... tablets) throws IOException {
 -    // sort file contents in memory, then play back to the file
 -    TreeMap<Key,Value> sorted = new TreeMap<>();
 -    for (Tablet tablet : tablets) {
 -      createEntriesForTablet(sorted, tablet);
 -    }
 -    FileSystem fs = volmanager.getFileSystemByPath(new Path(fileName));
++  private void createMetadataFile(VolumeManager volmanager, String fileName, TableId tid,
 +      InitialTablet... initialTablets) throws IOException {
 +    AccumuloConfiguration conf = initConfig.getSiteConf();
 +    ReferencedTabletFile file = ReferencedTabletFile.of(new Path(fileName));
 +    FileSystem fs = volmanager.getFileSystemByPath(file.getPath());
  
      CryptoService cs = CryptoFactoryLoader.getServiceForServer(conf);
  
-     FileSKVWriter tabletWriter = FileOperations.getInstance().newWriterBuilder()
+     FileSKVWriter tabletWriter = FileOperations.getInstance().newWriterBuilder().forTable(tid)
 -        .forFile(fileName, fs, fs.getConf(), cs).withTableConfiguration(conf).build();
 +        .forFile(file, fs, fs.getConf(), cs).withTableConfiguration(conf).build();
      tabletWriter.startDefaultLocalityGroup();
  
 +    TreeMap<Key,Value> sorted = new TreeMap<>();
 +    for (InitialTablet initialTablet : initialTablets) {
 +      // sort file contents in memory, then play back to the file
 +      sorted.putAll(initialTablet.createEntries());
 +    }
 +
      for (Map.Entry<Key,Value> entry : sorted.entrySet()) {
        tabletWriter.append(entry.getKey(), entry.getValue());
      }
diff --cc server/tserver/src/main/java/org/apache/accumulo/tserver/TabletClientHandler.java
index 5de50cfe93,3644f6146c..97ea23793c
--- a/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletClientHandler.java
+++ b/server/tserver/src/main/java/org/apache/accumulo/tserver/TabletClientHandler.java
@@@ -195,6 -302,6 +195,7 @@@ public class TabletClientHandler implem
        tableId = keyExtent.tableId();
        if (sameTable || security.canWrite(us.getCredentials(), tableId,
            server.getContext().getNamespaceId(tableId))) {
++        logDurabilityWarning(keyExtent, us.durability);
          long t2 = System.currentTimeMillis();
          us.authTimes.addStat(t2 - t1);
          us.currentTablet = server.getOnlineTablet(keyExtent);
@@@ -320,9 -426,9 +321,10 @@@
          Tablet tablet = entry.getKey();
          Durability durability =
              DurabilityImpl.resolveDurabilty(us.durability, tablet.getDurability());
 -        logDurabilityWarning(tablet, durability);
++        logDurabilityWarning(tablet.getExtent(), durability);
          List<Mutation> mutations = entry.getValue();
          if (!mutations.isEmpty()) {
 +          preppedMutations += mutations.size();
            try {
              server.updateMetrics.addMutationArraySize(mutations.size());
  
@@@ -634,9 -815,8 +636,9 @@@
          } else {
            final Durability durability =
                DurabilityImpl.resolveDurabilty(sess.durability, tablet.getDurability());
- 
 -          logDurabilityWarning(tablet, durability);
++          logDurabilityWarning(tablet.getExtent(), durability);
            List<Mutation> mutations = Collections.unmodifiableList(entry.getValue());
 +          preppedMutations += mutations.size();
            if (!mutations.isEmpty()) {
  
              PreparedMutations prepared = tablet.prepareMutationsForCommit(
@@@ -1397,4 -1636,11 +1399,11 @@@
        return handleTimeout(sessionId);
      }
    }
+ 
 -  private void logDurabilityWarning(Tablet tablet, Durability durability) {
 -    if (tablet.getExtent().isMeta() && durability != Durability.SYNC) {
++  private void logDurabilityWarning(KeyExtent tablet, Durability durability) {
++    if (tablet.isMeta() && durability != Durability.SYNC) {
+       log.warn("Property {} is not set to 'sync' for table {}", Property.TABLE_DURABILITY.getKey(),
 -          tablet.getExtent().tableId());
++          tablet.tableId());
+     }
+   }
  }
