This is an automated email from the ASF dual-hosted git repository.
mimaison pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/kafka.git
The following commit(s) were added to refs/heads/trunk by this push:
     new 0a50005408 KAFKA-13929: Replace legacy File.createNewFile() with NIO.2 Files.createFile() (#12197)
0a50005408 is described below
commit 0a50005408b41fc8b3b26e2ed146b3e49d15af10
Author: Divij Vaidya <[email protected]>
AuthorDate: Fri Jun 10 13:28:55 2022 +0200
    KAFKA-13929: Replace legacy File.createNewFile() with NIO.2 Files.createFile() (#12197)
Reviewers: Mickael Maison <[email protected]>
---
core/src/test/scala/unit/kafka/log/LogLoaderTest.scala | 4 ++--
core/src/test/scala/unit/kafka/log/LogTestUtils.scala | 5 +++--
.../scala/unit/kafka/log/ProducerStateManagerTest.scala | 16 ++++++++--------
core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala | 10 +++++-----
.../test/scala/unit/kafka/server/LogDirFailureTest.scala | 3 ++-
.../scala/unit/kafka/server/ReplicaManagerTest.scala | 2 +-
core/src/test/scala/unit/kafka/utils/TestUtils.scala | 2 +-
.../metadata/storage/RemoteLogMetadataSnapshotFile.java | 8 ++++++--
.../integration/StateDirectoryIntegrationTest.java | 4 +++-
.../processor/internals/GlobalStateManagerImplTest.java | 2 +-
.../processor/internals/ProcessorStateManagerTest.java | 3 ++-
.../streams/processor/internals/StateDirectoryTest.java | 12 ++++++------
.../streams/processor/internals/TaskManagerTest.java | 3 ++-
.../streams/state/internals/KeyValueSegmentsTest.java | 5 +++--
.../streams/state/internals/TimestampedSegmentsTest.java | 9 +++++----
15 files changed, 50 insertions(+), 38 deletions(-)
diff --git a/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala
b/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala
index 9d7a3b2023..0d41a5073b 100644
--- a/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala
+++ b/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala
@@ -936,8 +936,8 @@ class LogLoaderTest {
     // The files remain absent until we first access them because we are doing lazy loading for time index and offset index
     // files, but in this test case we need to create these files in order to test that we will remove them.
- bogusIndex2.createNewFile()
- bogusTimeIndex2.createNewFile()
+ Files.createFile(bogusIndex2.toPath)
+ Files.createFile(bogusTimeIndex2.toPath)
def createRecords = TestUtils.singletonRecords(value = "test".getBytes,
timestamp = mockTime.milliseconds)
val logConfig = LogTestUtils.createLogConfig(segmentBytes =
createRecords.sizeInBytes * 5, segmentIndexBytes = 1000, indexIntervalBytes = 1)
diff --git a/core/src/test/scala/unit/kafka/log/LogTestUtils.scala
b/core/src/test/scala/unit/kafka/log/LogTestUtils.scala
index e524bcbd7d..f6b58d78ce 100644
--- a/core/src/test/scala/unit/kafka/log/LogTestUtils.scala
+++ b/core/src/test/scala/unit/kafka/log/LogTestUtils.scala
@@ -27,6 +27,7 @@ import org.apache.kafka.common.record.{CompressionType,
ControlRecordType, EndTr
import org.apache.kafka.common.utils.{Time, Utils}
import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse}
+import java.nio.file.Files
import scala.collection.Iterable
import scala.jdk.CollectionConverters._
@@ -142,8 +143,8 @@ object LogTestUtils {
segment.append(MemoryRecords.withRecords(baseOffset + Int.MaxValue - 1,
CompressionType.NONE, 0,
record(baseOffset + Int.MaxValue - 1)))
// Need to create the offset files explicitly to avoid triggering
segment recovery to truncate segment.
- UnifiedLog.offsetIndexFile(logDir, baseOffset).createNewFile()
- UnifiedLog.timeIndexFile(logDir, baseOffset).createNewFile()
+ Files.createFile(UnifiedLog.offsetIndexFile(logDir, baseOffset).toPath)
+ Files.createFile(UnifiedLog.timeIndexFile(logDir, baseOffset).toPath)
baseOffset + Int.MaxValue
}
diff --git a/core/src/test/scala/unit/kafka/log/ProducerStateManagerTest.scala
b/core/src/test/scala/unit/kafka/log/ProducerStateManagerTest.scala
index 93c1724841..60f0c1ce2c 100644
--- a/core/src/test/scala/unit/kafka/log/ProducerStateManagerTest.scala
+++ b/core/src/test/scala/unit/kafka/log/ProducerStateManagerTest.scala
@@ -975,9 +975,9 @@ class ProducerStateManagerTest {
// the broker shutdown cleanly and emitted a snapshot file larger than the
base offset of the active segment.
// Create 3 snapshot files at different offsets.
- UnifiedLog.producerSnapshotFile(logDir, 5).createNewFile() // not stray
- UnifiedLog.producerSnapshotFile(logDir, 2).createNewFile() // stray
- UnifiedLog.producerSnapshotFile(logDir, 42).createNewFile() // not stray
+    Files.createFile(UnifiedLog.producerSnapshotFile(logDir, 5).toPath) // not stray
+    Files.createFile(UnifiedLog.producerSnapshotFile(logDir, 2).toPath) // stray
+    Files.createFile(UnifiedLog.producerSnapshotFile(logDir, 42).toPath) // not stray
// claim that we only have one segment with a base offset of 5
stateManager.removeStraySnapshots(Seq(5))
@@ -995,9 +995,9 @@ class ProducerStateManagerTest {
// Snapshots associated with an offset in the list of segment base offsets
should remain.
// Create 3 snapshot files at different offsets.
- UnifiedLog.producerSnapshotFile(logDir, 5).createNewFile() // stray
- UnifiedLog.producerSnapshotFile(logDir, 2).createNewFile() // stray
- UnifiedLog.producerSnapshotFile(logDir, 42).createNewFile() // not stray
+    Files.createFile(UnifiedLog.producerSnapshotFile(logDir, 5).toPath) // stray
+    Files.createFile(UnifiedLog.producerSnapshotFile(logDir, 2).toPath) // stray
+    Files.createFile(UnifiedLog.producerSnapshotFile(logDir, 42).toPath) // not stray
stateManager.removeStraySnapshots(Seq(42))
assertEquals(Seq(42),
ProducerStateManager.listSnapshotFiles(logDir).map(_.offset).sorted)
@@ -1009,7 +1009,7 @@ class ProducerStateManagerTest {
*/
@Test
def testRemoveAndMarkSnapshotForDeletion(): Unit = {
- UnifiedLog.producerSnapshotFile(logDir, 5).createNewFile()
+ Files.createFile(UnifiedLog.producerSnapshotFile(logDir, 5).toPath)
val manager = new ProducerStateManager(partition, logDir,
maxTransactionTimeoutMs, maxProducerIdExpirationMs, time)
assertTrue(manager.latestSnapshotOffset.isDefined)
val snapshot = manager.removeAndMarkSnapshotForDeletion(5).get
@@ -1027,7 +1027,7 @@ class ProducerStateManagerTest {
@Test
def testRemoveAndMarkSnapshotForDeletionAlreadyDeleted(): Unit = {
val file = UnifiedLog.producerSnapshotFile(logDir, 5)
- file.createNewFile()
+ Files.createFile(file.toPath)
val manager = new ProducerStateManager(partition, logDir,
maxTransactionTimeoutMs, maxProducerIdExpirationMs, time)
assertTrue(manager.latestSnapshotOffset.isDefined)
Files.delete(file.toPath)
diff --git a/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala
b/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala
index 193ce160f8..57409a1f03 100755
--- a/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala
+++ b/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala
@@ -67,8 +67,8 @@ class UnifiedLogTest {
def createEmptyLogs(dir: File, offsets: Int*): Unit = {
for(offset <- offsets) {
- UnifiedLog.logFile(dir, offset).createNewFile()
- UnifiedLog.offsetIndexFile(dir, offset).createNewFile()
+ Files.createFile(UnifiedLog.logFile(dir, offset).toPath)
+ Files.createFile(UnifiedLog.offsetIndexFile(dir, offset).toPath)
}
}
@@ -2413,8 +2413,8 @@ class UnifiedLogTest {
private def testDegenerateSplitSegmentWithOverflow(segmentBaseOffset: Long,
records: List[MemoryRecords]): Unit = {
val segment = LogTestUtils.rawSegment(logDir, segmentBaseOffset)
// Need to create the offset files explicitly to avoid triggering segment
recovery to truncate segment.
- UnifiedLog.offsetIndexFile(logDir, segmentBaseOffset).createNewFile()
- UnifiedLog.timeIndexFile(logDir, segmentBaseOffset).createNewFile()
+    Files.createFile(UnifiedLog.offsetIndexFile(logDir, segmentBaseOffset).toPath)
+    Files.createFile(UnifiedLog.timeIndexFile(logDir, segmentBaseOffset).toPath)
records.foreach(segment.append _)
segment.close()
@@ -3418,7 +3418,7 @@ class UnifiedLogTest {
// Delete the underlying directory to trigger a KafkaStorageException
val dir = log.dir
Utils.delete(dir)
- dir.createNewFile()
+ Files.createFile(dir.toPath)
assertThrows(classOf[KafkaStorageException], () => {
log.delete()
diff --git a/core/src/test/scala/unit/kafka/server/LogDirFailureTest.scala
b/core/src/test/scala/unit/kafka/server/LogDirFailureTest.scala
index bfbb14e1aa..c073f08d13 100644
--- a/core/src/test/scala/unit/kafka/server/LogDirFailureTest.scala
+++ b/core/src/test/scala/unit/kafka/server/LogDirFailureTest.scala
@@ -31,6 +31,7 @@ import org.apache.kafka.common.utils.Utils
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{BeforeEach, Test, TestInfo}
+import java.nio.file.Files
import scala.annotation.nowarn
import scala.jdk.CollectionConverters._
@@ -84,7 +85,7 @@ class LogDirFailureTest extends IntegrationTestHarness {
val logDir = new File(kafkaConfig.logDirs.head)
// Make log directory of the partition on the leader broker inaccessible
by replacing it with a file
CoreUtils.swallow(Utils.delete(logDir), this)
- logDir.createNewFile()
+ Files.createFile(logDir.toPath)
assertTrue(logDir.isFile)
server = TestUtils.createServer(kafkaConfig)
diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala
b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala
index e5eef65d0a..c2c8b4ddc7 100644
--- a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala
+++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala
@@ -2597,7 +2597,7 @@ class ReplicaManagerTest {
// Delete the underlying directory to trigger an KafkaStorageException
val dir = log.dir.getParentFile
Utils.delete(dir)
- dir.createNewFile()
+ Files.createFile(dir.toPath)
}
val partitionStates = Map(tp0 -> new StopReplicaPartitionState()
diff --git a/core/src/test/scala/unit/kafka/utils/TestUtils.scala
b/core/src/test/scala/unit/kafka/utils/TestUtils.scala
index ad3c34f960..e5d8152a99 100755
--- a/core/src/test/scala/unit/kafka/utils/TestUtils.scala
+++ b/core/src/test/scala/unit/kafka/utils/TestUtils.scala
@@ -1464,7 +1464,7 @@ object TestUtils extends Logging {
val localLog = leaderBroker.replicaManager.localLogOrException(partition)
val logDir = localLog.dir.getParentFile
CoreUtils.swallow(Utils.delete(logDir), this)
- logDir.createNewFile()
+ Files.createFile(logDir.toPath)
assertTrue(logDir.isFile)
if (failureType == Roll) {
diff --git
a/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/RemoteLogMetadataSnapshotFile.java
b/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/RemoteLogMetadataSnapshotFile.java
index cee77ee81d..db49bb9cb8 100644
---
a/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/RemoteLogMetadataSnapshotFile.java
+++
b/storage/src/main/java/org/apache/kafka/server/log/remote/metadata/storage/RemoteLogMetadataSnapshotFile.java
@@ -29,6 +29,7 @@ import java.nio.ByteBuffer;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.channels.ReadableByteChannel;
+import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
@@ -70,8 +71,11 @@ public class RemoteLogMetadataSnapshotFile {
// Create an empty file if it does not exist.
try {
-            boolean newFileCreated = metadataStoreFile.createNewFile();
-            log.info("Remote log metadata snapshot file: [{}], newFileCreated: [{}]", metadataStoreFile, newFileCreated);
+            final boolean fileExists = Files.exists(metadataStoreFile.toPath());
+            if (!fileExists) {
+                Files.createFile(metadataStoreFile.toPath());
+            }
+            log.info("Remote log metadata snapshot file: [{}], newFileCreated: [{}]", metadataStoreFile, !fileExists);
} catch (IOException e) {
throw new KafkaException(e);
}
diff --git
a/streams/src/test/java/org/apache/kafka/streams/integration/StateDirectoryIntegrationTest.java
b/streams/src/test/java/org/apache/kafka/streams/integration/StateDirectoryIntegrationTest.java
index 38651cfd34..1515debe84 100644
---
a/streams/src/test/java/org/apache/kafka/streams/integration/StateDirectoryIntegrationTest.java
+++
b/streams/src/test/java/org/apache/kafka/streams/integration/StateDirectoryIntegrationTest.java
@@ -18,6 +18,7 @@ package org.apache.kafka.streams.integration;
import java.io.File;
import java.io.IOException;
+import java.nio.file.Files;
import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.CountDownLatch;
@@ -258,7 +259,8 @@ public class StateDirectoryIntegrationTest {
assertTrue(appDir.exists()); // Application state directory
Exists
try {
- assertTrue((new File(appDir, "dummy")).createNewFile());
+ final File dummyFile = new File(appDir, "dummy");
+ Files.createFile(dummyFile.toPath());
} catch (final IOException e) {
throw new RuntimeException("Failed to create dummy file.", e);
}
diff --git
a/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java
index 0670fedb0a..4ba88b73e6 100644
---
a/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java
+++
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/GlobalStateManagerImplTest.java
@@ -167,7 +167,7 @@ public class GlobalStateManagerImplTest {
// set readonly to the CHECKPOINT_FILE_NAME.tmp file because we will
write data to the .tmp file first
// and then swap to CHECKPOINT_FILE_NAME by replacing it
final File file = new File(stateDirectory.globalStateDir(),
StateManagerUtil.CHECKPOINT_FILE_NAME + ".tmp");
- file.createNewFile();
+ Files.createFile(file.toPath());
file.setWritable(false);
try (final LogCaptureAppender appender =
LogCaptureAppender.createAndRegister(GlobalStateManagerImpl.class)) {
diff --git
a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java
index 5947842a74..4dc62734bf 100644
---
a/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java
+++
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/ProcessorStateManagerTest.java
@@ -55,6 +55,7 @@ import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@@ -813,7 +814,7 @@ public class ProcessorStateManagerTest {
stateMgr.registerStore(persistentStore,
persistentStore.stateRestoreCallback, null);
final File file = new File(stateMgr.baseDir(), CHECKPOINT_FILE_NAME);
- file.createNewFile();
+ Files.createFile(file.toPath());
final FileWriter writer = new FileWriter(file);
writer.write("abcdefg");
writer.close();
diff --git
a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateDirectoryTest.java
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateDirectoryTest.java
index 81bc7d7562..205f19537b 100644
---
a/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateDirectoryTest.java
+++
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/StateDirectoryTest.java
@@ -241,7 +241,7 @@ public class StateDirectoryTest {
// Replace application's stateDir to regular file
Utils.delete(appDir);
- appDir.createNewFile();
+ Files.createFile(appDir.toPath());
assertThrows(ProcessorStateException.class, () ->
directory.getOrCreateDirectoryForTask(taskId));
}
@@ -253,7 +253,7 @@ public class StateDirectoryTest {
// Replace taskDir to a regular file
final File taskDir = new File(appDir, toTaskDirString(taskId));
Utils.delete(taskDir);
- taskDir.createNewFile();
+ Files.createFile(taskDir.toPath());
// Error: ProcessorStateException should be thrown.
assertThrows(ProcessorStateException.class, () ->
directory.getOrCreateDirectoryForTask(taskId));
@@ -391,8 +391,8 @@ public class StateDirectoryTest {
// make sure the File#listFiles returns null and
StateDirectory#listAllTaskDirectories is able to handle null
Utils.delete(appDir);
- assertTrue(appDir.createNewFile());
- assertTrue(appDir.exists());
+ Files.createFile(appDir.toPath());
+ assertTrue(Files.exists(appDir.toPath()));
assertNull(appDir.listFiles());
assertEquals(0, directory.listAllTaskDirectories().size());
}
@@ -571,7 +571,7 @@ public class StateDirectoryTest {
// Create a dummy file in appDir; for this, appDir will not be empty
after cleanup.
final File dummyFile = new File(appDir, "dummy");
- assertTrue(dummyFile.createNewFile());
+ Files.createFile(dummyFile.toPath());
try (final LogCaptureAppender appender =
LogCaptureAppender.createAndRegister(StateDirectory.class)) {
// call StateDirectory#clean
@@ -791,7 +791,7 @@ public class StateDirectoryTest {
@Test
public void shouldGetFreshProcessIdIfJsonUnreadable() throws Exception {
final File processFile = new File(appDir, PROCESS_FILE_NAME);
- assertThat(processFile.createNewFile(), is(true));
+ Files.createFile(processFile.toPath());
final UUID processId = UUID.randomUUID();
final FileOutputStream fileOutputStream = new
FileOutputStream(processFile);
diff --git
a/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java
index 7f442d3131..073dede23f 100644
---
a/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java
+++
b/streams/src/test/java/org/apache/kafka/streams/processor/internals/TaskManagerTest.java
@@ -52,6 +52,7 @@ import
org.apache.kafka.streams.processor.internals.testutil.DummyStreamsConfig;
import
org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender;
import org.apache.kafka.streams.state.internals.OffsetCheckpoint;
+import java.nio.file.Files;
import java.util.ArrayList;
import org.easymock.EasyMock;
import org.easymock.EasyMockRunner;
@@ -3275,7 +3276,7 @@ public class TaskManagerTest {
private void writeCheckpointFile(final TaskId task, final
Map<TopicPartition, Long> offsets) throws Exception {
final File checkpointFile = getCheckpointFile(task);
- assertThat(checkpointFile.createNewFile(), is(true));
+ Files.createFile(checkpointFile.toPath());
new OffsetCheckpoint(checkpointFile).write(offsets);
expect(stateDirectory.checkpointFileFor(task)).andReturn(checkpointFile);
}
diff --git
a/streams/src/test/java/org/apache/kafka/streams/state/internals/KeyValueSegmentsTest.java
b/streams/src/test/java/org/apache/kafka/streams/state/internals/KeyValueSegmentsTest.java
index c8f1a0e061..e8fe877b10 100644
---
a/streams/src/test/java/org/apache/kafka/streams/state/internals/KeyValueSegmentsTest.java
+++
b/streams/src/test/java/org/apache/kafka/streams/state/internals/KeyValueSegmentsTest.java
@@ -28,6 +28,7 @@ import org.junit.Before;
import org.junit.Test;
import java.io.File;
+import java.nio.file.Files;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
@@ -303,7 +304,7 @@ public class KeyValueSegmentsTest {
for (int segmentId = 0; segmentId < NUM_SEGMENTS; ++segmentId) {
final File oldSegment = new File(storeDirectoryPath +
File.separator + storeName + "-" + formatter.format(new Date(segmentId *
segmentInterval)));
//noinspection ResultOfMethodCallIgnored
- oldSegment.createNewFile();
+ Files.createFile(oldSegment.toPath());
}
segments.openExisting(context, -1L);
@@ -325,7 +326,7 @@ public class KeyValueSegmentsTest {
for (int segmentId = 0; segmentId < NUM_SEGMENTS; ++segmentId) {
final File oldSegment = new File(storeDirectoryPath +
File.separator + storeName + ":" + segmentId * (RETENTION_PERIOD /
(NUM_SEGMENTS - 1)));
//noinspection ResultOfMethodCallIgnored
- oldSegment.createNewFile();
+ Files.createFile(oldSegment.toPath());
}
segments.openExisting(context, -1L);
diff --git
a/streams/src/test/java/org/apache/kafka/streams/state/internals/TimestampedSegmentsTest.java
b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimestampedSegmentsTest.java
index 722cb69fd1..50bad3f60c 100644
---
a/streams/src/test/java/org/apache/kafka/streams/state/internals/TimestampedSegmentsTest.java
+++
b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimestampedSegmentsTest.java
@@ -28,6 +28,7 @@ import org.junit.Before;
import org.junit.Test;
import java.io.File;
+import java.nio.file.Files;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
@@ -304,7 +305,7 @@ public class TimestampedSegmentsTest {
for (int segmentId = 0; segmentId < NUM_SEGMENTS; ++segmentId) {
final File oldSegment = new File(storeDirectoryPath +
File.separator + storeName + "-" + formatter.format(new Date(segmentId *
segmentInterval)));
//noinspection ResultOfMethodCallIgnored
- oldSegment.createNewFile();
+ Files.createFile(oldSegment.toPath());
}
segments.openExisting(context, -1L);
@@ -312,7 +313,7 @@ public class TimestampedSegmentsTest {
for (int segmentId = 0; segmentId < NUM_SEGMENTS; ++segmentId) {
final String segmentName = storeName + "." + (long) segmentId *
segmentInterval;
final File newSegment = new File(storeDirectoryPath +
File.separator + segmentName);
- assertTrue(newSegment.exists());
+ assertTrue(Files.exists(newSegment.toPath()));
}
}
@@ -326,14 +327,14 @@ public class TimestampedSegmentsTest {
for (int segmentId = 0; segmentId < NUM_SEGMENTS; ++segmentId) {
final File oldSegment = new File(storeDirectoryPath +
File.separator + storeName + ":" + segmentId * (RETENTION_PERIOD /
(NUM_SEGMENTS - 1)));
//noinspection ResultOfMethodCallIgnored
- oldSegment.createNewFile();
+ Files.createFile(oldSegment.toPath());
}
segments.openExisting(context, -1L);
for (int segmentId = 0; segmentId < NUM_SEGMENTS; ++segmentId) {
final File newSegment = new File(storeDirectoryPath +
File.separator + storeName + "." + segmentId * (RETENTION_PERIOD /
(NUM_SEGMENTS - 1)));
- assertTrue(newSegment.exists());
+ assertTrue(Files.exists(newSegment.toPath()));
}
}