This is an automated email from the ASF dual-hosted git repository.
chesnay pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/flink.git
The following commit(s) were added to refs/heads/master by this push:
new be419e2 [hotfix] Use proper template format substitute for Preconditions
be419e2 is described below
commit be419e2560ef89683b7795c75eb08ae2337fefee
Author: ap <[email protected]>
AuthorDate: Thu Jun 13 09:46:50 2019 +0200
[hotfix] Use proper template format substitute for Preconditions
---
.../flink/client/program/rest/retry/ExponentialWaitStrategy.java | 2 +-
.../flink/streaming/connectors/kafka/internal/FlinkKafkaProducer.java | 2 +-
.../connectors/kafka/internal/FlinkKafkaInternalProducer.java | 2 +-
.../apache/flink/api/java/typeutils/runtime/NullableSerializer.java | 4 ++--
.../flink/api/java/typeutils/runtime/PojoSerializerSnapshot.java | 2 +-
.../main/java/org/apache/flink/util/LinkedOptionalMapSerializer.java | 2 +-
.../org/apache/flink/streaming/python/api/PythonStreamBinderTest.java | 2 +-
.../java/org/apache/flink/runtime/messages/webmonitor/JobDetails.java | 2 +-
.../java/org/apache/flink/runtime/rest/RestClientConfiguration.java | 2 +-
.../apache/flink/runtime/rest/RestServerEndpointConfiguration.java | 2 +-
10 files changed, 11 insertions(+), 11 deletions(-)
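For context: org.apache.flink.util.Preconditions substitutes only "%s" placeholders into its error-message template (mirroring Guava's Preconditions), so "%d" or SLF4J-style "{}" placeholders are left as literal text and the arguments never end up formatted into the message. A minimal, self-contained sketch of the difference; the class name PreconditionsTemplateExample is invented for illustration and only flink-core is assumed on the classpath:

import org.apache.flink.util.Preconditions;

public class PreconditionsTemplateExample {

    public static void main(String[] args) {
        int attempt = -1;

        try {
            // %s is substituted into the template, as in the fixed call sites below.
            Preconditions.checkArgument(attempt >= 0, "attempt must not be negative (%s)", attempt);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // attempt must not be negative (-1)
        }

        try {
            // %d is not a recognized placeholder, so it stays literal and the
            // unused argument is appended in brackets after the message.
            Preconditions.checkArgument(attempt >= 0, "attempt must not be negative (%d)", attempt);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // attempt must not be negative (%d) [-1]
        }
    }
}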
diff --git a/flink-clients/src/main/java/org/apache/flink/client/program/rest/retry/ExponentialWaitStrategy.java b/flink-clients/src/main/java/org/apache/flink/client/program/rest/retry/ExponentialWaitStrategy.java
index 2bb5051..a81c524 100644
--- a/flink-clients/src/main/java/org/apache/flink/client/program/rest/retry/ExponentialWaitStrategy.java
+++ b/flink-clients/src/main/java/org/apache/flink/client/program/rest/retry/ExponentialWaitStrategy.java
@@ -39,7 +39,7 @@ public class ExponentialWaitStrategy implements WaitStrategy {
@Override
public long sleepTime(final long attempt) {
-        checkArgument(attempt >= 0, "attempt must not be negative (%d)", attempt);
+        checkArgument(attempt >= 0, "attempt must not be negative (%s)", attempt);
        final long exponentialSleepTime = initialWait * Math.round(Math.pow(2, attempt));
        return exponentialSleepTime >= 0 && exponentialSleepTime < maxWait ? exponentialSleepTime : maxWait;
}
diff --git a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/FlinkKafkaProducer.java b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/FlinkKafkaProducer.java
index fa672f0..ab4cf52 100644
--- a/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/FlinkKafkaProducer.java
+++ b/flink-connectors/flink-connector-kafka-0.11/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/FlinkKafkaProducer.java
@@ -189,7 +189,7 @@ public class FlinkKafkaProducer<K, V> implements Producer<K, V> {
     * {@link org.apache.kafka.clients.producer.KafkaProducer#initTransactions}.
*/
public void resumeTransaction(long producerId, short epoch) {
-        Preconditions.checkState(producerId >= 0 && epoch >= 0, "Incorrect values for producerId {} and epoch {}", producerId, epoch);
+        Preconditions.checkState(producerId >= 0 && epoch >= 0, "Incorrect values for producerId %s and epoch %s", producerId, epoch);
        LOG.info("Attempting to resume transaction {} with producerId {} and epoch {}", transactionalId, producerId, epoch);
        Object transactionManager = getValue(kafkaProducer, "transactionManager");
diff --git a/flink-connectors/flink-connector-kafka/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/FlinkKafkaInternalProducer.java b/flink-connectors/flink-connector-kafka/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/FlinkKafkaInternalProducer.java
index 62b2cff..916bfc7 100644
--- a/flink-connectors/flink-connector-kafka/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/FlinkKafkaInternalProducer.java
+++ b/flink-connectors/flink-connector-kafka/src/main/java/org/apache/flink/streaming/connectors/kafka/internal/FlinkKafkaInternalProducer.java
@@ -148,7 +148,7 @@ public class FlinkKafkaInternalProducer<K, V> implements Producer<K, V> {
     * https://github.com/apache/kafka/commit/5d2422258cb975a137a42a4e08f03573c49a387e#diff-f4ef1afd8792cd2a2e9069cd7ddea630
*/
public void resumeTransaction(long producerId, short epoch) {
-        Preconditions.checkState(producerId >= 0 && epoch >= 0, "Incorrect values for producerId {} and epoch {}", producerId, epoch);
+        Preconditions.checkState(producerId >= 0 && epoch >= 0, "Incorrect values for producerId %s and epoch %s", producerId, epoch);
        LOG.info("Attempting to resume transaction {} with producerId {} and epoch {}", transactionalId, producerId, epoch);
        Object transactionManager = getValue(kafkaProducer, "transactionManager");
diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/NullableSerializer.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/NullableSerializer.java
index 282a12f..0883daa 100644
--- a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/NullableSerializer.java
+++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/NullableSerializer.java
@@ -310,7 +310,7 @@ public class NullableSerializer<T> extends TypeSerializer<T> {
private NullableSerializerSnapshot(int nullPaddingLength) {
super(NullableSerializer.class);
checkArgument(nullPaddingLength >= 0,
- "Computed NULL padding can not be negative. %d",
+ "Computed NULL padding can not be negative. %s",
nullPaddingLength);
this.nullPaddingLength = nullPaddingLength;
@@ -329,7 +329,7 @@ public class NullableSerializer<T> extends TypeSerializer<T> {
    @Override
    protected NullableSerializer<T> createOuterSerializerWithNestedSerializers(TypeSerializer<?>[] nestedSerializers) {
checkState(nullPaddingLength >= 0,
- "Negative padding size after serializer
construction: %d",
+ "Negative padding size after serializer
construction: %s",
nullPaddingLength);
        final byte[] padding = (nullPaddingLength == 0) ? EMPTY_BYTE_ARRAY : new byte[nullPaddingLength];
diff --git a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializerSnapshot.java b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializerSnapshot.java
index 5610536..9987fae 100644
--- a/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializerSnapshot.java
+++ b/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/PojoSerializerSnapshot.java
@@ -121,7 +121,7 @@ public class PojoSerializerSnapshot<T> implements TypeSerializerSnapshot<T> {
@Override
    public void readSnapshot(int readVersion, DataInputView in, ClassLoader userCodeClassLoader) throws IOException {
-        checkArgument(readVersion == 2, "unrecognized read version %d", readVersion);
+        checkArgument(readVersion == 2, "unrecognized read version %s", readVersion);
        snapshotData = PojoSerializerSnapshotData.createFrom(in, userCodeClassLoader);
}
diff --git a/flink-core/src/main/java/org/apache/flink/util/LinkedOptionalMapSerializer.java b/flink-core/src/main/java/org/apache/flink/util/LinkedOptionalMapSerializer.java
index bd9fdd0..47e791f 100644
--- a/flink-core/src/main/java/org/apache/flink/util/LinkedOptionalMapSerializer.java
+++ b/flink-core/src/main/java/org/apache/flink/util/LinkedOptionalMapSerializer.java
@@ -81,7 +81,7 @@ public final class LinkedOptionalMapSerializer {
            BiFunctionWithException<DataInputView, String, V, IOException> valueReader) throws IOException {
final long header = in.readLong();
-        checkState(header == HEADER, "Corrupted stream received header %d", header);
+        checkState(header == HEADER, "Corrupted stream received header %s", header);
long mapSize = in.readInt();
LinkedOptionalMap<K, V> map = new LinkedOptionalMap<>();
diff --git a/flink-libraries/flink-streaming-python/src/test/java/org/apache/flink/streaming/python/api/PythonStreamBinderTest.java b/flink-libraries/flink-streaming-python/src/test/java/org/apache/flink/streaming/python/api/PythonStreamBinderTest.java
index 1a544ff..bc9da76 100644
--- a/flink-libraries/flink-streaming-python/src/test/java/org/apache/flink/streaming/python/api/PythonStreamBinderTest.java
+++ b/flink-libraries/flink-streaming-python/src/test/java/org/apache/flink/streaming/python/api/PythonStreamBinderTest.java
@@ -66,7 +66,7 @@ public class PythonStreamBinderTest extends AbstractTestBase {
        Path testEntryPoint = new Path(getBaseTestPythonDir(), "run_all_tests.py");
        List<String> testFiles = findTestFiles();
-        Preconditions.checkState(testFiles.size() > 0, "No test files were found in {}.", getBaseTestPythonDir());
+        Preconditions.checkState(testFiles.size() > 0, "No test files were found in %s.", getBaseTestPythonDir());
String[] arguments = new String[1 + 1 + testFiles.size()];
arguments[0] = testEntryPoint.getPath();
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/messages/webmonitor/JobDetails.java b/flink-runtime/src/main/java/org/apache/flink/runtime/messages/webmonitor/JobDetails.java
index 42e41d2..873e01d 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/messages/webmonitor/JobDetails.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/messages/webmonitor/JobDetails.java
@@ -94,7 +94,7 @@ public class JobDetails implements Serializable {
this.status = checkNotNull(status);
this.lastUpdateTime = lastUpdateTime;
        Preconditions.checkArgument(tasksPerState.length == ExecutionState.values().length,
-            "tasksPerState argument must be of size {}.", ExecutionState.values().length);
+            "tasksPerState argument must be of size %s.", ExecutionState.values().length);
this.tasksPerState = checkNotNull(tasksPerState);
this.numTasks = numTasks;
}
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestClientConfiguration.java b/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestClientConfiguration.java
index dbddc06..ed7049f 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestClientConfiguration.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestClientConfiguration.java
@@ -49,7 +49,7 @@ public final class RestClientConfiguration {
final long connectionTimeout,
final long idlenessTimeout,
final int maxContentLength) {
-        checkArgument(maxContentLength > 0, "maxContentLength must be positive, was: %d", maxContentLength);
+        checkArgument(maxContentLength > 0, "maxContentLength must be positive, was: %s", maxContentLength);
this.sslHandlerFactory = sslHandlerFactory;
this.connectionTimeout = connectionTimeout;
this.idlenessTimeout = idlenessTimeout;
diff --git a/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpointConfiguration.java b/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpointConfiguration.java
index 336cd65..3ae28e7 100644
--- a/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpointConfiguration.java
+++ b/flink-runtime/src/main/java/org/apache/flink/runtime/rest/RestServerEndpointConfiguration.java
@@ -68,7 +68,7 @@ public final class RestServerEndpointConfiguration {
final int maxContentLength,
final Map<String, String> responseHeaders) {
-        Preconditions.checkArgument(maxContentLength > 0, "maxContentLength must be positive, was: %d", maxContentLength);
+        Preconditions.checkArgument(maxContentLength > 0, "maxContentLength must be positive, was: %s", maxContentLength);
this.restAddress = requireNonNull(restAddress);
this.restBindAddress = restBindAddress;