This is an automated email from the ASF dual-hosted git repository.
yunhong pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/fluss.git
The following commit(s) were added to refs/heads/main by this push:
new 9489cceb3 [typo] Fix typos in docs comments and improve code
readability (#2734)
9489cceb3 is described below
commit 9489cceb38790708f0ecaccaf2f4f8bb0afdfd64
Author: xiaozhou <[email protected]>
AuthorDate: Fri Feb 27 11:54:08 2026 +0800
[typo] Fix typos in docs comments and improve code readability (#2734)
* [typo] Fix many typos and optimize some code
* [typo] Fix many typos and optimize some code
---
.../java/org/apache/fluss/client/lookup/LookupType.java | 2 +-
.../client/security/acl/FlussAuthorizationITCase.java | 2 +-
.../test/java/org/apache/fluss/row/BinaryArrayTest.java | 2 +-
.../test/java/org/apache/fluss/row/GenericArrayTest.java | 2 +-
.../fluss/flink/tiering/source/TableBucketWriteResult.java | 2 +-
.../tiering/source/enumerator/TieringSourceEnumerator.java | 2 +-
.../org/apache/fluss/flink/catalog/FlinkCatalogTest.java | 14 +++++++-------
.../source/enumerator/TieringSourceEnumeratorTest.java | 2 +-
.../fluss/lake/iceberg/tiering/IcebergWriteResult.java | 2 +-
.../apache/fluss/lake/lance/tiering/LanceWriteResult.java | 2 +-
.../lake/lance/tiering/LanceWriteResultSerializer.java | 2 +-
.../fluss/lake/paimon/tiering/PaimonWriteResult.java | 2 +-
.../org/apache/fluss/rpc/netty/client/NettyClientTest.java | 2 +-
.../server/coordinator/CoordinatorEventProcessor.java | 8 ++++----
.../fluss/server/coordinator/CoordinatorRequestBatch.java | 4 ++--
.../server/coordinator/event/CommitKvSnapshotEvent.java | 2 +-
.../fluss/server/coordinator/rebalance/ActionType.java | 2 +-
.../org/apache/fluss/server/kv/KvSnapshotResource.java | 2 +-
.../fluss/server/kv/snapshot/KvTabletSnapshotTarget.java | 4 ++--
.../fluss/tools/ci/licensecheck/NoticeFileChecker.java | 2 +-
website/blog/2025-06-01-partial-updates.md | 4 ++--
website/community/how-to-contribute/contribute-code.md | 2 +-
website/docs/maintenance/filesystems/oss.md | 2 +-
23 files changed, 35 insertions(+), 35 deletions(-)
diff --git
a/fluss-client/src/main/java/org/apache/fluss/client/lookup/LookupType.java
b/fluss-client/src/main/java/org/apache/fluss/client/lookup/LookupType.java
index 082fed961..910f0e481 100644
--- a/fluss-client/src/main/java/org/apache/fluss/client/lookup/LookupType.java
+++ b/fluss-client/src/main/java/org/apache/fluss/client/lookup/LookupType.java
@@ -24,5 +24,5 @@ import org.apache.fluss.annotation.Internal;
public enum LookupType {
LOOKUP,
LOOKUP_WITH_INSERT_IF_NOT_EXISTS,
- PREFIX_LOOKUP;
+ PREFIX_LOOKUP
}
diff --git
a/fluss-client/src/test/java/org/apache/fluss/client/security/acl/FlussAuthorizationITCase.java
b/fluss-client/src/test/java/org/apache/fluss/client/security/acl/FlussAuthorizationITCase.java
index 9bb316378..8c301c89a 100644
---
a/fluss-client/src/test/java/org/apache/fluss/client/security/acl/FlussAuthorizationITCase.java
+++
b/fluss-client/src/test/java/org/apache/fluss/client/security/acl/FlussAuthorizationITCase.java
@@ -648,7 +648,7 @@ public class FlussAuthorizationITCase {
Collections.singletonList(noWriteAclTable)));
}
- // 2. Try to write data to writeAclTable. It will success and writeId
will be set.
+ // 2. Try to write data to writeAclTable. It will succeed and writeId
will be set.
try (Table table = guestConn.getTable(writeAclTable)) {
AppendWriter appendWriter = table.newAppend().createWriter();
appendWriter.append(row(1, "a")).get();
diff --git
a/fluss-common/src/test/java/org/apache/fluss/row/BinaryArrayTest.java
b/fluss-common/src/test/java/org/apache/fluss/row/BinaryArrayTest.java
index d3054ead6..bdcadb896 100644
--- a/fluss-common/src/test/java/org/apache/fluss/row/BinaryArrayTest.java
+++ b/fluss-common/src/test/java/org/apache/fluss/row/BinaryArrayTest.java
@@ -287,7 +287,7 @@ public class BinaryArrayTest {
writer.writeInt(2, 30);
writer.complete();
- assertThatThrownBy(() -> array.toIntArray())
+ assertThatThrownBy(array::toIntArray)
.isInstanceOf(RuntimeException.class)
.hasMessageContaining("Primitive array must not contain a null
value");
}
diff --git
a/fluss-common/src/test/java/org/apache/fluss/row/GenericArrayTest.java
b/fluss-common/src/test/java/org/apache/fluss/row/GenericArrayTest.java
index 1ceccc07b..2d365179f 100644
--- a/fluss-common/src/test/java/org/apache/fluss/row/GenericArrayTest.java
+++ b/fluss-common/src/test/java/org/apache/fluss/row/GenericArrayTest.java
@@ -185,7 +185,7 @@ public class GenericArrayTest {
assertThat(array.isNullAt(1)).isTrue();
assertThat(array.isNullAt(2)).isFalse();
- assertThatThrownBy(() -> array.toIntArray())
+ assertThatThrownBy(array::toIntArray)
.isInstanceOf(RuntimeException.class)
.hasMessageContaining("Primitive array must not contain a null
value");
}
diff --git
a/fluss-flink/fluss-flink-common/src/main/java/org/apache/fluss/flink/tiering/source/TableBucketWriteResult.java
b/fluss-flink/fluss-flink-common/src/main/java/org/apache/fluss/flink/tiering/source/TableBucketWriteResult.java
index ba648f297..abec3c6c2 100644
---
a/fluss-flink/fluss-flink-common/src/main/java/org/apache/fluss/flink/tiering/source/TableBucketWriteResult.java
+++
b/fluss-flink/fluss-flink-common/src/main/java/org/apache/fluss/flink/tiering/source/TableBucketWriteResult.java
@@ -53,7 +53,7 @@ public class TableBucketWriteResult<WriteResult> implements
Serializable {
private final long maxTimestamp;
// the total number of write results in one round of tiering,
- // used for downstream commiter operator to determine when all write
results
+ // used for downstream committer operator to determine when all write
results
// for the round of tiering is finished
private final int numberOfWriteResults;
diff --git
a/fluss-flink/fluss-flink-common/src/main/java/org/apache/fluss/flink/tiering/source/enumerator/TieringSourceEnumerator.java
b/fluss-flink/fluss-flink-common/src/main/java/org/apache/fluss/flink/tiering/source/enumerator/TieringSourceEnumerator.java
index 1e9bd5dfe..33615039b 100644
---
a/fluss-flink/fluss-flink-common/src/main/java/org/apache/fluss/flink/tiering/source/enumerator/TieringSourceEnumerator.java
+++
b/fluss-flink/fluss-flink-common/src/main/java/org/apache/fluss/flink/tiering/source/enumerator/TieringSourceEnumerator.java
@@ -492,7 +492,7 @@ public class TieringSourceEnumerator
@Override
public TieringSourceEnumeratorState snapshotState(long checkpointId)
throws Exception {
- // do nothing, the downstream lake commiter will snapshot the state to
Fluss Cluster
+ // do nothing, the downstream lake committer will snapshot the state
to Fluss Cluster
return new TieringSourceEnumeratorState();
}
diff --git
a/fluss-flink/fluss-flink-common/src/test/java/org/apache/fluss/flink/catalog/FlinkCatalogTest.java
b/fluss-flink/fluss-flink-common/src/test/java/org/apache/fluss/flink/catalog/FlinkCatalogTest.java
index 6c65544c4..775182b5d 100644
---
a/fluss-flink/fluss-flink-common/src/test/java/org/apache/fluss/flink/catalog/FlinkCatalogTest.java
+++
b/fluss-flink/fluss-flink-common/src/test/java/org/apache/fluss/flink/catalog/FlinkCatalogTest.java
@@ -205,7 +205,7 @@ class FlinkCatalogTest {
DEFAULT_DB, new
CatalogDatabaseImpl(Collections.emptyMap(), null), true);
} catch (CatalogException e) {
// the auto partitioned manager may create the db zk node
- // in an another thread, so if exception is NodeExistsException,
just ignore
+ // in another thread, so if exception is NodeExistsException, just
ignore
if (!ExceptionUtils.findThrowableWithMessage(e,
"KeeperException$NodeExistsException")
.isPresent()) {
throw e;
@@ -275,7 +275,7 @@ class FlinkCatalogTest {
this.tableInDefaultDb, CATALOG_NAME));
// should be ok since we set ignoreIfNotExists = true
catalog.dropTable(this.tableInDefaultDb, true);
- // create table from an non-exist db
+ // create table from a non-exist db
ObjectPath nonExistDbPath = ObjectPath.fromString("non.exist");
// remove bucket-key
@@ -300,7 +300,7 @@ class FlinkCatalogTest {
resolvedSchema);
catalog.createTable(this.tableInDefaultDb, table2, false);
tableCreated = catalog.getTable(this.tableInDefaultDb);
- // need to over write the option
+ // need to overwrite the option
addedOptions.put(BUCKET_KEY.key(), "third");
expectedTable = addOptions(table2, addedOptions);
@@ -492,7 +492,7 @@ class FlinkCatalogTest {
// should be ok since we set ignoreIfNotExists = true
catalog.dropTable(mt1, true);
- // create table from an non-exist db
+ // create table from a non-exist db
ObjectPath nonExistDbPath = ObjectPath.fromString("non.exist");
// remove bucket-key
@@ -658,10 +658,10 @@ class FlinkCatalogTest {
@Test
void testOperatePartitions() throws Exception {
catalog.createDatabase("db1", new
CatalogDatabaseImpl(Collections.emptyMap(), null), false);
- assertThatThrownBy(() -> catalog.listPartitions(new ObjectPath("db1",
"unkown_table")))
+ assertThatThrownBy(() -> catalog.listPartitions(new ObjectPath("db1",
"unknown_table")))
.isInstanceOf(TableNotExistException.class)
.hasMessage(
- "Table (or view) db1.unkown_table does not exist in
Catalog test-catalog.");
+ "Table (or view) db1.unknown_table does not exist in
Catalog test-catalog.");
// create a none partitioned table.
CatalogTable table = this.newCatalogTable(Collections.emptyMap());
@@ -843,7 +843,7 @@ class FlinkCatalogTest {
Collections::emptyMap);
// Test open() throws proper exception
- assertThatThrownBy(() -> badCatalog.open())
+ assertThatThrownBy(badCatalog::open)
.isInstanceOf(IllegalConfigurationException.class)
.hasMessageContaining("No resolvable bootstrap urls");
}
diff --git
a/fluss-flink/fluss-flink-common/src/test/java/org/apache/fluss/flink/tiering/source/enumerator/TieringSourceEnumeratorTest.java
b/fluss-flink/fluss-flink-common/src/test/java/org/apache/fluss/flink/tiering/source/enumerator/TieringSourceEnumeratorTest.java
index 7ea150049..b725f99e7 100644
---
a/fluss-flink/fluss-flink-common/src/test/java/org/apache/fluss/flink/tiering/source/enumerator/TieringSourceEnumeratorTest.java
+++
b/fluss-flink/fluss-flink-common/src/test/java/org/apache/fluss/flink/tiering/source/enumerator/TieringSourceEnumeratorTest.java
@@ -739,7 +739,7 @@ class TieringSourceEnumeratorTest extends TieringTestBase {
try (FlussMockSplitEnumeratorContext<TieringSplit> context =
new FlussMockSplitEnumeratorContext<>(numSubtasks);
TieringSourceEnumerator enumerator =
- createTieringSourceEnumerator(flussConf, context); ) {
+ createTieringSourceEnumerator(flussConf, context)) {
enumerator.start();
// Register all readers
diff --git
a/fluss-lake/fluss-lake-iceberg/src/main/java/org/apache/fluss/lake/iceberg/tiering/IcebergWriteResult.java
b/fluss-lake/fluss-lake-iceberg/src/main/java/org/apache/fluss/lake/iceberg/tiering/IcebergWriteResult.java
index 2b36c87c2..d7e25403f 100644
---
a/fluss-lake/fluss-lake-iceberg/src/main/java/org/apache/fluss/lake/iceberg/tiering/IcebergWriteResult.java
+++
b/fluss-lake/fluss-lake-iceberg/src/main/java/org/apache/fluss/lake/iceberg/tiering/IcebergWriteResult.java
@@ -25,7 +25,7 @@ import javax.annotation.Nullable;
import java.io.Serializable;
-/** The write result of Iceberg lake writer to pass to commiter to commit. */
+/** The write result of Iceberg lake writer to pass to committer to commit. */
public class IcebergWriteResult implements Serializable {
private static final long serialVersionUID = 1L;
diff --git
a/fluss-lake/fluss-lake-lance/src/main/java/org/apache/fluss/lake/lance/tiering/LanceWriteResult.java
b/fluss-lake/fluss-lake-lance/src/main/java/org/apache/fluss/lake/lance/tiering/LanceWriteResult.java
index f2732bea0..309b1d389 100644
---
a/fluss-lake/fluss-lake-lance/src/main/java/org/apache/fluss/lake/lance/tiering/LanceWriteResult.java
+++
b/fluss-lake/fluss-lake-lance/src/main/java/org/apache/fluss/lake/lance/tiering/LanceWriteResult.java
@@ -22,7 +22,7 @@ import com.lancedb.lance.FragmentMetadata;
import java.io.Serializable;
import java.util.List;
-/** The write result of Lance lake writer to pass to commiter to commit. */
+/** The write result of Lance lake writer to pass to committer to commit. */
public class LanceWriteResult implements Serializable {
private static final long serialVersionUID = 1L;
diff --git
a/fluss-lake/fluss-lake-lance/src/main/java/org/apache/fluss/lake/lance/tiering/LanceWriteResultSerializer.java
b/fluss-lake/fluss-lake-lance/src/main/java/org/apache/fluss/lake/lance/tiering/LanceWriteResultSerializer.java
index 228f2215b..38ab2352c 100644
---
a/fluss-lake/fluss-lake-lance/src/main/java/org/apache/fluss/lake/lance/tiering/LanceWriteResultSerializer.java
+++
b/fluss-lake/fluss-lake-lance/src/main/java/org/apache/fluss/lake/lance/tiering/LanceWriteResultSerializer.java
@@ -37,7 +37,7 @@ public class LanceWriteResultSerializer implements
SimpleVersionedSerializer<Lan
@Override
public byte[] serialize(LanceWriteResult lanceWriteResult) throws
IOException {
try (ByteArrayOutputStream baos = new ByteArrayOutputStream();
- ObjectOutputStream oos = new ObjectOutputStream(baos); ) {
+ ObjectOutputStream oos = new ObjectOutputStream(baos)) {
oos.writeObject(lanceWriteResult);
return baos.toByteArray();
}
diff --git
a/fluss-lake/fluss-lake-paimon/src/main/java/org/apache/fluss/lake/paimon/tiering/PaimonWriteResult.java
b/fluss-lake/fluss-lake-paimon/src/main/java/org/apache/fluss/lake/paimon/tiering/PaimonWriteResult.java
index d6f9ba75d..70575c00e 100644
---
a/fluss-lake/fluss-lake-paimon/src/main/java/org/apache/fluss/lake/paimon/tiering/PaimonWriteResult.java
+++
b/fluss-lake/fluss-lake-paimon/src/main/java/org/apache/fluss/lake/paimon/tiering/PaimonWriteResult.java
@@ -21,7 +21,7 @@ import org.apache.paimon.table.sink.CommitMessage;
import java.io.Serializable;
-/** The write result of Paimon lake writer to pass to commiter to commit. */
+/** The write result of Paimon lake writer to pass to committer to commit. */
public class PaimonWriteResult implements Serializable {
private static final long serialVersionUID = 1L;
diff --git
a/fluss-rpc/src/test/java/org/apache/fluss/rpc/netty/client/NettyClientTest.java
b/fluss-rpc/src/test/java/org/apache/fluss/rpc/netty/client/NettyClientTest.java
index 6d97c2f88..4c0ba7b14 100644
---
a/fluss-rpc/src/test/java/org/apache/fluss/rpc/netty/client/NettyClientTest.java
+++
b/fluss-rpc/src/test/java/org/apache/fluss/rpc/netty/client/NettyClientTest.java
@@ -219,7 +219,7 @@ final class NettyClientTest {
.get();
assertThat(nettyClient.connections().size()).isEqualTo(1);
try (NettyClient client =
- new NettyClient(conf,
TestingClientMetricGroup.newInstance(), false); ) {
+ new NettyClient(conf,
TestingClientMetricGroup.newInstance(), false)) {
client.sendRequest(
new ServerNode(
2,
diff --git
a/fluss-server/src/main/java/org/apache/fluss/server/coordinator/CoordinatorEventProcessor.java
b/fluss-server/src/main/java/org/apache/fluss/server/coordinator/CoordinatorEventProcessor.java
index 511132e29..909fa503d 100644
---
a/fluss-server/src/main/java/org/apache/fluss/server/coordinator/CoordinatorEventProcessor.java
+++
b/fluss-server/src/main/java/org/apache/fluss/server/coordinator/CoordinatorEventProcessor.java
@@ -1005,7 +1005,7 @@ public class CoordinatorEventProcessor implements
EventProcessor {
// it may happen during coordinator server initiation, the watcher
watch a new tablet
// server register event and put it to event manager, but after
that, the coordinator
// server read
- // all tablet server nodes registered which contain the tablet
server a; in this case,
+ // all tablet server nodes registered which contain the tablet
server; in this case,
// we can ignore it.
return;
}
@@ -1398,7 +1398,7 @@ public class CoordinatorEventProcessor implements
EventProcessor {
*
* <ul>
* <li>B1. Move all replicas in AR to OnlineReplica state.
- * <li>B2. Send a LeaderAndIsr request with RS = ORS +TRS. The will make
the origin leader
+ * <li>B2. Send a LeaderAndIsr request with RS = ORS +TRS. This will
make the origin leader
* change to the new leader. this request will be sent to every
tabletServer in ORS +TRS.
* <li>B3. Set RS = TRS, AR = [], RR = [] in memory.
* <li>Re-send LeaderAndIsr request with new leader and a new RS (using
TRS) and same isr to
@@ -1408,8 +1408,8 @@ public class CoordinatorEventProcessor implements
EventProcessor {
* Leader to notify it of the shrunk isr. After that, we send a
StopReplica (delete =
* false and deleteRemote = false) to the replicas in RR.
* <li>B6. Move all replicas in RR to ReplicaMigrationStarted state.
This will send a
- * StopReplica (delete = true and deleteRemote = false) to he
replicas in RR to physically
- * delete the replicas on disk but don't delete the data in remote
storage.
+ * StopReplica (delete = true and deleteRemote = false) to the
replicas in RR to
+ * physically delete the replicas on disk but don't delete the data
in remote storage.
* <li>B7. Update ZK with RS=TRS, AR=[], RR=[].
* <li>B8. After electing leader, the replicas and isr information
changes. So resend the
* update metadata request to every tabletServer.
diff --git
a/fluss-server/src/main/java/org/apache/fluss/server/coordinator/CoordinatorRequestBatch.java
b/fluss-server/src/main/java/org/apache/fluss/server/coordinator/CoordinatorRequestBatch.java
index fe85ff391..063ba5878 100644
---
a/fluss-server/src/main/java/org/apache/fluss/server/coordinator/CoordinatorRequestBatch.java
+++
b/fluss-server/src/main/java/org/apache/fluss/server/coordinator/CoordinatorRequestBatch.java
@@ -276,11 +276,11 @@ public class CoordinatorRequestBatch {
* none-partitioned table
* <li>case3: Table create and bucketAssignment don't generated, case
will happen for new
* created partitioned table
- * <li>case4: Table is queued for deletion, in this case we will set a
empty tableBucket set
+ * <li>case4: Table is queued for deletion, in this case we will set an
empty tableBucket set
* and tableId set to {@link TableMetadata#DELETED_TABLE_ID} to
avoid send unless info to
* tabletServer
* <li>case5: Partition create and bucketAssignment of this partition
generated.
- * <li>case6: Partition is queued for deletion, in this case we will set
a empty tableBucket
+ * <li>case6: Partition is queued for deletion, in this case we will set
an empty tableBucket
* set and partitionId set to {@link
PartitionMetadata#DELETED_PARTITION_ID } to avoid
* send unless info to tabletServer
* <li>case7: Leader and isr is changed for these input tableBuckets
diff --git
a/fluss-server/src/main/java/org/apache/fluss/server/coordinator/event/CommitKvSnapshotEvent.java
b/fluss-server/src/main/java/org/apache/fluss/server/coordinator/event/CommitKvSnapshotEvent.java
index 984c63f27..ca7befddb 100644
---
a/fluss-server/src/main/java/org/apache/fluss/server/coordinator/event/CommitKvSnapshotEvent.java
+++
b/fluss-server/src/main/java/org/apache/fluss/server/coordinator/event/CommitKvSnapshotEvent.java
@@ -23,7 +23,7 @@ import org.apache.fluss.server.entity.CommitKvSnapshotData;
import java.util.concurrent.CompletableFuture;
-/** An event for receiving the request of commiting a completed snapshot to
coordinator server. */
+/** An event for receiving the request of committing a completed snapshot to
coordinator server. */
public class CommitKvSnapshotEvent implements FencedCoordinatorEvent {
private final CommitKvSnapshotData commitKvSnapshotData;
diff --git
a/fluss-server/src/main/java/org/apache/fluss/server/coordinator/rebalance/ActionType.java
b/fluss-server/src/main/java/org/apache/fluss/server/coordinator/rebalance/ActionType.java
index a24bf8acb..8f347a7d8 100644
---
a/fluss-server/src/main/java/org/apache/fluss/server/coordinator/rebalance/ActionType.java
+++
b/fluss-server/src/main/java/org/apache/fluss/server/coordinator/rebalance/ActionType.java
@@ -26,5 +26,5 @@ public enum ActionType {
* Move leadership of a leader from a source tabletServer to a follower of
the same replica
* residing in a destination tabletServer.
*/
- LEADERSHIP_MOVEMENT;
+ LEADERSHIP_MOVEMENT
}
diff --git
a/fluss-server/src/main/java/org/apache/fluss/server/kv/KvSnapshotResource.java
b/fluss-server/src/main/java/org/apache/fluss/server/kv/KvSnapshotResource.java
index 0f8f748fb..85ee1ebcf 100644
---
a/fluss-server/src/main/java/org/apache/fluss/server/kv/KvSnapshotResource.java
+++
b/fluss-server/src/main/java/org/apache/fluss/server/kv/KvSnapshotResource.java
@@ -113,7 +113,7 @@ public class KvSnapshotResource {
// shutdown asyncOperationsThreadPool now
asyncOperationsThreadPool.shutdownNow();
// close kvSnapshotScheduler, also stop any actively executing task
immediately
- // otherwise, a snapshot will still be take although it's closed,
which will case exception
+ // otherwise, a snapshot will still be taken although it's closed,
which will cause an exception
kvSnapshotScheduler.shutdownNow();
}
}
diff --git
a/fluss-server/src/main/java/org/apache/fluss/server/kv/snapshot/KvTabletSnapshotTarget.java
b/fluss-server/src/main/java/org/apache/fluss/server/kv/snapshot/KvTabletSnapshotTarget.java
index 569d75c6e..e3e8ede43 100644
---
a/fluss-server/src/main/java/org/apache/fluss/server/kv/snapshot/KvTabletSnapshotTarget.java
+++
b/fluss-server/src/main/java/org/apache/fluss/server/kv/snapshot/KvTabletSnapshotTarget.java
@@ -300,8 +300,8 @@ public class KvTabletSnapshotTarget implements
PeriodicSnapshotManager.SnapshotT
// Fix for issue: https://github.com/apache/fluss/issues/1304
// Tablet server try to commit kv snapshot to coordinator server,
// coordinator server commit the kv snapshot to zk, then failover.
- // Tablet server will got exception from coordinator server, but
mistake it as a fail
- // commit although coordinator server has committed to zk, then
discard the commited kv
+ // Tablet server will get an exception from coordinator server, but
mistake it as a failed
+ // commit although coordinator server has committed to zk, then
discard the committed kv
// snapshot.
//
// Idempotent check: Double check ZK to verify if the snapshot
actually exists before
diff --git
a/tools/ci/fluss-ci-tools/src/main/java/org/apache/fluss/tools/ci/licensecheck/NoticeFileChecker.java
b/tools/ci/fluss-ci-tools/src/main/java/org/apache/fluss/tools/ci/licensecheck/NoticeFileChecker.java
index 051a50718..0cdbb5a57 100644
---
a/tools/ci/fluss-ci-tools/src/main/java/org/apache/fluss/tools/ci/licensecheck/NoticeFileChecker.java
+++
b/tools/ci/fluss-ci-tools/src/main/java/org/apache/fluss/tools/ci/licensecheck/NoticeFileChecker.java
@@ -80,7 +80,7 @@ public class NoticeFileChecker {
"Extracted "
+ deployedModules.size()
+ " modules that were deployed and "
- + modulesWithBundledDependencies.keySet().size()
+ + modulesWithBundledDependencies.size()
+ " modules which bundle dependencies with a total of "
+
modulesWithBundledDependencies.values().stream().mapToInt(Set::size).sum()
+ " dependencies");
diff --git a/website/blog/2025-06-01-partial-updates.md
b/website/blog/2025-06-01-partial-updates.md
index 55317235e..a250401fe 100644
--- a/website/blog/2025-06-01-partial-updates.md
+++ b/website/blog/2025-06-01-partial-updates.md
@@ -45,7 +45,7 @@ Next, let's try and better understand how this works in
practice with a concrete
### Example: Building a Unified Wide Table
> You can find the full source code on github
> [here](https://github.com/ververica/ververica-fluss-examples/tree/main/partial_updates).
-Start by cloning the repository, run `docker compose up` to spin up the
development enviroment and finally grab a terminal
+Start by cloning the repository, run `docker compose up` to spin up the
development environment and finally grab a terminal
into the `jobmanager` and start the Flink SQL cli, by running the following
command:
```shell
./bin/sql-client.sh
@@ -149,7 +149,7 @@ and then run:
```sql
SELECT * FROM user_rec_wide;
```
-to observe the output of the table, as we insert `partially` records into the
it from the different sources.
+to observe the output of the table, as we insert `partially` records into it
from the different sources.
**Step 5:** Let's insert the records from the `recommendations` table into the
`user_rec_wide` table.
```sql
diff --git a/website/community/how-to-contribute/contribute-code.md
b/website/community/how-to-contribute/contribute-code.md
index 5ac4cddaf..be1f3d2d1 100644
--- a/website/community/how-to-contribute/contribute-code.md
+++ b/website/community/how-to-contribute/contribute-code.md
@@ -28,7 +28,7 @@ Create an issue and reach consensus.
### Implement
-Implement the change according to the Code Style and Quality(refer to the
[Flink
doc](https://flink.apache.org/how-to-contribute/code-style-and-quality-preamble/)
Guide and the approach agreed upon in the issue.
+Implement the change according to the Code Style and Quality (refer to the
[Flink
doc](https://flink.apache.org/how-to-contribute/code-style-and-quality-preamble/)
Guide and the approach agreed upon in the issue).
1. Only start working on the implementation if there is consensus on the
approach (e.g. you are assigned to the ticket)
2. If you are newer, can refer to [ide setup](/community/dev/ide-setup) to
setup a Fluss dev environment.
diff --git a/website/docs/maintenance/filesystems/oss.md
b/website/docs/maintenance/filesystems/oss.md
index 3c9a6c3a1..a88f8692a 100644
--- a/website/docs/maintenance/filesystems/oss.md
+++ b/website/docs/maintenance/filesystems/oss.md
@@ -12,7 +12,7 @@ sidebar_position: 3
## Configurations setup
-To enabled OSS as remote storage, there are some required configurations that
must be add to Fluss' `server.yaml`:
+To enable OSS as remote storage, there are some required configurations that
must be added to Fluss' `server.yaml`:
```yaml
# The dir that used to be as the remote storage of Fluss