chia7712 commented on code in PR #15989:
URL: https://github.com/apache/kafka/pull/15989#discussion_r1609683516


##########
connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java:
##########
@@ -1184,6 +1185,140 @@ public void testRestoreRestartRequestInconsistentState() {
         verify(configLog).stop();
     }
 
+    @Test
+    public void testPutTaskConfigsZeroTasks() throws Exception {
+        when(configLog.partitionCount()).thenReturn(1);
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        verifyConfigure();
+        configStorage.start();
+
+        // Bootstrap as if we had already added the connector, but no tasks had been added yet
+        whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList());
+
+        // Null before writing
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(-1, configState.offset());
+
+        // Task configs should read to end, write to the log, read to end, write root.
+        doAnswer(expectReadToEnd(new LinkedHashMap<>())).when(configLog).readToEnd();

Review Comment:
   `Collections.emptyMap()`
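   
   i.e., a sketch of the changed line, assuming `expectReadToEnd` accepts a plain `Map` here:
   
   ```java
   doAnswer(expectReadToEnd(Collections.emptyMap())).when(configLog).readToEnd();
   ```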



##########
connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java:
##########
@@ -1184,6 +1185,140 @@ public void testRestoreRestartRequestInconsistentState() {
         verify(configLog).stop();
     }
 
+    @Test
+    public void testPutTaskConfigsZeroTasks() throws Exception {
+        when(configLog.partitionCount()).thenReturn(1);
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        verifyConfigure();
+        configStorage.start();
+
+        // Bootstrap as if we had already added the connector, but no tasks had been added yet
+        whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList());
+
+        // Null before writing
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(-1, configState.offset());
+
+        // Task configs should read to end, write to the log, read to end, write root.
+        doAnswer(expectReadToEnd(new LinkedHashMap<>())).when(configLog).readToEnd();
+
+        expectConvertWriteRead(
+                COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(0),
+                "tasks", 0); // We have 0 tasks
+
+        List<Map<String, String>> taskConfigs = Collections.emptyList();
+        configStorage.putTaskConfigs("connector1", taskConfigs);
+
+        // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks
+        configUpdateListener.onTaskConfigUpdate(Collections.emptyList());
+        // Validate root config by listing all connectors and tasks
+        configState = configStorage.snapshot();
+        String connectorName = CONNECTOR_IDS.get(0);
+        assertEquals(Arrays.asList(connectorName), new ArrayList<>(configState.connectors()));

Review Comment:
   `assertEquals(Collections.singleton(connectorName), configState.connectors());`



##########
connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java:
##########
@@ -1184,6 +1185,140 @@ public void testRestoreRestartRequestInconsistentState() {
         verify(configLog).stop();
     }
 
+    @Test
+    public void testPutTaskConfigsZeroTasks() throws Exception {
+        when(configLog.partitionCount()).thenReturn(1);
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        verifyConfigure();
+        configStorage.start();
+
+        // Bootstrap as if we had already added the connector, but no tasks had been added yet
+        whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList());
+
+        // Null before writing
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(-1, configState.offset());
+
+        // Task configs should read to end, write to the log, read to end, write root.
+        doAnswer(expectReadToEnd(new LinkedHashMap<>())).when(configLog).readToEnd();
+
+        expectConvertWriteRead(
+                COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(0),
+                "tasks", 0); // We have 0 tasks
+
+        List<Map<String, String>> taskConfigs = Collections.emptyList();
+        configStorage.putTaskConfigs("connector1", taskConfigs);
+
+        // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks
+        configUpdateListener.onTaskConfigUpdate(Collections.emptyList());
+        // Validate root config by listing all connectors and tasks
+        configState = configStorage.snapshot();
+        String connectorName = CONNECTOR_IDS.get(0);
+        assertEquals(Arrays.asList(connectorName), new ArrayList<>(configState.connectors()));
+        assertEquals(Collections.emptyList(), configState.tasks(connectorName));
+        assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors());

Review Comment:
   `Collections.emptySet()`
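   
   i.e., the assertion would become:
   
   ```java
   assertEquals(Collections.emptySet(), configState.inconsistentConnectors());
   ```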



##########
connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java:
##########
@@ -1184,6 +1185,140 @@ public void testRestoreRestartRequestInconsistentState() {
         verify(configLog).stop();
     }
 
+    @Test
+    public void testPutTaskConfigsZeroTasks() throws Exception {
+        when(configLog.partitionCount()).thenReturn(1);
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        verifyConfigure();
+        configStorage.start();
+
+        // Bootstrap as if we had already added the connector, but no tasks had been added yet
+        whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList());
+
+        // Null before writing
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(-1, configState.offset());
+
+        // Task configs should read to end, write to the log, read to end, write root.
+        doAnswer(expectReadToEnd(new LinkedHashMap<>())).when(configLog).readToEnd();
+
+        expectConvertWriteRead(
+                COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(0),
+                "tasks", 0); // We have 0 tasks
+
+        List<Map<String, String>> taskConfigs = Collections.emptyList();
+        configStorage.putTaskConfigs("connector1", taskConfigs);
+
+        // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks
+        configUpdateListener.onTaskConfigUpdate(Collections.emptyList());
+        // Validate root config by listing all connectors and tasks
+        configState = configStorage.snapshot();
+        String connectorName = CONNECTOR_IDS.get(0);
+        assertEquals(Arrays.asList(connectorName), new ArrayList<>(configState.connectors()));
+        assertEquals(Collections.emptyList(), configState.tasks(connectorName));
+        assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors());
+
+        configStorage.stop();
+        verify(configStorage).stop();
+    }
+
+    @Test
+    public void testBackgroundUpdateTargetState() throws Exception {
+        // verify that we handle target state changes correctly when they come up through the log
+        List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
+                new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1),
+                        CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()));
+        LinkedHashMap<byte[], Struct> deserializedOnStartup = new LinkedHashMap<>();
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
+        logOffset = 5;
+
+        expectStart(existingRecords, deserializedOnStartup);
+        when(configLog.partitionCount()).thenReturn(1);
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        verifyConfigure();
+        configStorage.start();
+        verify(configLog).start();
+
+        // Should see a single connector with initial state started
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configStorage.connectorTargetStates.keySet());
+        assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
+
+        LinkedHashMap<String, byte[]> serializedAfterStartup = new LinkedHashMap<>();

Review Comment:
   It seems we don't use the insertion order, so maybe `HashMap` is enough.
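   
   e.g., a sketch assuming the `expectReadToEnd`/`expectRead` helpers are relaxed to accept a plain `Map`:
   
   ```java
   Map<String, byte[]> serializedAfterStartup = new HashMap<>();
   ```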



##########
connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java:
##########
@@ -1184,6 +1185,140 @@ public void testRestoreRestartRequestInconsistentState() {
         verify(configLog).stop();
     }
 
+    @Test
+    public void testPutTaskConfigsZeroTasks() throws Exception {
+        when(configLog.partitionCount()).thenReturn(1);
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        verifyConfigure();
+        configStorage.start();
+
+        // Bootstrap as if we had already added the connector, but no tasks had been added yet
+        whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList());
+
+        // Null before writing
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(-1, configState.offset());
+
+        // Task configs should read to end, write to the log, read to end, write root.
+        doAnswer(expectReadToEnd(new LinkedHashMap<>())).when(configLog).readToEnd();
+
+        expectConvertWriteRead(
+                COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(0),
+                "tasks", 0); // We have 0 tasks
+
+        List<Map<String, String>> taskConfigs = Collections.emptyList();
+        configStorage.putTaskConfigs("connector1", taskConfigs);
+
+        // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks
+        configUpdateListener.onTaskConfigUpdate(Collections.emptyList());
+        // Validate root config by listing all connectors and tasks
+        configState = configStorage.snapshot();
+        String connectorName = CONNECTOR_IDS.get(0);
+        assertEquals(Arrays.asList(connectorName), new ArrayList<>(configState.connectors()));
+        assertEquals(Collections.emptyList(), configState.tasks(connectorName));
+        assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors());
+
+        configStorage.stop();
+        verify(configStorage).stop();

Review Comment:
   We should verify `configLog#stop` instead, since it's `configStorage` itself that was just stopped above, right?
   `verify(configLog).stop();`



##########
connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java:
##########
@@ -1184,6 +1185,140 @@ public void testRestoreRestartRequestInconsistentState() {
         verify(configLog).stop();
     }
 
+    @Test
+    public void testPutTaskConfigsZeroTasks() throws Exception {
+        when(configLog.partitionCount()).thenReturn(1);
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        verifyConfigure();
+        configStorage.start();
+
+        // Bootstrap as if we had already added the connector, but no tasks had been added yet
+        whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList());
+
+        // Null before writing
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(-1, configState.offset());
+
+        // Task configs should read to end, write to the log, read to end, write root.
+        doAnswer(expectReadToEnd(new LinkedHashMap<>())).when(configLog).readToEnd();
+
+        expectConvertWriteRead(
+                COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(0),
+                "tasks", 0); // We have 0 tasks
+
+        List<Map<String, String>> taskConfigs = Collections.emptyList();
+        configStorage.putTaskConfigs("connector1", taskConfigs);
+
+        // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks
+        configUpdateListener.onTaskConfigUpdate(Collections.emptyList());
+        // Validate root config by listing all connectors and tasks
+        configState = configStorage.snapshot();
+        String connectorName = CONNECTOR_IDS.get(0);
+        assertEquals(Arrays.asList(connectorName), new ArrayList<>(configState.connectors()));
+        assertEquals(Collections.emptyList(), configState.tasks(connectorName));
+        assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors());
+
+        configStorage.stop();
+        verify(configStorage).stop();
+    }
+
+    @Test
+    public void testBackgroundUpdateTargetState() throws Exception {
+        // verify that we handle target state changes correctly when they come up through the log
+        List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
+                new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1),
+                        CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()));
+        LinkedHashMap<byte[], Struct> deserializedOnStartup = new LinkedHashMap<>();
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
+        logOffset = 5;
+
+        expectStart(existingRecords, deserializedOnStartup);
+        when(configLog.partitionCount()).thenReturn(1);
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        verifyConfigure();
+        configStorage.start();
+        verify(configLog).start();
+
+        // Should see a single connector with initial state started
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configStorage.connectorTargetStates.keySet());
+        assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
+
+        LinkedHashMap<String, byte[]> serializedAfterStartup = new LinkedHashMap<>();
+        serializedAfterStartup.put(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0));
+        serializedAfterStartup.put(TARGET_STATE_KEYS.get(1), CONFIGS_SERIALIZED.get(1));
+        doAnswer(expectReadToEnd(serializedAfterStartup)).when(configLog).readToEnd();
+
+        Map<String, Struct> deserializedAfterStartup = new HashMap<>();
+        deserializedAfterStartup.put(TARGET_STATE_KEYS.get(0), TARGET_STATE_PAUSED);
+        deserializedAfterStartup.put(TARGET_STATE_KEYS.get(1), TARGET_STATE_STOPPED);
+        expectRead(serializedAfterStartup, deserializedAfterStartup);
+
+        // Should see two connectors now, one paused and one stopped
+        configStorage.refresh(0, TimeUnit.SECONDS);
+        verify(configUpdateListener).onConnectorTargetStateChange(CONNECTOR_IDS.get(0));
+        configUpdateListener.onConnectorConfigRemove(CONNECTOR_IDS.get(0));
+        configState = configStorage.snapshot();
+
+        assertEquals(new HashSet<>(CONNECTOR_IDS), configStorage.connectorTargetStates.keySet());
+        assertEquals(TargetState.PAUSED, configState.targetState(CONNECTOR_IDS.get(0)));
+        assertEquals(TargetState.STOPPED, configState.targetState(CONNECTOR_IDS.get(1)));
+
+        configStorage.stop();
+        verify(configStorage).stop();

Review Comment:
   ditto
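   
   i.e., here as well:
   
   ```java
   verify(configLog).stop();
   ```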



##########
connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java:
##########
@@ -1315,4 +1450,22 @@ private Map<String, Object> structToMap(Struct struct) {
         for (Field field : struct.schema().fields()) result.put(field.name(), struct.get(field));
         return result;
     }
+
+    // Manually insert a connector into config storage, updating the task configs, connector config, and root config
+    private void whiteboxAddConnector(String connectorName, Map<String, String> connectorConfig, List<Map<String, String>> taskConfigs) {

Review Comment:
   `whiteBoxAddConnector`



##########
connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java:
##########
@@ -1184,6 +1185,140 @@ public void testRestoreRestartRequestInconsistentState() {
         verify(configLog).stop();
     }
 
+    @Test
+    public void testPutTaskConfigsZeroTasks() throws Exception {
+        when(configLog.partitionCount()).thenReturn(1);
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        verifyConfigure();
+        configStorage.start();
+
+        // Bootstrap as if we had already added the connector, but no tasks had been added yet
+        whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList());
+
+        // Null before writing
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(-1, configState.offset());
+
+        // Task configs should read to end, write to the log, read to end, write root.
+        doAnswer(expectReadToEnd(new LinkedHashMap<>())).when(configLog).readToEnd();
+
+        expectConvertWriteRead(
+                COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(0),
+                "tasks", 0); // We have 0 tasks
+
+        List<Map<String, String>> taskConfigs = Collections.emptyList();
+        configStorage.putTaskConfigs("connector1", taskConfigs);
+
+        // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks
+        configUpdateListener.onTaskConfigUpdate(Collections.emptyList());
+        // Validate root config by listing all connectors and tasks
+        configState = configStorage.snapshot();
+        String connectorName = CONNECTOR_IDS.get(0);
+        assertEquals(Arrays.asList(connectorName), new ArrayList<>(configState.connectors()));
+        assertEquals(Collections.emptyList(), configState.tasks(connectorName));
+        assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors());
+
+        configStorage.stop();
+        verify(configStorage).stop();
+    }
+
+    @Test
+    public void testBackgroundUpdateTargetState() throws Exception {
+        // verify that we handle target state changes correctly when they come up through the log
+        List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
+                new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1),
+                        CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()));
+        LinkedHashMap<byte[], Struct> deserializedOnStartup = new LinkedHashMap<>();
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
+        logOffset = 5;
+
+        expectStart(existingRecords, deserializedOnStartup);
+        when(configLog.partitionCount()).thenReturn(1);
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        verifyConfigure();
+        configStorage.start();
+        verify(configLog).start();
+
+        // Should see a single connector with initial state started
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configStorage.connectorTargetStates.keySet());
+        assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
+
+        LinkedHashMap<String, byte[]> serializedAfterStartup = new LinkedHashMap<>();
+        serializedAfterStartup.put(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0));
+        serializedAfterStartup.put(TARGET_STATE_KEYS.get(1), CONFIGS_SERIALIZED.get(1));
+        doAnswer(expectReadToEnd(serializedAfterStartup)).when(configLog).readToEnd();
+
+        Map<String, Struct> deserializedAfterStartup = new HashMap<>();
+        deserializedAfterStartup.put(TARGET_STATE_KEYS.get(0), TARGET_STATE_PAUSED);
+        deserializedAfterStartup.put(TARGET_STATE_KEYS.get(1), TARGET_STATE_STOPPED);
+        expectRead(serializedAfterStartup, deserializedAfterStartup);
+
+        // Should see two connectors now, one paused and one stopped
+        configStorage.refresh(0, TimeUnit.SECONDS);
+        verify(configUpdateListener).onConnectorTargetStateChange(CONNECTOR_IDS.get(0));
+        configUpdateListener.onConnectorConfigRemove(CONNECTOR_IDS.get(0));
+        configState = configStorage.snapshot();
+
+        assertEquals(new HashSet<>(CONNECTOR_IDS), configStorage.connectorTargetStates.keySet());
+        assertEquals(TargetState.PAUSED, configState.targetState(CONNECTOR_IDS.get(0)));
+        assertEquals(TargetState.STOPPED, configState.targetState(CONNECTOR_IDS.get(1)));
+
+        configStorage.stop();
+        verify(configStorage).stop();
+    }
+
+    @Test
+    public void testSameTargetState() throws Exception {
+        // verify that we handle target state changes correctly when they come up through the log
+        List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
+                new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1),
+                        CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()));
+        LinkedHashMap<byte[], Struct> deserialized = new LinkedHashMap<>();
+        deserialized.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
+        deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
+        deserialized.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
+        deserialized.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
+        logOffset = 5;
+
+        expectStart(existingRecords, deserialized);
+
+        when(configLog.partitionCount()).thenReturn(1);
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        verifyConfigure();
+        configStorage.start();
+        verify(configLog).start();
+
+        ClusterConfigState configState = configStorage.snapshot();
+        expectRead(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0), TARGET_STATE_STARTED);

Review Comment:
   Maybe we can call `expectRead(Map<String, byte[]> serializedValues, Map<String, Struct> deserializedValues)` directly. For example:
   
   ```java
           expectRead(Collections.singletonMap(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0)),
               Collections.singletonMap(TARGET_STATE_KEYS.get(0), TARGET_STATE_STARTED));
   ```
   
   BTW, we don't need to use `LinkedHashMap` in `expectRead`.



##########
connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java:
##########
@@ -1184,6 +1185,140 @@ public void testRestoreRestartRequestInconsistentState() {
         verify(configLog).stop();
     }
 
+    @Test
+    public void testPutTaskConfigsZeroTasks() throws Exception {
+        when(configLog.partitionCount()).thenReturn(1);
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        verifyConfigure();
+        configStorage.start();
+
+        // Bootstrap as if we had already added the connector, but no tasks had been added yet
+        whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList());
+
+        // Null before writing
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(-1, configState.offset());
+
+        // Task configs should read to end, write to the log, read to end, write root.
+        doAnswer(expectReadToEnd(new LinkedHashMap<>())).when(configLog).readToEnd();
+
+        expectConvertWriteRead(
+                COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(0),
+                "tasks", 0); // We have 0 tasks
+
+        List<Map<String, String>> taskConfigs = Collections.emptyList();

Review Comment:
   we don't need temp `taskConfigs` here
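   
   i.e., we can inline it:
   
   ```java
   configStorage.putTaskConfigs("connector1", Collections.emptyList());
   ```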



##########
connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreMockitoTest.java:
##########
@@ -1184,6 +1185,140 @@ public void testRestoreRestartRequestInconsistentState() {
         verify(configLog).stop();
     }
 
+    @Test
+    public void testPutTaskConfigsZeroTasks() throws Exception {
+        when(configLog.partitionCount()).thenReturn(1);
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        verifyConfigure();
+        configStorage.start();
+
+        // Bootstrap as if we had already added the connector, but no tasks had been added yet
+        whiteboxAddConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList());
+
+        // Null before writing
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(-1, configState.offset());
+
+        // Task configs should read to end, write to the log, read to end, write root.
+        doAnswer(expectReadToEnd(new LinkedHashMap<>())).when(configLog).readToEnd();
+
+        expectConvertWriteRead(
+                COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(0),
+                "tasks", 0); // We have 0 tasks
+
+        List<Map<String, String>> taskConfigs = Collections.emptyList();
+        configStorage.putTaskConfigs("connector1", taskConfigs);
+
+        // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks
+        configUpdateListener.onTaskConfigUpdate(Collections.emptyList());
+        // Validate root config by listing all connectors and tasks
+        configState = configStorage.snapshot();
+        String connectorName = CONNECTOR_IDS.get(0);
+        assertEquals(Arrays.asList(connectorName), new ArrayList<>(configState.connectors()));
+        assertEquals(Collections.emptyList(), configState.tasks(connectorName));
+        assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors());
+
+        configStorage.stop();
+        verify(configStorage).stop();
+    }
+
+    @Test
+    public void testBackgroundUpdateTargetState() throws Exception {
+        // verify that we handle target state changes correctly when they come up through the log
+        List<ConsumerRecord<String, byte[]>> existingRecords = Arrays.asList(
+                new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(1),
+                        CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()),
+                new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, COMMIT_TASKS_CONFIG_KEYS.get(0),
+                        CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty()));
+        LinkedHashMap<byte[], Struct> deserializedOnStartup = new LinkedHashMap<>();
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(0), CONNECTOR_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(2), TASK_CONFIG_STRUCTS.get(0));
+        deserializedOnStartup.put(CONFIGS_SERIALIZED.get(3), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR);
+        logOffset = 5;
+
+        expectStart(existingRecords, deserializedOnStartup);
+        when(configLog.partitionCount()).thenReturn(1);
+
+        configStorage.setupAndCreateKafkaBasedLog(TOPIC, config);
+        verifyConfigure();
+        configStorage.start();
+        verify(configLog).start();
+
+        // Should see a single connector with initial state started
+        ClusterConfigState configState = configStorage.snapshot();
+        assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configStorage.connectorTargetStates.keySet());
+        assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0)));
+
+        LinkedHashMap<String, byte[]> serializedAfterStartup = new LinkedHashMap<>();
+        serializedAfterStartup.put(TARGET_STATE_KEYS.get(0), CONFIGS_SERIALIZED.get(0));
+        serializedAfterStartup.put(TARGET_STATE_KEYS.get(1), CONFIGS_SERIALIZED.get(1));
+        doAnswer(expectReadToEnd(serializedAfterStartup)).when(configLog).readToEnd();
+
+        Map<String, Struct> deserializedAfterStartup = new HashMap<>();
+        deserializedAfterStartup.put(TARGET_STATE_KEYS.get(0), TARGET_STATE_PAUSED);
+        deserializedAfterStartup.put(TARGET_STATE_KEYS.get(1), TARGET_STATE_STOPPED);
+        expectRead(serializedAfterStartup, deserializedAfterStartup);
+
+        // Should see two connectors now, one paused and one stopped
+        configStorage.refresh(0, TimeUnit.SECONDS);
+        verify(configUpdateListener).onConnectorTargetStateChange(CONNECTOR_IDS.get(0));
+        configUpdateListener.onConnectorConfigRemove(CONNECTOR_IDS.get(0));

Review Comment:
   Why do we need this? If you expect it to be called, please add `verify(configUpdateListener).onConnectorConfigRemove(CONNECTOR_IDS.get(0));` instead.


