lindong28 commented on a change in pull request #52: URL: https://github.com/apache/flink-ml/pull/52#discussion_r831693277
########## File path: flink-ml-lib/src/main/java/org/apache/flink/ml/feature/stringindexer/StringIndexer.java ########## @@ -0,0 +1,247 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.flink.ml.feature.stringindexer; + +import org.apache.flink.api.common.functions.FlatMapFunction; +import org.apache.flink.api.common.functions.MapPartitionFunction; +import org.apache.flink.api.java.functions.KeySelector; +import org.apache.flink.api.java.tuple.Tuple2; +import org.apache.flink.api.java.tuple.Tuple3; +import org.apache.flink.ml.api.Estimator; +import org.apache.flink.ml.common.datastream.DataStreamUtils; +import org.apache.flink.ml.param.Param; +import org.apache.flink.ml.util.ParamUtils; +import org.apache.flink.ml.util.ReadWriteUtils; +import org.apache.flink.streaming.api.datastream.DataStream; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; +import org.apache.flink.table.api.Table; +import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; +import org.apache.flink.table.api.internal.TableImpl; +import org.apache.flink.types.Row; +import org.apache.flink.util.Collector; +import org.apache.flink.util.Preconditions; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * An Estimator which implements the string indexing algorithm. + * + * <p>A string indexer maps each input column (string/numerical value) to a index column (integer + * value) such that if the indices of two input are same iff their corresponding input columns are Review comment: There appears to be a typo in this sentence, e.g. "a index" should be "an index", and "such that if ... iff" mixes "if" and "iff". ########## File path: flink-ml-lib/src/main/java/org/apache/flink/ml/feature/stringindexer/StringIndexerModel.java ########## @@ -0,0 +1,204 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.flink.ml.feature.stringindexer; + +import org.apache.flink.api.common.functions.RichFlatMapFunction; +import org.apache.flink.api.common.typeinfo.BasicTypeInfo; +import org.apache.flink.api.common.typeinfo.TypeInformation; +import org.apache.flink.api.java.typeutils.RowTypeInfo; +import org.apache.flink.ml.api.Model; +import org.apache.flink.ml.common.broadcast.BroadcastUtils; +import org.apache.flink.ml.common.datastream.TableUtils; +import org.apache.flink.ml.param.Param; +import org.apache.flink.ml.util.ParamUtils; +import org.apache.flink.ml.util.ReadWriteUtils; +import org.apache.flink.streaming.api.datastream.DataStream; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; +import org.apache.flink.table.api.Table; +import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; +import org.apache.flink.table.api.internal.TableImpl; +import org.apache.flink.types.Row; +import org.apache.flink.util.Collector; +import org.apache.flink.util.Preconditions; + +import org.apache.commons.lang3.ArrayUtils; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +/** + * A Model which transforms input string/numeric column(s) to integer column(s) using the model data + * computed by {@link StringIndexer}. + */ +public class StringIndexerModel + implements Model<StringIndexerModel>, StringIndexerParams<StringIndexerModel> { + private final Map<Param<?>, Object> paramMap = new HashMap<>(); + private Table modelDataTable; + + public StringIndexerModel() { + ParamUtils.initializeMapWithDefaultValues(paramMap, this); + } + + @Override + public void save(String path) throws IOException { + ReadWriteUtils.saveMetadata(this, path); + ReadWriteUtils.saveModelData( + StringIndexerModelData.getModelDataStream(modelDataTable), + path, + new StringIndexerModelData.ModelDataEncoder()); + } + + public static StringIndexerModel load(StreamExecutionEnvironment env, String path) + throws IOException { + StreamTableEnvironment tEnv = StreamTableEnvironment.create(env); + StringIndexerModel model = ReadWriteUtils.loadStageParam(path); + DataStream<StringIndexerModelData> modelData = + ReadWriteUtils.loadModelData( + env, path, new StringIndexerModelData.ModelDataDecoder()); + return model.setModelData(tEnv.fromDataStream(modelData)); + } + + @Override + public Map<Param<?>, Object> getParamMap() { + return paramMap; + } + + @Override + public StringIndexerModel setModelData(Table... inputs) { + modelDataTable = inputs[0]; + return this; + } + + @Override + public Table[] getModelData() { + return new Table[] {modelDataTable}; + } + + @Override + public Table[] transform(Table... 
inputs) { + Preconditions.checkArgument(inputs.length == 1); + String[] inputCols = getInputCols(); + String[] outputCols = getOutputCols(); + Preconditions.checkArgument(inputCols.length == outputCols.length); + StreamTableEnvironment tEnv = + (StreamTableEnvironment) ((TableImpl) modelDataTable).getTableEnvironment(); + + final String broadcastModelKey = "broadcastModelKey"; + DataStream<StringIndexerModelData> modelDataStream = + StringIndexerModelData.getModelDataStream(modelDataTable); + RowTypeInfo inputTypeInfo = TableUtils.getRowTypeInfo(inputs[0].getResolvedSchema()); + TypeInformation<?>[] outputTypes = new TypeInformation[outputCols.length]; + Arrays.fill(outputTypes, BasicTypeInfo.INT_TYPE_INFO); + RowTypeInfo outputTypeInfo = + new RowTypeInfo( + ArrayUtils.addAll(inputTypeInfo.getFieldTypes(), outputTypes), + ArrayUtils.addAll(inputTypeInfo.getFieldNames(), getOutputCols())); + DataStream<Row> result = + BroadcastUtils.withBroadcastStream( + Collections.singletonList(tEnv.toDataStream(inputs[0])), + Collections.singletonMap(broadcastModelKey, modelDataStream), + inputList -> { + DataStream inputData = inputList.get(0); + return inputData.flatMap( + new String2Index( + broadcastModelKey, inputCols, getHandleInvalid()), + outputTypeInfo); + }); + return new Table[] {tEnv.fromDataStream(result)}; + } + + /** Maps the input columns to integer values according to the model data. */ + private static class String2Index extends RichFlatMapFunction<Row, Row> { + private HashMap<String, Integer>[] modelDataMap; + private final String broadcastModelKey; + private final String[] inputCols; + private final String handleInValid; + + public String2Index(String broadcastModelKey, String[] inputCols, String handleInValid) { + this.broadcastModelKey = broadcastModelKey; + this.inputCols = inputCols; + this.handleInValid = handleInValid; + } + + @Override + public void flatMap(Row input, Collector<Row> out) throws Exception { + if (modelDataMap == null) { + modelDataMap = new HashMap[inputCols.length]; + StringIndexerModelData modelData = + (StringIndexerModelData) + getRuntimeContext().getBroadcastVariable(broadcastModelKey).get(0); + String[][] stringsArray = modelData.stringsArray; + for (int i = 0; i < stringsArray.length; i++) { + int idx = 0; + modelDataMap[i] = new HashMap<>(stringsArray[i].length); + for (String string : stringsArray[i]) { + modelDataMap[i].put(string, idx++); + } + } + } + Row outputIndices = new Row(inputCols.length); + for (int i = 0; i < inputCols.length; i++) { + Object objVal = input.getField(inputCols[i]); + String stringVal; + if (objVal instanceof String) { + stringVal = (String) objVal; + } else if (objVal instanceof Number) { + stringVal = String.valueOf(objVal); + } else { + throw new RuntimeException( + "The input column only supports string type and numeric type."); + } + if (modelDataMap[i].containsKey(stringVal)) { + outputIndices.setField(i, modelDataMap[i].get(stringVal)); + } else { + switch (handleInValid) { + case StringIndexerModelParams.SKIP_INVALID: + return; + case StringIndexerModelParams.ERROR_INVALID: + throw new RuntimeException( Review comment: The validator of the stringOrderType parameter should already guarantee that the order type is one of the values in the given list. Would it be simpler to throw `IllegalStateException` here without listing the expected values? Same for `handleInvalid` used in `StringIndexerModel::String2Index`.
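For illustration, a minimal sketch of how the simplified default branch in `GenerateModel#mapPartition` (and the analogous branch for `handleInvalid` in `String2Index`) could then look; the exact message wording is an assumption, not code from this PR:
```
default:
    // The StringParam validator has already rejected unknown values, so reaching
    // this branch indicates a programming error rather than bad user input; the
    // message does not need to enumerate the supported options.
    throw new IllegalStateException(
            "Unsupported string order type: " + stringOrderType + ".");
```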
########## File path: flink-ml-lib/src/main/java/org/apache/flink/ml/feature/stringindexer/StringIndexer.java ########## @@ -0,0 +1,247 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.flink.ml.feature.stringindexer; + +import org.apache.flink.api.common.functions.FlatMapFunction; +import org.apache.flink.api.common.functions.MapPartitionFunction; +import org.apache.flink.api.java.functions.KeySelector; +import org.apache.flink.api.java.tuple.Tuple2; +import org.apache.flink.api.java.tuple.Tuple3; +import org.apache.flink.ml.api.Estimator; +import org.apache.flink.ml.common.datastream.DataStreamUtils; +import org.apache.flink.ml.param.Param; +import org.apache.flink.ml.util.ParamUtils; +import org.apache.flink.ml.util.ReadWriteUtils; +import org.apache.flink.streaming.api.datastream.DataStream; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; +import org.apache.flink.table.api.Table; +import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; +import org.apache.flink.table.api.internal.TableImpl; +import org.apache.flink.types.Row; +import org.apache.flink.util.Collector; +import org.apache.flink.util.Preconditions; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * An Estimator which implements the string indexing algorithm. + * + * <p>A string indexer maps each input column (string/numerical value) to a index column (integer + * value) such that if the indices of two input are same iff their corresponding input columns are + * the same. The indices are in [0, numDistinctValuesThisColumn). + * + * <p>The input columns are cast to string if they are numeric values. By default, the output model + * is random ordered. Users can control this by setting {@link + * StringIndexerParams#STRING_ORDER_TYPE}. + */ +public class StringIndexer + implements Estimator<StringIndexer, StringIndexerModel>, + StringIndexerParams<StringIndexer> { + private final Map<Param<?>, Object> paramMap = new HashMap<>(); + + public StringIndexer() { + ParamUtils.initializeMapWithDefaultValues(paramMap, this); + } + + @Override + public void save(String path) throws IOException { + ReadWriteUtils.saveMetadata(this, path); + } + + public static StringIndexer load(StreamExecutionEnvironment env, String path) + throws IOException { + return ReadWriteUtils.loadStageParam(path); + } + + @Override + public Map<Param<?>, Object> getParamMap() { + return paramMap; + } + + @Override + public StringIndexerModel fit(Table... 
inputs) { + Preconditions.checkArgument(inputs.length == 1); + String[] inputCols = getInputCols(); + String[] outputCols = getOutputCols(); + Preconditions.checkArgument(inputCols.length == outputCols.length); + StreamTableEnvironment tEnv = + (StreamTableEnvironment) ((TableImpl) inputs[0]).getTableEnvironment(); + + DataStream<Tuple2<Integer, String>> columnIdAndString = + tEnv.toDataStream(inputs[0]).flatMap(new ExtractColumnIdAndString(inputCols)); + DataStream<Tuple3<Integer, String, Long>> columnIdAndStringAndCnt = + DataStreamUtils.mapPartition( + columnIdAndString.keyBy( + (KeySelector<Tuple2<Integer, String>, Integer>) Tuple2::hashCode), + new CountStringsByColumn(inputCols.length)); + DataStream<StringIndexerModelData> modelData = + DataStreamUtils.mapPartition( + columnIdAndStringAndCnt, + new GenerateModel(inputCols.length, getStringOrderType())); + modelData.getTransformation().setParallelism(1); + StringIndexerModel model = + new StringIndexerModel().setModelData(tEnv.fromDataStream(modelData)); + ReadWriteUtils.updateExistingParams(model, paramMap); + return model; + } + + /** + * Merges all the extracted strings and generates the {@link StringIndexerModelData} according + * to the specified string order type. + */ + private static class GenerateModel + implements MapPartitionFunction<Tuple3<Integer, String, Long>, StringIndexerModelData> { + private final int numCols; + private final String stringOrderType; + + public GenerateModel(int numCols, String stringOrderType) { + this.numCols = numCols; + this.stringOrderType = stringOrderType; + } + + @Override + public void mapPartition( + Iterable<Tuple3<Integer, String, Long>> values, + Collector<StringIndexerModelData> out) { + String[][] stringsArray = new String[numCols][]; + ArrayList<Tuple2<String, Long>>[] stringsAndCntsByColumn = new ArrayList[numCols]; + for (int i = 0; i < numCols; i++) { + stringsAndCntsByColumn[i] = new ArrayList<>(); + } + for (Tuple3<Integer, String, Long> colIdAndStringAndCnt : values) { + stringsAndCntsByColumn[colIdAndStringAndCnt.f0].add( + Tuple2.of(colIdAndStringAndCnt.f1, colIdAndStringAndCnt.f2)); + } + for (int i = 0; i < stringsAndCntsByColumn.length; i++) { + List<Tuple2<String, Long>> stringsAndCnts = stringsAndCntsByColumn[i]; + switch (stringOrderType) { + case StringIndexerParams.ALPHABET_ASC_ORDER: + stringsAndCnts.sort(Comparator.comparing(valAndCnt -> valAndCnt.f0)); + break; + case StringIndexerParams.ALPHABET_DESC_ORDER: + stringsAndCnts.sort( + (valAndCnt1, valAndCnt2) -> + -valAndCnt1.f0.compareTo(valAndCnt2.f0)); + break; + case StringIndexerParams.FREQUENCY_ASC_ORDER: + stringsAndCnts.sort(Comparator.comparing(valAndCnt -> valAndCnt.f1)); + break; + case StringIndexerParams.FREQUENCY_DESC_ORDER: + stringsAndCnts.sort( + (valAndCnt1, valAndCnt2) -> + -valAndCnt1.f1.compareTo(valAndCnt2.f1)); + break; + case StringIndexerParams.RANDOM_ORDER: + break; + default: + throw new RuntimeException( + "Unsupported string order type: " + + stringOrderType + + ". 
Supported options are: [" + + StringIndexerParams.RANDOM_ORDER + + ", " + + StringIndexerParams.ALPHABET_ASC_ORDER + + ", " + + StringIndexerParams.ALPHABET_DESC_ORDER + + ", " + + StringIndexerParams.FREQUENCY_ASC_ORDER + + ", " + + StringIndexerParams.ALPHABET_DESC_ORDER + + "]."); + } + stringsArray[i] = new String[stringsAndCnts.size()]; + for (int stringId = 0; stringId < stringsArray[i].length; stringId++) { + stringsArray[i][stringId] = stringsAndCnts.get(stringId).f0; + } + } + + out.collect(new StringIndexerModelData(stringsArray)); + } + } + + /** Computes the frequency of strings in each column. */ + private static class CountStringsByColumn + implements MapPartitionFunction< + Tuple2<Integer, String>, Tuple3<Integer, String, Long>> { + private final int numCols; + + public CountStringsByColumn(int numCols) { + this.numCols = numCols; + } + + @Override + public void mapPartition( + Iterable<Tuple2<Integer, String>> values, + Collector<Tuple3<Integer, String, Long>> out) { + HashMap<String, Long>[] string2CntByColumn = new HashMap[numCols]; + for (int i = 0; i < numCols; i++) { + string2CntByColumn[i] = new HashMap<>(); + } + for (Tuple2<Integer, String> columnIdAndString : values) { + int colId = columnIdAndString.f0; + String stringVal = columnIdAndString.f1; + if (string2CntByColumn[colId].containsKey(stringVal)) { Review comment: It might be more performant and readable to do the following: ``` Long cnt = string2CntByColumn[colId].getOrDefault(stringVal, 0L) + 1; string2CntByColumn[colId].put(stringVal, cnt); ``` ########## File path: flink-ml-lib/src/main/java/org/apache/flink/ml/feature/stringindexer/StringIndexerModel.java ########## @@ -0,0 +1,204 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.flink.ml.feature.stringindexer; + +import org.apache.flink.api.common.functions.RichFlatMapFunction; +import org.apache.flink.api.common.typeinfo.BasicTypeInfo; +import org.apache.flink.api.common.typeinfo.TypeInformation; +import org.apache.flink.api.java.typeutils.RowTypeInfo; +import org.apache.flink.ml.api.Model; +import org.apache.flink.ml.common.broadcast.BroadcastUtils; +import org.apache.flink.ml.common.datastream.TableUtils; +import org.apache.flink.ml.param.Param; +import org.apache.flink.ml.util.ParamUtils; +import org.apache.flink.ml.util.ReadWriteUtils; +import org.apache.flink.streaming.api.datastream.DataStream; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; +import org.apache.flink.table.api.Table; +import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; +import org.apache.flink.table.api.internal.TableImpl; +import org.apache.flink.types.Row; +import org.apache.flink.util.Collector; +import org.apache.flink.util.Preconditions; + +import org.apache.commons.lang3.ArrayUtils; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +/** + * A Model which transforms input string/numeric column(s) to integer column(s) using the model data + * computed by {@link StringIndexer}. + */ +public class StringIndexerModel + implements Model<StringIndexerModel>, StringIndexerParams<StringIndexerModel> { Review comment: Should this class implement `StringIndexerModelParams` instead of `StringIndexerParams`? ########## File path: flink-ml-lib/src/main/java/org/apache/flink/ml/feature/stringindexer/StringIndexer.java ########## @@ -0,0 +1,247 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.flink.ml.feature.stringindexer; + +import org.apache.flink.api.common.functions.FlatMapFunction; +import org.apache.flink.api.common.functions.MapPartitionFunction; +import org.apache.flink.api.java.functions.KeySelector; +import org.apache.flink.api.java.tuple.Tuple2; +import org.apache.flink.api.java.tuple.Tuple3; +import org.apache.flink.ml.api.Estimator; +import org.apache.flink.ml.common.datastream.DataStreamUtils; +import org.apache.flink.ml.param.Param; +import org.apache.flink.ml.util.ParamUtils; +import org.apache.flink.ml.util.ReadWriteUtils; +import org.apache.flink.streaming.api.datastream.DataStream; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; +import org.apache.flink.table.api.Table; +import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; +import org.apache.flink.table.api.internal.TableImpl; +import org.apache.flink.types.Row; +import org.apache.flink.util.Collector; +import org.apache.flink.util.Preconditions; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * An Estimator which implements the string indexing algorithm. + * + * <p>A string indexer maps each input column (string/numerical value) to a index column (integer + * value) such that if the indices of two input are same iff their corresponding input columns are + * the same. The indices are in [0, numDistinctValuesThisColumn). Review comment: Is this statement still correct if `handleInvalid = keep`? ########## File path: flink-ml-lib/src/test/java/org/apache/flink/ml/feature/stringindexer/StringIndexerTest.java ########## @@ -0,0 +1,319 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.flink.ml.feature.stringindexer; + +import org.apache.flink.api.common.restartstrategy.RestartStrategies; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.ml.util.ReadWriteUtils; +import org.apache.flink.ml.util.StageTestUtils; +import org.apache.flink.streaming.api.environment.ExecutionCheckpointingOptions; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; +import org.apache.flink.table.api.Table; +import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; +import org.apache.flink.test.util.TestBaseUtils; +import org.apache.flink.types.Row; + +import org.apache.commons.collections.IteratorUtils; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** Tests the {@link StringIndexer} and {@link StringIndexerModel}. */ +public class StringIndexerTest { + @Rule public final TemporaryFolder tempFolder = new TemporaryFolder(); + + private StreamExecutionEnvironment env; + private StreamTableEnvironment tEnv; + private Table trainTable; + private Table predictTable; + + private String[][] expectedAlphabeticAscModelData; + private List<Row> expectedAlphabeticAscPredictData; + private List<Row> expectedAlphabeticDescPredictData; + private List<Row> expectedFreqAscPredictData; + private List<Row> expectedFreqDescPredictData; + + @Before + public void before() { + Configuration config = new Configuration(); + config.set(ExecutionCheckpointingOptions.ENABLE_CHECKPOINTS_AFTER_TASKS_FINISH, true); + env = StreamExecutionEnvironment.getExecutionEnvironment(config); + env.setParallelism(4); + env.enableCheckpointing(100); + env.setRestartStrategy(RestartStrategies.noRestart()); + tEnv = StreamTableEnvironment.create(env); + + List<Row> trainData = + Arrays.asList( + Row.of("a", 1.), + Row.of("b", 1.), + Row.of("b", 2.0), Review comment: It appears that we use both `2.` and `2.0` in the code. Could we use the same style for consistency? ########## File path: flink-ml-lib/src/main/java/org/apache/flink/ml/feature/stringindexer/StringIndexerModelData.java ########## @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.flink.ml.feature.stringindexer; + +import org.apache.flink.api.common.serialization.Encoder; +import org.apache.flink.api.common.typeinfo.TypeInformation; +import org.apache.flink.api.common.typeutils.base.IntSerializer; +import org.apache.flink.api.common.typeutils.base.array.StringArraySerializer; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.connector.file.src.reader.SimpleStreamFormat; +import org.apache.flink.core.fs.FSDataInputStream; +import org.apache.flink.core.memory.DataInputViewStreamWrapper; +import org.apache.flink.core.memory.DataOutputViewStreamWrapper; +import org.apache.flink.streaming.api.datastream.DataStream; +import org.apache.flink.table.api.Table; +import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; +import org.apache.flink.table.api.internal.TableImpl; + +import java.io.EOFException; +import java.io.IOException; +import java.io.OutputStream; + +/** + * Model data of {@link StringIndexerModel} and {@link IndexToStringModel}. + * + * <p>This class also provides methods to convert model data from Table to DataStream, and classes + * to save/load model data. + */ +public class StringIndexerModelData { + /** Ordered strings of each input column. */ + public String[][] stringsArray; Review comment: Given that it is an array of arrays of strings, would it be more self-explanatory to rename this variable as `stringArrays`? Note that it also seems redundant to name it `stringsArrays`, given that an array is already plural. If we agree to change this variable name, could you also update the variable names used in the rest of this PR for consistency? ########## File path: flink-ml-lib/src/test/java/org/apache/flink/ml/feature/stringindexer/StringIndexerTest.java ########## @@ -0,0 +1,319 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.apache.flink.ml.feature.stringindexer; + +import org.apache.flink.api.common.restartstrategy.RestartStrategies; +import org.apache.flink.configuration.Configuration; +import org.apache.flink.ml.util.ReadWriteUtils; +import org.apache.flink.ml.util.StageTestUtils; +import org.apache.flink.streaming.api.environment.ExecutionCheckpointingOptions; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; +import org.apache.flink.table.api.Table; +import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; +import org.apache.flink.test.util.TestBaseUtils; +import org.apache.flink.types.Row; + +import org.apache.commons.collections.IteratorUtils; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** Tests the {@link StringIndexer} and {@link StringIndexerModel}. */ +public class StringIndexerTest { + @Rule public final TemporaryFolder tempFolder = new TemporaryFolder(); + + private StreamExecutionEnvironment env; + private StreamTableEnvironment tEnv; + private Table trainTable; + private Table predictTable; + + private String[][] expectedAlphabeticAscModelData; + private List<Row> expectedAlphabeticAscPredictData; + private List<Row> expectedAlphabeticDescPredictData; + private List<Row> expectedFreqAscPredictData; + private List<Row> expectedFreqDescPredictData; + + @Before + public void before() { + Configuration config = new Configuration(); + config.set(ExecutionCheckpointingOptions.ENABLE_CHECKPOINTS_AFTER_TASKS_FINISH, true); + env = StreamExecutionEnvironment.getExecutionEnvironment(config); + env.setParallelism(4); + env.enableCheckpointing(100); + env.setRestartStrategy(RestartStrategies.noRestart()); + tEnv = StreamTableEnvironment.create(env); + + List<Row> trainData = + Arrays.asList( + Row.of("a", 1.), + Row.of("b", 1.), + Row.of("b", 2.0), + Row.of("c", 0.0), + Row.of("d", 2.), + Row.of("a", 2.), + Row.of("b", 2.), + Row.of("b", -1.), + Row.of("a", -1.), + Row.of("c", -1.)); + trainTable = + tEnv.fromDataStream(env.fromCollection(trainData)).as("inputCol1", "inputCol2"); + + List<Row> predictData = Arrays.asList(Row.of("a", 2.), Row.of("b", 1.), Row.of("e", 2.)); + predictTable = + tEnv.fromDataStream(env.fromCollection(predictData)).as("inputCol1", "inputCol2"); + + expectedAlphabeticAscModelData = + new String[][] {{"a", "b", "c", "d"}, {"-1.0", "0.0", "1.0", "2.0"}}; + expectedAlphabeticAscPredictData = + Arrays.asList(Row.of("a", 2., 0, 3), Row.of("b", 1., 1, 2), Row.of("e", 2.0, 4, 3)); + expectedAlphabeticDescPredictData = + Arrays.asList(Row.of("a", 2., 3, 0), Row.of("b", 1., 2, 1), Row.of("e", 2.0, 4, 0)); + expectedFreqAscPredictData = + Arrays.asList(Row.of("a", 2., 2, 3), Row.of("b", 1., 3, 1), Row.of("e", 2.0, 4, 3)); + expectedFreqDescPredictData = + Arrays.asList(Row.of("a", 2., 1, 0), Row.of("b", 1., 0, 2), Row.of("e", 2.0, 4, 0)); + } + + @Test + public void testFitParam() { + StringIndexer stringIndexer = new StringIndexer(); + assertEquals(stringIndexer.getStringOrderType(), StringIndexerParams.RANDOM_ORDER); + assertEquals(stringIndexer.getHandleInvalid(), StringIndexerParams.ERROR_INVALID); + + stringIndexer + .setInputCols("inputCol1", 
"inputCol2") + .setOutputCols("outputCol1", "outputCol1") + .setStringOrderType(StringIndexerParams.ALPHABET_ASC_ORDER) + .setHandleInvalid(StringIndexerParams.SKIP_INVALID); + + assertArrayEquals(new String[] {"inputCol1", "inputCol2"}, stringIndexer.getInputCols()); + assertArrayEquals(new String[] {"outputCol1", "outputCol1"}, stringIndexer.getOutputCols()); + assertEquals(stringIndexer.getStringOrderType(), StringIndexerParams.ALPHABET_ASC_ORDER); + assertEquals(stringIndexer.getHandleInvalid(), StringIndexerParams.SKIP_INVALID); + } + + @Test + public void testPredictParam() { + StringIndexer stringIndexer = + new StringIndexer() + .setInputCols("inputCol1", "inputCol2") + .setOutputCols("outputCol1", "outputCol2") + .setStringOrderType(StringIndexerParams.ALPHABET_ASC_ORDER) + .setHandleInvalid(StringIndexerParams.SKIP_INVALID); + Table output = stringIndexer.fit(trainTable).transform(predictTable)[0]; + assertEquals( + Arrays.asList("inputCol1", "inputCol2", "outputCol1", "outputCol2"), + output.getResolvedSchema().getColumnNames()); + } + + @Test + public void testStringOrderType() throws Exception { + StringIndexer stringIndexer = + new StringIndexer() + .setInputCols("inputCol1", "inputCol2") + .setOutputCols("outputCol1", "outputCol2") + .setHandleInvalid(StringIndexerParams.KEEP_INVALID); + + Table output; + List<Row> predictedResult; + + // alphabetAsc + stringIndexer.setStringOrderType(StringIndexerParams.ALPHABET_ASC_ORDER); + output = stringIndexer.fit(trainTable).transform(predictTable)[0]; + predictedResult = IteratorUtils.toList(tEnv.toDataStream(output).executeAndCollect()); + verifyPredictionResult(expectedAlphabeticAscPredictData, predictedResult); + + // alphabetDesc + stringIndexer.setStringOrderType(StringIndexerParams.ALPHABET_DESC_ORDER); + output = stringIndexer.fit(trainTable).transform(predictTable)[0]; + predictedResult = IteratorUtils.toList(tEnv.toDataStream(output).executeAndCollect()); + verifyPredictionResult(expectedAlphabeticDescPredictData, predictedResult); + + // frequencyAsc + stringIndexer.setStringOrderType(StringIndexerParams.FREQUENCY_ASC_ORDER); + output = stringIndexer.fit(trainTable).transform(predictTable)[0]; + predictedResult = IteratorUtils.toList(tEnv.toDataStream(output).executeAndCollect()); + verifyPredictionResult(expectedFreqAscPredictData, predictedResult); + + // frequencyDesc + stringIndexer.setStringOrderType(StringIndexerParams.FREQUENCY_DESC_ORDER); + output = stringIndexer.fit(trainTable).transform(predictTable)[0]; + predictedResult = IteratorUtils.toList(tEnv.toDataStream(output).executeAndCollect()); + verifyPredictionResult(expectedFreqDescPredictData, predictedResult); + + // random + stringIndexer.setStringOrderType(StringIndexerParams.RANDOM_ORDER); + output = stringIndexer.fit(trainTable).transform(predictTable)[0]; + predictedResult = IteratorUtils.toList(tEnv.toDataStream(output).executeAndCollect()); + Set<Integer> distinctStringsCol1 = new HashSet<>(); + Set<Integer> distinctStringsCol2 = new HashSet<>(); + int index; + for (Row r : predictedResult) { + index = (Integer) r.getField(2); + distinctStringsCol1.add(index); + assertTrue(index >= 0 && index <= 4); + index = (Integer) r.getField(3); + assertTrue(index >= 0 && index <= 3); + distinctStringsCol2.add(index); + } + assertEquals(3, distinctStringsCol1.size()); + assertEquals(2, distinctStringsCol2.size()); + } + + @Test + public void testHandleInvalid() throws Exception { + StringIndexer stringIndexer = + new StringIndexer() + .setInputCols("inputCol1", 
"inputCol2") + .setOutputCols("outputCol1", "outputCol2") + .setStringOrderType(StringIndexerParams.ALPHABET_ASC_ORDER); + + Table output; + List<Row> expectedResult; + + // keep invalid data + stringIndexer.setHandleInvalid(StringIndexerParams.KEEP_INVALID); + output = stringIndexer.fit(trainTable).transform(predictTable)[0]; + List<Row> predictedResult = + IteratorUtils.toList(tEnv.toDataStream(output).executeAndCollect()); + verifyPredictionResult(expectedAlphabeticAscPredictData, predictedResult); + + // skip invalid data + stringIndexer.setHandleInvalid(StringIndexerParams.SKIP_INVALID); + output = stringIndexer.fit(trainTable).transform(predictTable)[0]; + predictedResult = IteratorUtils.toList(tEnv.toDataStream(output).executeAndCollect()); + expectedResult = Arrays.asList(Row.of("a", 2., 0, 3), Row.of("b", 1., 1, 2)); + verifyPredictionResult(expectedResult, predictedResult); + + // throw exception on invalid data + stringIndexer.setHandleInvalid(StringIndexerParams.ERROR_INVALID); + try { + output = stringIndexer.fit(trainTable).transform(predictTable)[0]; + IteratorUtils.toList(tEnv.toDataStream(output).executeAndCollect()); + fail(); + } catch (Exception e) { + assertEquals( + "The input contains unseen string: e. " + + "To handle unseen strings, set Param handleInvalid to keep.", + e.getCause().getCause().getCause().getCause().getCause().getMessage()); + } + } + + @Test + public void testFitAndPredict() throws Exception { + StringIndexer stringIndexer = + new StringIndexer() + .setInputCols("inputCol1", "inputCol2") + .setOutputCols("outputCol1", "outputCol2") + .setStringOrderType(StringIndexerParams.ALPHABET_ASC_ORDER) + .setHandleInvalid(StringIndexerParams.KEEP_INVALID); + Table output = stringIndexer.fit(trainTable).transform(predictTable)[0]; + List<Row> predictedResult = + IteratorUtils.toList(tEnv.toDataStream(output).executeAndCollect()); + verifyPredictionResult(expectedAlphabeticAscPredictData, predictedResult); + } + + @Test + public void testSaveLoadAndPredict() throws Exception { + StringIndexer stringIndexer = + new StringIndexer() + .setInputCols("inputCol1", "inputCol2") + .setOutputCols("outputCol1", "outputCol2") + .setStringOrderType(StringIndexerParams.ALPHABET_ASC_ORDER) + .setHandleInvalid(StringIndexerParams.KEEP_INVALID); + stringIndexer = + StageTestUtils.saveAndReload( + env, stringIndexer, tempFolder.newFolder().getAbsolutePath()); + StringIndexerModel model = stringIndexer.fit(trainTable); + model = StageTestUtils.saveAndReload(env, model, tempFolder.newFolder().getAbsolutePath()); + assertEquals( + Collections.singletonList("stringsArray"), + model.getModelData()[0].getResolvedSchema().getColumnNames()); + Table output = model.transform(predictTable)[0]; + List<Row> predictedResult = + IteratorUtils.toList(tEnv.toDataStream(output).executeAndCollect()); + verifyPredictionResult(expectedAlphabeticAscPredictData, predictedResult); + } + + @Test + public void testGetModelData() throws Exception { + StringIndexerModel model = + new StringIndexer() + .setInputCols("inputCol1", "inputCol2") + .setOutputCols("outputCol1", "outputCol2") + .setStringOrderType(StringIndexerParams.ALPHABET_ASC_ORDER) + .fit(trainTable); + StringIndexerModelData modelData = + StringIndexerModelData.getModelDataStream(model.getModelData()[0]) Review comment: Given that the focus of this test is on the `getModelData(..)` API, could we also verify that the model data table has the expected schema? Same for `IndexToStringModelTest::testGetModelData()`. 
########## File path: flink-ml-lib/src/main/java/org/apache/flink/ml/feature/stringindexer/StringIndexerParams.java ########## @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.flink.ml.feature.stringindexer; + +import org.apache.flink.ml.param.Param; +import org.apache.flink.ml.param.ParamValidators; +import org.apache.flink.ml.param.StringParam; + +/** + * Params of {@link StringIndexer}. + * + * @param <T> The class type of this instance. + */ +public interface StringIndexerParams<T> extends StringIndexerModelParams<T> { + String RANDOM_ORDER = "random"; + String FREQUENCY_DESC_ORDER = "frequencyDesc"; + String FREQUENCY_ASC_ORDER = "frequencyAsc"; + String ALPHABET_DESC_ORDER = "alphabetDesc"; + String ALPHABET_ASC_ORDER = "alphabetAsc"; + + /** Param for {@link StringIndexer} to decide the order of the strings of each column. */ + Param<String> STRING_ORDER_TYPE = Review comment: Should we add Javadoc to explain the semantics for each of these options? Maybe follow Spark's Javadoc for example. ########## File path: flink-ml-lib/src/main/java/org/apache/flink/ml/feature/stringindexer/StringIndexer.java ########## @@ -0,0 +1,247 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.flink.ml.feature.stringindexer; + +import org.apache.flink.api.common.functions.FlatMapFunction; +import org.apache.flink.api.common.functions.MapPartitionFunction; +import org.apache.flink.api.java.functions.KeySelector; +import org.apache.flink.api.java.tuple.Tuple2; +import org.apache.flink.api.java.tuple.Tuple3; +import org.apache.flink.ml.api.Estimator; +import org.apache.flink.ml.common.datastream.DataStreamUtils; +import org.apache.flink.ml.param.Param; +import org.apache.flink.ml.util.ParamUtils; +import org.apache.flink.ml.util.ReadWriteUtils; +import org.apache.flink.streaming.api.datastream.DataStream; +import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment; +import org.apache.flink.table.api.Table; +import org.apache.flink.table.api.bridge.java.StreamTableEnvironment; +import org.apache.flink.table.api.internal.TableImpl; +import org.apache.flink.types.Row; +import org.apache.flink.util.Collector; +import org.apache.flink.util.Preconditions; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * An Estimator which implements the string indexing algorithm. + * + * <p>A string indexer maps each input column (string/numerical value) to a index column (integer + * value) such that if the indices of two input are same iff their corresponding input columns are + * the same. The indices are in [0, numDistinctValuesThisColumn). + * + * <p>The input columns are cast to string if they are numeric values. By default, the output model + * is random ordered. Users can control this by setting {@link + * StringIndexerParams#STRING_ORDER_TYPE}. + */ +public class StringIndexer + implements Estimator<StringIndexer, StringIndexerModel>, + StringIndexerParams<StringIndexer> { + private final Map<Param<?>, Object> paramMap = new HashMap<>(); + + public StringIndexer() { + ParamUtils.initializeMapWithDefaultValues(paramMap, this); + } + + @Override + public void save(String path) throws IOException { + ReadWriteUtils.saveMetadata(this, path); + } + + public static StringIndexer load(StreamExecutionEnvironment env, String path) + throws IOException { + return ReadWriteUtils.loadStageParam(path); + } + + @Override + public Map<Param<?>, Object> getParamMap() { + return paramMap; + } + + @Override + public StringIndexerModel fit(Table... 
inputs) { + Preconditions.checkArgument(inputs.length == 1); + String[] inputCols = getInputCols(); + String[] outputCols = getOutputCols(); + Preconditions.checkArgument(inputCols.length == outputCols.length); + StreamTableEnvironment tEnv = + (StreamTableEnvironment) ((TableImpl) inputs[0]).getTableEnvironment(); + + DataStream<Tuple2<Integer, String>> columnIdAndString = + tEnv.toDataStream(inputs[0]).flatMap(new ExtractColumnIdAndString(inputCols)); + DataStream<Tuple3<Integer, String, Long>> columnIdAndStringAndCnt = + DataStreamUtils.mapPartition( + columnIdAndString.keyBy( + (KeySelector<Tuple2<Integer, String>, Integer>) Tuple2::hashCode), + new CountStringsByColumn(inputCols.length)); + DataStream<StringIndexerModelData> modelData = + DataStreamUtils.mapPartition( + columnIdAndStringAndCnt, + new GenerateModel(inputCols.length, getStringOrderType())); + modelData.getTransformation().setParallelism(1); + StringIndexerModel model = + new StringIndexerModel().setModelData(tEnv.fromDataStream(modelData)); + ReadWriteUtils.updateExistingParams(model, paramMap); + return model; + } + + /** + * Merges all the extracted strings and generates the {@link StringIndexerModelData} according + * to the specified string order type. + */ + private static class GenerateModel + implements MapPartitionFunction<Tuple3<Integer, String, Long>, StringIndexerModelData> { + private final int numCols; + private final String stringOrderType; + + public GenerateModel(int numCols, String stringOrderType) { + this.numCols = numCols; + this.stringOrderType = stringOrderType; + } + + @Override + public void mapPartition( + Iterable<Tuple3<Integer, String, Long>> values, + Collector<StringIndexerModelData> out) { + String[][] stringsArray = new String[numCols][]; + ArrayList<Tuple2<String, Long>>[] stringsAndCntsByColumn = new ArrayList[numCols]; + for (int i = 0; i < numCols; i++) { + stringsAndCntsByColumn[i] = new ArrayList<>(); + } + for (Tuple3<Integer, String, Long> colIdAndStringAndCnt : values) { + stringsAndCntsByColumn[colIdAndStringAndCnt.f0].add( + Tuple2.of(colIdAndStringAndCnt.f1, colIdAndStringAndCnt.f2)); + } + for (int i = 0; i < stringsAndCntsByColumn.length; i++) { + List<Tuple2<String, Long>> stringsAndCnts = stringsAndCntsByColumn[i]; + switch (stringOrderType) { + case StringIndexerParams.ALPHABET_ASC_ORDER: + stringsAndCnts.sort(Comparator.comparing(valAndCnt -> valAndCnt.f0)); + break; + case StringIndexerParams.ALPHABET_DESC_ORDER: + stringsAndCnts.sort( + (valAndCnt1, valAndCnt2) -> + -valAndCnt1.f0.compareTo(valAndCnt2.f0)); + break; + case StringIndexerParams.FREQUENCY_ASC_ORDER: + stringsAndCnts.sort(Comparator.comparing(valAndCnt -> valAndCnt.f1)); + break; + case StringIndexerParams.FREQUENCY_DESC_ORDER: + stringsAndCnts.sort( + (valAndCnt1, valAndCnt2) -> + -valAndCnt1.f1.compareTo(valAndCnt2.f1)); + break; + case StringIndexerParams.RANDOM_ORDER: + break; + default: + throw new RuntimeException( + "Unsupported string order type: " + + stringOrderType + + ". 
Supported options are: [" + + StringIndexerParams.RANDOM_ORDER + + ", " + + StringIndexerParams.ALPHABET_ASC_ORDER + + ", " + + StringIndexerParams.ALPHABET_DESC_ORDER + + ", " + + StringIndexerParams.FREQUENCY_ASC_ORDER + + ", " + + StringIndexerParams.ALPHABET_DESC_ORDER + + "]."); + } + stringsArray[i] = new String[stringsAndCnts.size()]; + for (int stringId = 0; stringId < stringsArray[i].length; stringId++) { + stringsArray[i][stringId] = stringsAndCnts.get(stringId).f0; + } + } + + out.collect(new StringIndexerModelData(stringsArray)); + } + } + + /** Computes the frequency of strings in each column. */ + private static class CountStringsByColumn + implements MapPartitionFunction< + Tuple2<Integer, String>, Tuple3<Integer, String, Long>> { + private final int numCols; + + public CountStringsByColumn(int numCols) { + this.numCols = numCols; + } + + @Override + public void mapPartition( + Iterable<Tuple2<Integer, String>> values, + Collector<Tuple3<Integer, String, Long>> out) { + HashMap<String, Long>[] string2CntByColumn = new HashMap[numCols]; + for (int i = 0; i < numCols; i++) { + string2CntByColumn[i] = new HashMap<>(); + } + for (Tuple2<Integer, String> columnIdAndString : values) { + int colId = columnIdAndString.f0; + String stringVal = columnIdAndString.f1; + if (string2CntByColumn[colId].containsKey(stringVal)) { + string2CntByColumn[colId].put( + stringVal, string2CntByColumn[colId].get(stringVal) + 1); + } else { + string2CntByColumn[colId].put(stringVal, 1L); + } + } + for (int i = 0; i < numCols; i++) { + if (string2CntByColumn[i].size() != 0) { Review comment: Would it be simpler to remove the `if (...)` here? ########## File path: flink-ml-lib/src/main/java/org/apache/flink/ml/feature/stringindexer/StringIndexerParams.java ########## @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.flink.ml.feature.stringindexer; + +import org.apache.flink.ml.param.Param; +import org.apache.flink.ml.param.ParamValidators; +import org.apache.flink.ml.param.StringParam; + +/** + * Params of {@link StringIndexer}. + * + * @param <T> The class type of this instance. + */ +public interface StringIndexerParams<T> extends StringIndexerModelParams<T> { + String RANDOM_ORDER = "random"; Review comment: Strictly speaking, it is not random. Random means that when the program is executed multiple times with exactly the same input, the output will likely still be different. This usually happens when the code explicitly permutes the output before returning it to the caller. It seems better to name it `arbitrary`. What do you think?
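To make the suggestion concrete, a hedged sketch of the rename follows. The actual `stringOrderType` param definition is cut off in this diff, so the `StringParam`/`ParamValidators.inArray` shape and the description text below are assumptions rather than the PR's code:
```
// Illustrative only: the real param definition is not shown in this diff.
String ARBITRARY_ORDER = "arbitrary";
String FREQUENCY_DESC_ORDER = "frequencyDesc";
String FREQUENCY_ASC_ORDER = "frequencyAsc";
String ALPHABET_DESC_ORDER = "alphabetDesc";
String ALPHABET_ASC_ORDER = "alphabetAsc";

/** Param for {@link StringIndexer} to decide the order of the strings of each column. */
Param<String> STRING_ORDER_TYPE =
        new StringParam(
                "stringOrderType",
                "How to order the strings of each input column before assigning indices.",
                ARBITRARY_ORDER,
                ParamValidators.inArray(
                        ARBITRARY_ORDER,
                        FREQUENCY_DESC_ORDER,
                        FREQUENCY_ASC_ORDER,
                        ALPHABET_DESC_ORDER,
                        ALPHABET_ASC_ORDER));
```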
