okumin commented on code in PR #5624:
URL: https://github.com/apache/hive/pull/5624#discussion_r2177419232

########## ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSubstr.java: ##########
@@ -96,53 +97,51 @@ private Text evaluateInternal(Text t, int pos, int len) {
       return r;
     }
 
-    String s = t.toString();
-    int[] index = makeIndex(pos, len, s.length());
-    if (index == null) {
+    byte[] utf8String = t.toString().getBytes();
+    StringSubstrColStartLen.populateSubstrOffsets(utf8String, 0, utf8String.length, adjustStartPos(pos), len, index);
+    if (index[0] == -1) {
       return r;
     }
 
-    r.set(s.substring(index[0], index[1]));
+    r.set(new String(utf8String, index[0], index[1]));
     return r;
   }
 
-  private int[] makeIndex(int pos, int len, int inputLen) {
-    if ((Math.abs(pos) > inputLen)) {
-      return null;
-    }
-
-    int start, end;
+  private Text evaluateInternal(Text t, int pos) {
+    r.clear();
 
-    if (pos > 0) {
-      start = pos - 1;
-    } else if (pos < 0) {
-      start = inputLen + pos;
-    } else {
-      start = 0;
+    byte[] utf8String = t.toString().getBytes();

Review Comment:
Ditto

########## ql/src/test/queries/clientpositive/udf_substr.q: ##########
@@ -89,3 +89,11 @@ FROM src tablesample (1 rows);
 
 SELECT substr('ABC', cast(2147483649 as bigint))
 FROM src tablesample (1 rows);
+
+--test 4-byte charactor
+set hive.vectorized.execution.enabled=false;
+SELECT
+  substr('あa🤎いiうu', 1, 3) as b1,
+  substr('あa🤎いiうu', 3) as b2,
+  substr('あa🤎いiうu', -5) as b3
+FROM src tablesample (1 rows);

Review Comment:
I verified that the master branch can't pass this test case:
```
+POSTHOOK: query: SELECT
+  substr('あa🤎いiうu', 1, 3) as b1,
+  substr('あa🤎いiうu', 3) as b2,
+  substr('あa🤎いiうu', -5) as b3
+FROM src tablesample (1 rows)
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@src
+#### A masked pattern was here ####
+あa? 🤎いiうu ?いiうu
```

########## ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSubstr.java: ##########
@@ -96,53 +97,51 @@ private Text evaluateInternal(Text t, int pos, int len) {
       return r;
     }
 
-    String s = t.toString();
-    int[] index = makeIndex(pos, len, s.length());
-    if (index == null) {
+    byte[] utf8String = t.toString().getBytes();

Review Comment:
Why not use `Text#getBytes` or `Text#copyBytes`? `getBytes` is probably preferable because we don't mutate the bytes. Note that we then have to use `Text#getLength` instead of the length of the byte array:
https://github.com/apache/hadoop/blob/rel/release-3.4.1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java#L116-L132
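For readers less familiar with the `Text` contract, here is a minimal standalone sketch (hypothetical, not part of this patch; the class name and sample string are made up) of the distinction the comment relies on: `getBytes` exposes the backing array, which is only valid up to `getLength`, while `copyBytes` returns an exact-length copy.

```java
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.io.Text;

public class TextBytesSketch {
  public static void main(String[] args) {
    Text t = new Text("あa🤎いiうu");

    // getBytes() exposes the backing array without copying; only the first
    // getLength() bytes hold valid UTF-8 data, anything beyond is spare capacity.
    byte[] backing = t.getBytes();
    int valid = t.getLength();                    // 16 UTF-8 bytes for this string
    System.out.println(backing.length >= valid);  // true; the array may be longer

    // copyBytes() returns a defensive copy trimmed to exactly getLength() bytes.
    byte[] copy = t.copyBytes();
    System.out.println(copy.length == valid);     // true

    // Any decoding must therefore be bounded by getLength(), not backing.length.
    System.out.println(new String(backing, 0, valid, StandardCharsets.UTF_8));
  }
}
```

With `getBytes`, the patch would accordingly pass `t.getLength()` rather than `utf8String.length` to `populateSubstrOffsets`.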

########## ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSubstr.java: ##########
@@ -96,53 +97,51 @@ private Text evaluateInternal(Text t, int pos, int len) {
       return r;
     }
 
-    String s = t.toString();
-    int[] index = makeIndex(pos, len, s.length());
-    if (index == null) {
+    byte[] utf8String = t.toString().getBytes();
+    StringSubstrColStartLen.populateSubstrOffsets(utf8String, 0, utf8String.length, adjustStartPos(pos), len, index);
+    if (index[0] == -1) {
       return r;
     }
 
-    r.set(s.substring(index[0], index[1]));
+    r.set(new String(utf8String, index[0], index[1]));
     return r;
   }
 
-  private int[] makeIndex(int pos, int len, int inputLen) {
-    if ((Math.abs(pos) > inputLen)) {
-      return null;
-    }
-
-    int start, end;
+  private Text evaluateInternal(Text t, int pos) {
+    r.clear();
 
-    if (pos > 0) {
-      start = pos - 1;
-    } else if (pos < 0) {
-      start = inputLen + pos;
-    } else {
-      start = 0;
+    byte[] utf8String = t.toString().getBytes();
+    int offset = StringSubstrColStart.getSubstrStartOffset(utf8String, 0, utf8String.length, adjustStartPos(pos));
+    if (offset == -1) {
+      return r;
     }
 
-    if ((inputLen - start) < len) {
-      end = inputLen;
-    } else {
-      end = start + len;
-    }
-    index[0] = start;
-    index[1] = end;
-    return index;
+    r.set(new String(utf8String, offset, utf8String.length - offset));

Review Comment:
Ditto

########## ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSubstr.java: ##########
@@ -172,32 +171,59 @@ public BytesWritable evaluate(BytesWritable bw, IntWritable pos, IntWritable len
   }
 
   private BytesWritable evaluateInternal(BytesWritable bw, int pos, int len) {
-
     if (len <= 0) {
       return new BytesWritable();
     }
 
-    int[] index = makeIndex(pos, len, bw.getLength());
-    if (index == null) {
+    // Even though we are using longs, substr can only deal with ints, so we use
+    // the maximum int value as the maxValue
+    StringSubstrColStartLen.populateSubstrOffsets(bw.getBytes(), 0, bw.getLength(), adjustStartPos(pos), len, index);

Review Comment:
`StringSubstrColStartLen.populateSubstrOffsets` assumes the byte array is UTF-8 and treats `substrStart` and `substrLength` as positions within that UTF-8 string. However, `UDFSubstr#evaluate(BytesWritable, ...)` seems to handle the byte array as a plain byte array without assuming any encoding. So I don't think we need to change the behavior here.

########## ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSubstr.java: ##########
@@ -96,53 +97,51 @@ private Text evaluateInternal(Text t, int pos, int len) {
       return r;
     }
 
-    String s = t.toString();
-    int[] index = makeIndex(pos, len, s.length());
-    if (index == null) {
+    byte[] utf8String = t.toString().getBytes();
+    StringSubstrColStartLen.populateSubstrOffsets(utf8String, 0, utf8String.length, adjustStartPos(pos), len, index);
+    if (index[0] == -1) {
       return r;
    }
 
-    r.set(s.substring(index[0], index[1]));
+    r.set(new String(utf8String, index[0], index[1]));

Review Comment:
I understand that the previous `index[1]` held the end index, whereas the current one holds the length. Let's use `new String(utf8String, index[0], index[1], StandardCharsets.UTF_8)`, because the current call relies on the JVM's default charset.
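To make the charset point concrete, here is a small self-contained sketch (hypothetical, not from the patch; the class name is made up): `new String(byte[], int, int)` decodes with the JVM's default charset, which is only guaranteed to be UTF-8 from JDK 18 (JEP 400) onward, while the overload with an explicit `Charset` is deterministic.

```java
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;

public class DefaultCharsetSketch {
  public static void main(String[] args) {
    byte[] utf8 = "あa🤎いiうu".getBytes(StandardCharsets.UTF_8);

    // Deterministic: always decodes the bytes as UTF-8.
    String explicit = new String(utf8, 0, utf8.length, StandardCharsets.UTF_8);

    // Depends on the JVM default charset (e.g. -Dfile.encoding); this mojibakes
    // whenever the default is not UTF-8, which is only guaranteed since JDK 18.
    String implicit = new String(utf8, 0, utf8.length);

    System.out.println(Charset.defaultCharset());
    System.out.println(explicit.equals(implicit)); // true only when the default is UTF-8
  }
}
```

The same concern would apply to `r.set(new String(utf8String, offset, utf8String.length - offset))` in `evaluateInternal(Text t, int pos)` above.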
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment.

To unsubscribe, e-mail: gitbox-unsubscr...@hive.apache.org
For queries about this service, please contact Infrastructure at: us...@infra.apache.org
For additional commands, e-mail: gitbox-h...@hive.apache.org