Github user kiszk commented on a diff in the pull request:

    https://github.com/apache/spark/pull/21045#discussion_r181489957

    --- Diff: sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala ---
    @@ -87,6 +87,62 @@ case class MapKeys(child: Expression)
       override def prettyName: String = "map_keys"
     }

    +@ExpressionDescription(
    +  usage = """_FUNC_(a1, a2) - Returns a merged array matching the N-th element of the first
    +    array with the N-th element of the second.""",
    +  examples = """
    +    Examples:
    +      > SELECT _FUNC_(array(1, 2, 3), array(2, 3, 4));
    +       [[1, 2], [2, 3], [3, 4]]
    +  """,
    +  since = "2.4.0")
    +case class Zip(left: Expression, right: Expression)
    +  extends BinaryExpression with ExpectsInputTypes {
    +
    +  override def inputTypes: Seq[AbstractDataType] = Seq(ArrayType, ArrayType)
    +
    +  override def dataType: DataType = ArrayType(left.dataType.asInstanceOf[ArrayType].elementType)
    +
    +  override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    +    nullSafeCodeGen(ctx, ev, (arr1, arr2) => {
    +      val i = ctx.freshName("i")
    +      s"""
    +      for (int $i = 0; $i < $arr1.numElements(); $i ++) {
    +        if ($arr1.isNullAt($i)) {
    +          ${ev.isNull} = true;
    +        } else {
    +          ${ev.value}[$i] = ($arr1[$i], $arr2[$i]);
    --- End diff --

    I might be wrong. We had a similar discussion [here](https://github.com/apache/spark/pull/21061#discussion_r181399858). If we need to generate Java code, how about writing the runtime routine in Scala and calling that routine from the generated Java?
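    To make the suggestion concrete, here is a minimal sketch of that approach. The `ZipRuntime` object, its `zipArrays` method, and the truncate-to-shorter-array behavior are all illustrative assumptions for this comment, not existing Spark APIs:

    ```scala
    import org.apache.spark.sql.catalyst.util.{ArrayData, GenericArrayData}
    import org.apache.spark.sql.types.DataType

    // Hypothetical runtime helper: the pairing logic lives in ordinary,
    // compiler-checked Scala instead of being spliced into generated Java source.
    object ZipRuntime {
      def zipArrays(
          left: ArrayData,
          right: ArrayData,
          leftType: DataType,
          rightType: DataType): ArrayData = {
        // Assumed behavior for this sketch: truncate to the shorter input.
        val n = math.min(left.numElements(), right.numElements())
        val result = new Array[Any](n)
        var i = 0
        while (i < n) {
          // Carry null entries through as nulls inside the pair.
          val l = if (left.isNullAt(i)) null else left.get(i, leftType)
          val r = if (right.isNullAt(i)) null else right.get(i, rightType)
          // Each output element is a two-element array pairing the i-th entries.
          result(i) = new GenericArrayData(Array[Any](l, r))
          i += 1
        }
        new GenericArrayData(result)
      }
    }
    ```

    The generated Java then shrinks to a single call (fragment of the hypothetical `doGenCode` above, using `addReferenceObj` to pass the element types into the generated code):

    ```scala
    override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
      val lt = ctx.addReferenceObj("leftElemType",
        left.dataType.asInstanceOf[ArrayType].elementType)
      val rt = ctx.addReferenceObj("rightElemType",
        right.dataType.asInstanceOf[ArrayType].elementType)
      nullSafeCodeGen(ctx, ev, (arr1, arr2) => {
        // No per-element loop in generated Java; just invoke the Scala routine.
        s"${ev.value} = org.apache.spark.sql.catalyst.expressions.ZipRuntime" +
          s".zipArrays($arr1, $arr2, $lt, $rt);"
      })
    }
    ```

    The trade-off: we lose type-specialized codegen and pay for boxing through `get`/`GenericArrayData`, but the loop becomes testable Scala and the generated Java can no longer contain invalid statements like the tuple assignment above.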