spark git commit: [SPARK-18118][SQL] fix a compilation error due to nested JavaBeans
Repository: spark Updated Branches: refs/heads/branch-2.0 9070bd31c -> 759bd4a6a [SPARK-18118][SQL] fix a compilation error due to nested JavaBeans Remove this reference. (cherry picked from commit 70dfdcbbf11c9c3174abc111afa2250236e31af2) Signed-off-by: Herman van Hovell Project: http://git-wip-us.apache.org/repos/asf/spark/repo Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/759bd4a6 Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/759bd4a6 Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/759bd4a6 Branch: refs/heads/branch-2.0 Commit: 759bd4a6a7ea83b654089c6bd1d1574c709ca35f Parents: 9070bd3 Author: Herman van Hovell Authored: Mon Nov 28 04:41:43 2016 -0800 Committer: Herman van Hovell Committed: Mon Nov 28 04:46:32 2016 -0800 -- .../apache/spark/sql/catalyst/expressions/objects/objects.scala| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/spark/blob/759bd4a6/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala -- diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala index 8043475..c17c807 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala @@ -695,7 +695,7 @@ case class InitializeJavaBean(beanInstance: Expression, setters: Map[String, Exp val fieldGen = fieldValue.genCode(ctx) s""" ${fieldGen.code} - this.${javaBeanInstance}.$setterMethod(${fieldGen.value}); + ${javaBeanInstance}.$setterMethod(${fieldGen.value}); """ } val initializeCode = ctx.splitExpressions(ctx.INPUT_ROW, initialize.toSeq) - To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org For additional commands, e-mail: commits-h...@spark.apache.org
spark git commit: [SPARK-18118][SQL] fix a compilation error due to nested JavaBeans
Repository: spark Updated Branches: refs/heads/branch-2.1 712bd5abc -> e449f7546 [SPARK-18118][SQL] fix a compilation error due to nested JavaBeans Remove this reference. (cherry picked from commit 70dfdcbbf11c9c3174abc111afa2250236e31af2) Signed-off-by: Herman van Hovell Project: http://git-wip-us.apache.org/repos/asf/spark/repo Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/e449f754 Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/e449f754 Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/e449f754 Branch: refs/heads/branch-2.1 Commit: e449f7546897c5f29075e6a0913a5a6106bcbb5f Parents: 712bd5a Author: Herman van Hovell Authored: Mon Nov 28 04:41:43 2016 -0800 Committer: Herman van Hovell Committed: Mon Nov 28 04:45:23 2016 -0800 -- .../apache/spark/sql/catalyst/expressions/objects/objects.scala| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/spark/blob/e449f754/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala -- diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala index 6952f54..e517ec1 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala @@ -905,7 +905,7 @@ case class InitializeJavaBean(beanInstance: Expression, setters: Map[String, Exp val fieldGen = fieldValue.genCode(ctx) s""" ${fieldGen.code} - this.${javaBeanInstance}.$setterMethod(${fieldGen.value}); + ${javaBeanInstance}.$setterMethod(${fieldGen.value}); """ } val initializeCode = ctx.splitExpressions(ctx.INPUT_ROW, initialize.toSeq) - To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org For additional commands, e-mail: commits-h...@spark.apache.org
spark git commit: [SPARK-18118][SQL] fix a compilation error due to nested JavaBeans — Remove this reference.
Repository: spark Updated Branches: refs/heads/master f075cd9cb -> 70dfdcbbf [SPARK-18118][SQL] fix a compilation error due to nested JavaBeans\nRemove this reference. Project: http://git-wip-us.apache.org/repos/asf/spark/repo Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/70dfdcbb Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/70dfdcbb Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/70dfdcbb Branch: refs/heads/master Commit: 70dfdcbbf11c9c3174abc111afa2250236e31af2 Parents: f075cd9 Author: Herman van Hovell Authored: Mon Nov 28 04:41:43 2016 -0800 Committer: Herman van Hovell Committed: Mon Nov 28 04:41:43 2016 -0800 -- .../apache/spark/sql/catalyst/expressions/objects/objects.scala| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/spark/blob/70dfdcbb/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala -- diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala index 6952f54..e517ec1 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala @@ -905,7 +905,7 @@ case class InitializeJavaBean(beanInstance: Expression, setters: Map[String, Exp val fieldGen = fieldValue.genCode(ctx) s""" ${fieldGen.code} - this.${javaBeanInstance}.$setterMethod(${fieldGen.value}); + ${javaBeanInstance}.$setterMethod(${fieldGen.value}); """ } val initializeCode = ctx.splitExpressions(ctx.INPUT_ROW, initialize.toSeq) - To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org For additional commands, e-mail: commits-h...@spark.apache.org
spark git commit: [SPARK-18118][SQL] fix a compilation error due to nested JavaBeans
Repository: spark Updated Branches: refs/heads/branch-2.1 d6e027e61 -> 712bd5abc [SPARK-18118][SQL] fix a compilation error due to nested JavaBeans ## What changes were proposed in this pull request? This PR avoids a compilation error due to more than 64KB Java byte code size. This error occur since generated java code `SpecificSafeProjection.apply()` for nested JavaBeans is too big. This PR avoids this compilation error by splitting a big code chunk into multiple methods by calling `CodegenContext.splitExpression` at `InitializeJavaBean.doGenCode` An object reference for JavaBean is stored to an instance variable `javaBean...`. Then, the instance variable will be referenced in the split methods. Generated code with this PR /* 22098 */ private void apply130_0(InternalRow i) { ... /* 22125 */ boolean isNull238 = i.isNullAt(2); /* 22126 */ InternalRow value238 = isNull238 ? null : (i.getStruct(2, 3)); /* 22127 */ boolean isNull236 = false; /* 22128 */ test.org.apache.spark.sql.JavaDatasetSuite$Nesting1 value236 = null; /* 22129 */ if (!false && isNull238) { /* 22130 */ /* 22131 */ final test.org.apache.spark.sql.JavaDatasetSuite$Nesting1 value239 = null; /* 22132 */ isNull236 = true; /* 22133 */ value236 = value239; /* 22134 */ } else { /* 22135 */ /* 22136 */ final test.org.apache.spark.sql.JavaDatasetSuite$Nesting1 value241 = false ? null : new test.org.apache.spark.sql.JavaDatasetSuite$Nesting1(); /* 22137 */ this.javaBean14 = value241; /* 22138 */ if (!false) { /* 22139 */ apply25_0(i); /* 22140 */ apply25_1(i); /* 22141 */ apply25_2(i); /* 22142 */ } /* 22143 */ isNull236 = false; /* 22144 */ value236 = value241; /* 22145 */ } /* 22146 */ this.javaBean.setField2(value236); /* 22147 */ /* 22148 */ } ... /* 22928 */ public java.lang.Object apply(java.lang.Object _i) { /* 22929 */ InternalRow i = (InternalRow) _i; /* 22930 */ /* 22931 */ final test.org.apache.spark.sql.JavaDatasetSuite$NestedComplicatedJavaBean value1 = false ? 
null : new test.org.apache.spark.sql.JavaDatasetSuite$NestedComplicatedJavaBean(); /* 22932 */ this.javaBean = value1; /* 22933 */ if (!false) { /* 22934 */ apply130_0(i); /* 22935 */ apply130_1(i); /* 22936 */ apply130_2(i); /* 22937 */ apply130_3(i); /* 22938 */ apply130_4(i); /* 22939 */ } /* 22940 */ if (false) { /* 22941 */ mutableRow.setNullAt(0); /* 22942 */ } else { /* 22943 */ /* 22944 */ mutableRow.update(0, value1); /* 22945 */ } /* 22946 */ /* 22947 */ return mutableRow; /* 22948 */ } ## How was this patch tested? added a test suite into `JavaDatasetSuite.java` Author: Kazuaki Ishizaki Closes #16032 from kiszk/SPARK-18118. (cherry picked from commit f075cd9cb7157819df9aec67baee8913c4ed5c53) Signed-off-by: Herman van Hovell Project: http://git-wip-us.apache.org/repos/asf/spark/repo Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/712bd5ab Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/712bd5ab Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/712bd5ab Branch: refs/heads/branch-2.1 Commit: 712bd5abc827c4eaf3f53bfc9155c8535584ca96 Parents: d6e027e Author: Kazuaki Ishizaki Authored: Mon Nov 28 04:18:35 2016 -0800 Committer: Herman van Hovell Committed: Mon Nov 28 04:18:46 2016 -0800 -- .../catalyst/expressions/objects/objects.scala | 10 +- .../org/apache/spark/sql/JavaDatasetSuite.java | 429 +++ 2 files changed, 437 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/spark/blob/712bd5ab/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala -- diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala index 5c27179..6952f54 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala @@ -896,19 
+896,25 @@ case class InitializeJavaBean(beanInstance: Expression, setters: Map[String, Exp override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val instanceGen = beanInstance.genCode(ctx) +val javaBeanInstance = ctx.freshName("javaBean") +val beanInstanceJavaType = ctx.javaType(beanInstance.dataType) +ctx.addMutableState(beanInstanceJavaType, javaBeanInstance, "") + val initialize = setters.map { case (setterMethod, fieldValue) => val fieldGen = fieldValue.genCode(ctx) s""" ${fiel
spark git commit: [SPARK-18118][SQL] fix a compilation error due to nested JavaBeans
Repository: spark Updated Branches: refs/heads/branch-2.0 e67ce4837 -> 9070bd31c [SPARK-18118][SQL] fix a compilation error due to nested JavaBeans ## What changes were proposed in this pull request? This PR avoids a compilation error due to more than 64KB Java byte code size. This error occur since generated java code `SpecificSafeProjection.apply()` for nested JavaBeans is too big. This PR avoids this compilation error by splitting a big code chunk into multiple methods by calling `CodegenContext.splitExpression` at `InitializeJavaBean.doGenCode` An object reference for JavaBean is stored to an instance variable `javaBean...`. Then, the instance variable will be referenced in the split methods. Generated code with this PR /* 22098 */ private void apply130_0(InternalRow i) { ... /* 22125 */ boolean isNull238 = i.isNullAt(2); /* 22126 */ InternalRow value238 = isNull238 ? null : (i.getStruct(2, 3)); /* 22127 */ boolean isNull236 = false; /* 22128 */ test.org.apache.spark.sql.JavaDatasetSuite$Nesting1 value236 = null; /* 22129 */ if (!false && isNull238) { /* 22130 */ /* 22131 */ final test.org.apache.spark.sql.JavaDatasetSuite$Nesting1 value239 = null; /* 22132 */ isNull236 = true; /* 22133 */ value236 = value239; /* 22134 */ } else { /* 22135 */ /* 22136 */ final test.org.apache.spark.sql.JavaDatasetSuite$Nesting1 value241 = false ? null : new test.org.apache.spark.sql.JavaDatasetSuite$Nesting1(); /* 22137 */ this.javaBean14 = value241; /* 22138 */ if (!false) { /* 22139 */ apply25_0(i); /* 22140 */ apply25_1(i); /* 22141 */ apply25_2(i); /* 22142 */ } /* 22143 */ isNull236 = false; /* 22144 */ value236 = value241; /* 22145 */ } /* 22146 */ this.javaBean.setField2(value236); /* 22147 */ /* 22148 */ } ... /* 22928 */ public java.lang.Object apply(java.lang.Object _i) { /* 22929 */ InternalRow i = (InternalRow) _i; /* 22930 */ /* 22931 */ final test.org.apache.spark.sql.JavaDatasetSuite$NestedComplicatedJavaBean value1 = false ? 
null : new test.org.apache.spark.sql.JavaDatasetSuite$NestedComplicatedJavaBean(); /* 22932 */ this.javaBean = value1; /* 22933 */ if (!false) { /* 22934 */ apply130_0(i); /* 22935 */ apply130_1(i); /* 22936 */ apply130_2(i); /* 22937 */ apply130_3(i); /* 22938 */ apply130_4(i); /* 22939 */ } /* 22940 */ if (false) { /* 22941 */ mutableRow.setNullAt(0); /* 22942 */ } else { /* 22943 */ /* 22944 */ mutableRow.update(0, value1); /* 22945 */ } /* 22946 */ /* 22947 */ return mutableRow; /* 22948 */ } ## How was this patch tested? added a test suite into `JavaDatasetSuite.java` Author: Kazuaki Ishizaki Closes #16032 from kiszk/SPARK-18118. (cherry picked from commit f075cd9cb7157819df9aec67baee8913c4ed5c53) Signed-off-by: Herman van Hovell Project: http://git-wip-us.apache.org/repos/asf/spark/repo Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/9070bd31 Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/9070bd31 Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/9070bd31 Branch: refs/heads/branch-2.0 Commit: 9070bd31c243d74d3c28c5208bc11e41876590ca Parents: e67ce48 Author: Kazuaki Ishizaki Authored: Mon Nov 28 04:18:35 2016 -0800 Committer: Herman van Hovell Committed: Mon Nov 28 04:19:01 2016 -0800 -- .../catalyst/expressions/objects/objects.scala | 10 +- .../org/apache/spark/sql/JavaDatasetSuite.java | 429 +++ 2 files changed, 437 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/spark/blob/9070bd31/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala -- diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala index d9c29b3..8043475 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala @@ -686,19 
+686,25 @@ case class InitializeJavaBean(beanInstance: Expression, setters: Map[String, Exp override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val instanceGen = beanInstance.genCode(ctx) +val javaBeanInstance = ctx.freshName("javaBean") +val beanInstanceJavaType = ctx.javaType(beanInstance.dataType) +ctx.addMutableState(beanInstanceJavaType, javaBeanInstance, "") + val initialize = setters.map { case (setterMethod, fieldValue) => val fieldGen = fieldValue.genCode(ctx) s""" ${fiel
spark git commit: [SPARK-18118][SQL] fix a compilation error due to nested JavaBeans
Repository: spark Updated Branches: refs/heads/master 454b80499 -> f075cd9cb [SPARK-18118][SQL] fix a compilation error due to nested JavaBeans ## What changes were proposed in this pull request? This PR avoids a compilation error due to more than 64KB Java byte code size. This error occur since generated java code `SpecificSafeProjection.apply()` for nested JavaBeans is too big. This PR avoids this compilation error by splitting a big code chunk into multiple methods by calling `CodegenContext.splitExpression` at `InitializeJavaBean.doGenCode` An object reference for JavaBean is stored to an instance variable `javaBean...`. Then, the instance variable will be referenced in the split methods. Generated code with this PR /* 22098 */ private void apply130_0(InternalRow i) { ... /* 22125 */ boolean isNull238 = i.isNullAt(2); /* 22126 */ InternalRow value238 = isNull238 ? null : (i.getStruct(2, 3)); /* 22127 */ boolean isNull236 = false; /* 22128 */ test.org.apache.spark.sql.JavaDatasetSuite$Nesting1 value236 = null; /* 22129 */ if (!false && isNull238) { /* 22130 */ /* 22131 */ final test.org.apache.spark.sql.JavaDatasetSuite$Nesting1 value239 = null; /* 22132 */ isNull236 = true; /* 22133 */ value236 = value239; /* 22134 */ } else { /* 22135 */ /* 22136 */ final test.org.apache.spark.sql.JavaDatasetSuite$Nesting1 value241 = false ? null : new test.org.apache.spark.sql.JavaDatasetSuite$Nesting1(); /* 22137 */ this.javaBean14 = value241; /* 22138 */ if (!false) { /* 22139 */ apply25_0(i); /* 22140 */ apply25_1(i); /* 22141 */ apply25_2(i); /* 22142 */ } /* 22143 */ isNull236 = false; /* 22144 */ value236 = value241; /* 22145 */ } /* 22146 */ this.javaBean.setField2(value236); /* 22147 */ /* 22148 */ } ... /* 22928 */ public java.lang.Object apply(java.lang.Object _i) { /* 22929 */ InternalRow i = (InternalRow) _i; /* 22930 */ /* 22931 */ final test.org.apache.spark.sql.JavaDatasetSuite$NestedComplicatedJavaBean value1 = false ? 
null : new test.org.apache.spark.sql.JavaDatasetSuite$NestedComplicatedJavaBean(); /* 22932 */ this.javaBean = value1; /* 22933 */ if (!false) { /* 22934 */ apply130_0(i); /* 22935 */ apply130_1(i); /* 22936 */ apply130_2(i); /* 22937 */ apply130_3(i); /* 22938 */ apply130_4(i); /* 22939 */ } /* 22940 */ if (false) { /* 22941 */ mutableRow.setNullAt(0); /* 22942 */ } else { /* 22943 */ /* 22944 */ mutableRow.update(0, value1); /* 22945 */ } /* 22946 */ /* 22947 */ return mutableRow; /* 22948 */ } ## How was this patch tested? added a test suite into `JavaDatasetSuite.java` Author: Kazuaki Ishizaki Closes #16032 from kiszk/SPARK-18118. Project: http://git-wip-us.apache.org/repos/asf/spark/repo Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/f075cd9c Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/f075cd9c Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/f075cd9c Branch: refs/heads/master Commit: f075cd9cb7157819df9aec67baee8913c4ed5c53 Parents: 454b804 Author: Kazuaki Ishizaki Authored: Mon Nov 28 04:18:35 2016 -0800 Committer: Herman van Hovell Committed: Mon Nov 28 04:18:35 2016 -0800 -- .../catalyst/expressions/objects/objects.scala | 10 +- .../org/apache/spark/sql/JavaDatasetSuite.java | 429 +++ 2 files changed, 437 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/spark/blob/f075cd9c/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala -- diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala index 5c27179..6952f54 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala @@ -896,19 +896,25 @@ case class InitializeJavaBean(beanInstance: Expression, setters: Map[String, Exp override def 
doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val instanceGen = beanInstance.genCode(ctx) +val javaBeanInstance = ctx.freshName("javaBean") +val beanInstanceJavaType = ctx.javaType(beanInstance.dataType) +ctx.addMutableState(beanInstanceJavaType, javaBeanInstance, "") + val initialize = setters.map { case (setterMethod, fieldValue) => val fieldGen = fieldValue.genCode(ctx) s""" ${fieldGen.code} - ${instanceGen.value}.$setterMethod(${fieldGen.value}); + this.${javaBeanInstanc