spark git commit: [SPARK-18745][SQL] Fix signed integer overflow due to toInt cast
Repository: spark Updated Branches: refs/heads/branch-2.1 eb2d9bfd4 -> 562507ef0 [SPARK-18745][SQL] Fix signed integer overflow due to toInt cast ## What changes were proposed in this pull request? This PR avoids that the result of a `toInt` cast is negative due to signed integer overflow (e.g. `0x8000_0000L.toInt < 0`, since the low 32 bits set the sign bit). This PR performs the cast only after we can ensure the value is within the range of a signed integer (the result of `min(buffer.length, end - offset)` never exceeds `buffer.length`, which is an Int). ## How was this patch tested? Manually executed query68 of TPC-DS with 100TB Author: Kazuaki Ishizaki. Closes #16235 from kiszk/SPARK-18745. (cherry picked from commit d60ab5fd9b6af9aa5080a2d13b3589d8b79c5c5c) Signed-off-by: Herman van Hovell Project: http://git-wip-us.apache.org/repos/asf/spark/repo Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/562507ef Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/562507ef Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/562507ef Branch: refs/heads/branch-2.1 Commit: 562507ef038f09ff422e9831416af5119282a9d0 Parents: eb2d9bf Author: Kazuaki Ishizaki Authored: Fri Dec 9 23:13:36 2016 +0100 Committer: Herman van Hovell Committed: Fri Dec 9 23:13:50 2016 +0100 -- .../apache/spark/sql/execution/joins/HashedRelation.scala| 8 1 file changed, 4 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/spark/blob/562507ef/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala -- diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala index 8821c0d..b9f6601 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala @@ -670,9 +670,9 @@ private[execution] final class LongToUnsafeRowMap(val mm: TaskMemoryManager, cap var offset: Long = Platform.LONG_ARRAY_OFFSET val end = len * 8L + Platform.LONG_ARRAY_OFFSET while (offset < end) { - val size = Math.min(buffer.length, (end - offset).toInt) + val size = Math.min(buffer.length, end - offset) Platform.copyMemory(arr, offset, buffer, Platform.BYTE_ARRAY_OFFSET, size) - writeBuffer(buffer, 0, size) + writeBuffer(buffer, 0, size.toInt) offset += size } } @@ -710,8 +710,8 @@ private[execution] final class LongToUnsafeRowMap(val mm: TaskMemoryManager, cap var offset: Long = Platform.LONG_ARRAY_OFFSET val end = length * 8L + Platform.LONG_ARRAY_OFFSET while (offset < end) { - val size = Math.min(buffer.length, (end - offset).toInt) - readBuffer(buffer, 0, size) + val size = Math.min(buffer.length, end - offset) + readBuffer(buffer, 0, size.toInt) Platform.copyMemory(buffer, Platform.BYTE_ARRAY_OFFSET, array, offset, size) offset += size } - To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org For additional commands, e-mail: commits-h...@spark.apache.org
spark git commit: [SPARK-18745][SQL] Fix signed integer overflow due to toInt cast
Repository: spark Updated Branches: refs/heads/branch-2.0 65b4b0561 -> 2c342e5a4 [SPARK-18745][SQL] Fix signed integer overflow due to toInt cast ## What changes were proposed in this pull request? This PR avoids that the result of a `toInt` cast is negative due to signed integer overflow (e.g. `0x8000_0000L.toInt < 0`, since the low 32 bits set the sign bit). This PR performs the cast only after we can ensure the value is within the range of a signed integer (the result of `min(buffer.length, end - offset)` never exceeds `buffer.length`, which is an Int). ## How was this patch tested? Manually executed query68 of TPC-DS with 100TB Author: Kazuaki Ishizaki. Closes #16235 from kiszk/SPARK-18745. (cherry picked from commit d60ab5fd9b6af9aa5080a2d13b3589d8b79c5c5c) Signed-off-by: Herman van Hovell Project: http://git-wip-us.apache.org/repos/asf/spark/repo Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/2c342e5a Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/2c342e5a Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/2c342e5a Branch: refs/heads/branch-2.0 Commit: 2c342e5a4a9c88ed1ffc2ff19dd0b2eb3b6336ac Parents: 65b4b05 Author: Kazuaki Ishizaki Authored: Fri Dec 9 23:13:36 2016 +0100 Committer: Herman van Hovell Committed: Fri Dec 9 23:14:04 2016 +0100 -- .../apache/spark/sql/execution/joins/HashedRelation.scala| 8 1 file changed, 4 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/spark/blob/2c342e5a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala -- diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala index 8821c0d..b9f6601 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala @@ -670,9 +670,9 @@ private[execution] final class LongToUnsafeRowMap(val mm: TaskMemoryManager, cap var offset: Long = Platform.LONG_ARRAY_OFFSET val end = len * 8L + Platform.LONG_ARRAY_OFFSET while (offset < end) { - val size = Math.min(buffer.length, (end - offset).toInt) + val size = Math.min(buffer.length, end - offset) Platform.copyMemory(arr, offset, buffer, Platform.BYTE_ARRAY_OFFSET, size) - writeBuffer(buffer, 0, size) + writeBuffer(buffer, 0, size.toInt) offset += size } } @@ -710,8 +710,8 @@ private[execution] final class LongToUnsafeRowMap(val mm: TaskMemoryManager, cap var offset: Long = Platform.LONG_ARRAY_OFFSET val end = length * 8L + Platform.LONG_ARRAY_OFFSET while (offset < end) { - val size = Math.min(buffer.length, (end - offset).toInt) - readBuffer(buffer, 0, size) + val size = Math.min(buffer.length, end - offset) + readBuffer(buffer, 0, size.toInt) Platform.copyMemory(buffer, Platform.BYTE_ARRAY_OFFSET, array, offset, size) offset += size } - To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org For additional commands, e-mail: commits-h...@spark.apache.org
spark git commit: [SPARK-18745][SQL] Fix signed integer overflow due to toInt cast
Repository: spark Updated Branches: refs/heads/master b08b50045 -> d60ab5fd9 [SPARK-18745][SQL] Fix signed integer overflow due to toInt cast ## What changes were proposed in this pull request? This PR avoids that the result of a `toInt` cast is negative due to signed integer overflow (e.g. `0x8000_0000L.toInt < 0`, since the low 32 bits set the sign bit). This PR performs the cast only after we can ensure the value is within the range of a signed integer (the result of `min(buffer.length, end - offset)` never exceeds `buffer.length`, which is an Int). ## How was this patch tested? Manually executed query68 of TPC-DS with 100TB Author: Kazuaki Ishizaki. Closes #16235 from kiszk/SPARK-18745. Project: http://git-wip-us.apache.org/repos/asf/spark/repo Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/d60ab5fd Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/d60ab5fd Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/d60ab5fd Branch: refs/heads/master Commit: d60ab5fd9b6af9aa5080a2d13b3589d8b79c5c5c Parents: b08b500 Author: Kazuaki Ishizaki Authored: Fri Dec 9 23:13:36 2016 +0100 Committer: Herman van Hovell Committed: Fri Dec 9 23:13:36 2016 +0100 -- .../apache/spark/sql/execution/joins/HashedRelation.scala| 8 1 file changed, 4 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/spark/blob/d60ab5fd/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala -- diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala index 8821c0d..b9f6601 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/joins/HashedRelation.scala @@ -670,9 +670,9 @@ private[execution] final class LongToUnsafeRowMap(val mm: TaskMemoryManager, cap var offset: Long = Platform.LONG_ARRAY_OFFSET val end = len * 8L + Platform.LONG_ARRAY_OFFSET while (offset < end) { - val size = Math.min(buffer.length, (end - offset).toInt) + val size = Math.min(buffer.length, end - offset) Platform.copyMemory(arr, offset, buffer, Platform.BYTE_ARRAY_OFFSET, size) - writeBuffer(buffer, 0, size) + writeBuffer(buffer, 0, size.toInt) offset += size } } @@ -710,8 +710,8 @@ private[execution] final class LongToUnsafeRowMap(val mm: TaskMemoryManager, cap var offset: Long = Platform.LONG_ARRAY_OFFSET val end = length * 8L + Platform.LONG_ARRAY_OFFSET while (offset < end) { - val size = Math.min(buffer.length, (end - offset).toInt) - readBuffer(buffer, 0, size) + val size = Math.min(buffer.length, end - offset) + readBuffer(buffer, 0, size.toInt) Platform.copyMemory(buffer, Platform.BYTE_ARRAY_OFFSET, array, offset, size) offset += size } - To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org For additional commands, e-mail: commits-h...@spark.apache.org