GitHub user davies commented on the pull request:

    https://github.com/apache/spark/pull/11032#issuecomment-178846031
  
    ```java
    public Object generate(Object[] references) {
      return new GeneratedIterator(references);
    }

    class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {

      private Object[] references;
      private boolean agg_initAgg;
      private boolean agg_bufIsNull;
      private long agg_bufValue;
      private UnsafeRow agg_result;
      private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
      private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;

      public GeneratedIterator(Object[] references) {
        this.references = references;
        agg_initAgg = false;

        agg_result = new UnsafeRow(1);
        this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
        this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
      }

      private void agg_doAggregateWithoutKey() throws java.io.IOException {
        // initialize aggregation buffer
        agg_bufIsNull = false;
        agg_bufValue = 0L;

        while (input.hasNext()) {
          InternalRow inputadapter_row = (InternalRow) input.next();
          /* input[0, bigint] */
          boolean inputadapter_isNull = inputadapter_row.isNullAt(0);
          long inputadapter_value = inputadapter_isNull ? -1L : (inputadapter_row.getLong(0));
          // do aggregate
          /* (input[0, bigint] + input[1, bigint]) */
          long agg_value3 = -1L;
          agg_value3 = agg_bufValue + inputadapter_value;
          // update aggregation buffer
          agg_bufIsNull = false;
          agg_bufValue = agg_value3;
        }
      }

      protected void processNext() throws java.io.IOException {
        if (!agg_initAgg) {
          agg_initAgg = true;
          agg_doAggregateWithoutKey();

          // output the result
          agg_rowWriter.zeroOutNullBytes();

          if (agg_bufIsNull) {
            agg_rowWriter.setNullAt(0);
          } else {
            agg_rowWriter.write(0, agg_bufValue);
          }
          currentRow = agg_result;
          return;
        }
      }
    }
    ```
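
    The first dump above is the whole-stage code generated for an aggregate without grouping keys: `agg_doAggregateWithoutKey()` folds every input row into a single `long` buffer, and `processNext()` runs it once, writes the buffer into an `UnsafeRow`, and publishes it via `currentRow`. For context, here is a minimal sketch of the `BufferedRowIterator` contract the generated class compiles against (an illustrative approximation, not the actual Spark source):

    ```java
    import java.io.IOException;
    import java.util.Iterator;

    // Simplified stand-in for org.apache.spark.sql.execution.BufferedRowIterator,
    // shown only to illustrate the contract the generated subclass relies on.
    abstract class RowIteratorSketch {
      protected Iterator<Object> input;  // upstream rows (the InputAdapter child)
      protected Object currentRow;       // set by the generated processNext()

      // Generated subclasses assign currentRow here when a row is ready.
      protected abstract void processNext() throws IOException;

      public boolean hasNext() throws IOException {
        if (currentRow == null) {
          processNext();                 // produce the next row, or leave it null at the end
        }
        return currentRow != null;
      }

      public Object next() {
        Object row = currentRow;
        currentRow = null;               // hand the buffered row to the caller
        return row;
      }
    }
    ```

    Because `agg_initAgg` guards the body of `processNext()`, a second call leaves `currentRow` unset, so iteration ends after the single result row. The second dump below fuses Range, a Filter `((id & 1) = 1)`, and the same aggregate into one loop, so no rows are materialized between operators; judging from the constants, it corresponds to a filtered count over `range(209715200)`.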
    
    ```java
    public Object generate(Object[] references) {
      return new GeneratedIterator(references);
    }

    class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {

      private Object[] references;
      private boolean agg_initAgg;
      private boolean agg_bufIsNull;
      private long agg_bufValue;
      private boolean range_initRange;
      private long range_partitionEnd;
      private long range_number;
      private boolean range_overflow;
      private UnsafeRow agg_result;
      private org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder agg_holder;
      private org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter agg_rowWriter;

      public GeneratedIterator(Object[] references) {
        this.references = references;
        agg_initAgg = false;

        range_initRange = false;
        range_partitionEnd = 0L;
        range_number = 0L;
        range_overflow = false;
        agg_result = new UnsafeRow(1);
        this.agg_holder = new org.apache.spark.sql.catalyst.expressions.codegen.BufferHolder(agg_result, 0);
        this.agg_rowWriter = new org.apache.spark.sql.catalyst.expressions.codegen.UnsafeRowWriter(agg_holder, 1);
      }

      private void agg_doAggregateWithoutKey() throws java.io.IOException {
        // initialize aggregation buffer
        agg_bufIsNull = false;
        agg_bufValue = 0L;

        // initialize Range
        if (!range_initRange) {
          range_initRange = true;
          if (input.hasNext()) {
            initRange(((InternalRow) input.next()).getInt(0));
          } else {
            return;
          }
        }

        while (!range_overflow && range_number < range_partitionEnd) {
          long range_value = range_number;
          range_number += 1L;
          if (range_number < range_value ^ 1L < 0) {
            range_overflow = true;
          }

          /* ((input[0, bigint] & 1) = 1) */
          /* (input[0, bigint] & 1) */
          long filter_value1 = -1L;
          filter_value1 = range_value & 1L;

          boolean filter_value = false;
          filter_value = filter_value1 == 1L;
          if (filter_value) {
            // do aggregate
            /* (input[0, bigint] + 1) */
            long agg_value1 = -1L;
            agg_value1 = agg_bufValue + 1L;
            // update aggregation buffer
            agg_bufIsNull = false;
            agg_bufValue = agg_value1;
          }
        }
      }

      private void initRange(int idx) {
        java.math.BigInteger index = java.math.BigInteger.valueOf(idx);
        java.math.BigInteger numSlice = java.math.BigInteger.valueOf(1L);
        java.math.BigInteger numElement = java.math.BigInteger.valueOf(209715200L);
        java.math.BigInteger step = java.math.BigInteger.valueOf(1L);
        java.math.BigInteger start = java.math.BigInteger.valueOf(0L);

        java.math.BigInteger st = index.multiply(numElement).divide(numSlice).multiply(step).add(start);
        if (st.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
          range_number = Long.MAX_VALUE;
        } else if (st.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
          range_number = Long.MIN_VALUE;
        } else {
          range_number = st.longValue();
        }

        java.math.BigInteger end = index.add(java.math.BigInteger.ONE).multiply(numElement).divide(numSlice)
          .multiply(step).add(start);
        if (end.compareTo(java.math.BigInteger.valueOf(Long.MAX_VALUE)) > 0) {
          range_partitionEnd = Long.MAX_VALUE;
        } else if (end.compareTo(java.math.BigInteger.valueOf(Long.MIN_VALUE)) < 0) {
          range_partitionEnd = Long.MIN_VALUE;
        } else {
          range_partitionEnd = end.longValue();
        }
      }

      protected void processNext() throws java.io.IOException {
        if (!agg_initAgg) {
          agg_initAgg = true;
          agg_doAggregateWithoutKey();

          // output the result
          agg_rowWriter.zeroOutNullBytes();

          if (agg_bufIsNull) {
            agg_rowWriter.setNullAt(0);
          } else {
            agg_rowWriter.write(0, agg_bufValue);
          }
          currentRow = agg_result;
          return;
        }
      }
    }
    ```
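
    Two details in the Range code are easy to misread. The overflow test `range_number < range_value ^ 1L < 0` is valid Java rather than a typo: relational `<` binds tighter than `^`, so it evaluates as `(range_number < range_value) ^ (1L < 0)`, which with the constant step `1L` is true exactly when the increment wrapped the counter past `Long.MAX_VALUE`. Likewise, `initRange` computes the partition bounds in `java.math.BigInteger` and clamps them into the `long` range before assigning them. A small standalone demo of the overflow idiom, written here with a variable step to show the general form:

    ```java
    // Demonstrates the overflow test used by the generated Range loop:
    // (next < previous) ^ (step < 0) is true exactly when next = previous + step
    // wrapped around the long range.
    public class RangeOverflowDemo {
      public static void main(String[] args) {
        long step = 1L;
        long number = Long.MAX_VALUE - 1;
        for (int i = 0; i < 3; i++) {
          long previous = number;
          number += step;
          boolean overflow = number < previous ^ step < 0;
          System.out.println(previous + " + " + step + " = " + number + ", overflow = " + overflow);
        }
      }
    }
    ```

    With `step = 1L` known at code-generation time, the `step < 0` term is constant, which is why the generated code can inline it as `1L < 0`.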

