Github user ravipesala commented on a diff in the pull request:

    https://github.com/apache/carbondata/pull/2823#discussion_r228106032
  
    --- Diff: integration/spark-datasource/src/main/spark2.1andspark2.2/org/apache/spark/sql/CarbonVectorProxy.java ---
    @@ -150,127 +140,189 @@ public void reset() {
             columnarBatch.reset();
         }
     
    -    public void putRowToColumnBatch(int rowId, Object value, int offset) {
    -        org.apache.spark.sql.types.DataType t = dataType(offset);
    -        if (null == value) {
    -            putNull(rowId, offset);
    -        } else {
    -            if (t == org.apache.spark.sql.types.DataTypes.BooleanType) {
    -                putBoolean(rowId, (boolean) value, offset);
    -            } else if (t == org.apache.spark.sql.types.DataTypes.ByteType) {
    -                putByte(rowId, (byte) value, offset);
    -            } else if (t == org.apache.spark.sql.types.DataTypes.ShortType) {
    -                putShort(rowId, (short) value, offset);
    -            } else if (t == org.apache.spark.sql.types.DataTypes.IntegerType) {
    -                putInt(rowId, (int) value, offset);
    -            } else if (t == org.apache.spark.sql.types.DataTypes.LongType) {
    -                putLong(rowId, (long) value, offset);
    -            } else if (t == org.apache.spark.sql.types.DataTypes.FloatType) {
    -                putFloat(rowId, (float) value, offset);
    -            } else if (t == org.apache.spark.sql.types.DataTypes.DoubleType) {
    -                putDouble(rowId, (double) value, offset);
    -            } else if (t == org.apache.spark.sql.types.DataTypes.StringType) {
    -                UTF8String v = (UTF8String) value;
    -                putByteArray(rowId, v.getBytes(), offset);
    -            } else if (t instanceof org.apache.spark.sql.types.DecimalType) {
    -                DecimalType dt = (DecimalType) t;
    -                Decimal d = Decimal.fromDecimal(value);
    -                if (dt.precision() <= Decimal.MAX_INT_DIGITS()) {
    -                    putInt(rowId, (int) d.toUnscaledLong(), offset);
    -                } else if (dt.precision() <= Decimal.MAX_LONG_DIGITS()) {
    -                    putLong(rowId, d.toUnscaledLong(), offset);
    -                } else {
    -                    final BigInteger integer = d.toJavaBigDecimal().unscaledValue();
    -                    byte[] bytes = integer.toByteArray();
    -                    putByteArray(rowId, bytes, 0, bytes.length, offset);
    +
    +    public static class ColumnVectorProxy {
    +
    +        private ColumnVector vector;
    +
    +        public ColumnVectorProxy(ColumnarBatch columnarBatch, int ordinal) {
    +            this.vector = columnarBatch.column(ordinal);
    +        }
    +
    +        public void putRowToColumnBatch(int rowId, Object value, int offset) {
    +            org.apache.spark.sql.types.DataType t = dataType(offset);
    --- End diff --
    
    ok


---
