This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 4216b03ed4e [enhance](parquet) Support BYTE_STREAM_SPLIT encoding for 
parquet reader (#41683)
4216b03ed4e is described below

commit 4216b03ed4e6fe73607beac509f0b56bd3767240
Author: Socrates <[email protected]>
AuthorDate: Mon Oct 14 14:14:25 2024 +0800

    [enhance](parquet) Support BYTE_STREAM_SPLIT encoding for parquet reader 
(#41683)
    
    ## Proposed changes
    Impl ByteStreamSplitDecoder to decode BYTE_STREAM_SPLIT encoding
    parquet.
    related PR: https://github.com/apache/arrow/issues/42372
    
    > Apache Parquet does not have any encodings suitable for FP data and
    the available text compressors (zstd, gzip, etc) do not handle FP data
    very well.
    It is possible to apply a simple data transformation named "stream
    splitting". Such could be "byte stream splitting" which creates K
    streams of length N where K is the number of bytes in the data type (4
    for floats, 8 for doubles) and N is the number of elements in the
    sequence.
    
    ---------
    
    Co-authored-by: morningman <[email protected]>
---
 be/src/util/byte_stream_split.cpp                  | 119 +++++++++++++++++++++
 be/src/util/byte_stream_split.h                    |  37 +++++++
 .../format/parquet/byte_stream_split_decoder.cpp   |  95 ++++++++++++++++
 .../format/parquet/byte_stream_split_decoder.h     |  38 +++++++
 be/src/vec/exec/format/parquet/decoder.cpp         |  17 ++-
 be/test/util/byte_stream_split_test.cpp            |  95 ++++++++++++++++
 .../tvf/test_hdfs_parquet_group0.out               | Bin 22377 -> 23993 bytes
 .../tvf/test_hdfs_parquet_group4.out               | Bin 106165 -> 106266 bytes
 .../tvf/test_hdfs_parquet_group0.groovy            |  10 +-
 .../tvf/test_hdfs_parquet_group4.groovy            |   5 +-
 10 files changed, 403 insertions(+), 13 deletions(-)

diff --git a/be/src/util/byte_stream_split.cpp 
b/be/src/util/byte_stream_split.cpp
new file mode 100644
index 00000000000..0e0fc9257e1
--- /dev/null
+++ b/be/src/util/byte_stream_split.cpp
@@ -0,0 +1,119 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "byte_stream_split.h"
+
+#include <glog/logging.h>
+
+#include <array>
+#include <cstring>
+#include <vector>
+
+#include "gutil/port.h"
+
+namespace doris {
+
// Interleave `width` separate byte streams back into contiguous values.
// `src_streams` holds one read pointer per stream and is advanced in place as
// the streams are consumed. Writes `nvalues * width` bytes to `dest`.
inline void do_merge_streams(const uint8_t** src_streams, int width, int64_t nvalues,
                             uint8_t* dest) {
    // Value empirically chosen to provide the best performance on the author's machine
    constexpr int kBlockSize = 128;

    while (nvalues >= kBlockSize) {
        for (int stream = 0; stream < width; ++stream) {
            // Take kBlockSize bytes from the given stream and spread them
            // to their logical places in destination.
            const uint8_t* src = src_streams[stream];
            for (int i = 0; i < kBlockSize; i += 8) {
#if defined(IS_LITTLE_ENDIAN) || defined(IS_BIG_ENDIAN)
                // Load 8 source bytes with one wide load, then scatter them
                // with shifts; the shift direction depends on host byte order.
                uint64_t v;
                std::memcpy(&v, src + i, sizeof(v));
#if defined(IS_LITTLE_ENDIAN)
                dest[stream + i * width] = static_cast<uint8_t>(v);
                dest[stream + (i + 1) * width] = static_cast<uint8_t>(v >> 8);
                dest[stream + (i + 2) * width] = static_cast<uint8_t>(v >> 16);
                dest[stream + (i + 3) * width] = static_cast<uint8_t>(v >> 24);
                dest[stream + (i + 4) * width] = static_cast<uint8_t>(v >> 32);
                dest[stream + (i + 5) * width] = static_cast<uint8_t>(v >> 40);
                dest[stream + (i + 6) * width] = static_cast<uint8_t>(v >> 48);
                dest[stream + (i + 7) * width] = static_cast<uint8_t>(v >> 56);
#else
                dest[stream + i * width] = static_cast<uint8_t>(v >> 56);
                dest[stream + (i + 1) * width] = static_cast<uint8_t>(v >> 48);
                dest[stream + (i + 2) * width] = static_cast<uint8_t>(v >> 40);
                dest[stream + (i + 3) * width] = static_cast<uint8_t>(v >> 32);
                dest[stream + (i + 4) * width] = static_cast<uint8_t>(v >> 24);
                dest[stream + (i + 5) * width] = static_cast<uint8_t>(v >> 16);
                dest[stream + (i + 6) * width] = static_cast<uint8_t>(v >> 8);
                dest[stream + (i + 7) * width] = static_cast<uint8_t>(v);
#endif
#else
                // Portable fallback: if neither endianness macro is defined,
                // copy byte-by-byte. Previously this case emitted NO writes at
                // all for full blocks, silently producing garbage output.
                for (int b = 0; b < 8; ++b) {
                    dest[stream + (i + b) * width] = src[i + b];
                }
#endif
            }
            src_streams[stream] += kBlockSize;
        }
        dest += width * kBlockSize;
        nvalues -= kBlockSize;
    }

    // Epilog: handle the remaining (< kBlockSize) values one byte at a time.
    for (int stream = 0; stream < width; ++stream) {
        const uint8_t* src = src_streams[stream];
        for (int64_t i = 0; i < nvalues; ++i) {
            dest[stream + i * width] = src[i];
        }
    }
}
+
+template <int kNumStreams>
+void byte_stream_split_decode_scalar(const uint8_t* src, int width, int64_t 
offset,
+                                     int64_t num_values, int64_t stride, 
uint8_t* dest) {
+    DCHECK(width == kNumStreams);
+    std::array<const uint8_t*, kNumStreams> src_streams;
+    for (int stream = 0; stream < kNumStreams; ++stream) {
+        src_streams[stream] = &src[stream * stride + offset];
+    }
+    do_merge_streams(src_streams.data(), kNumStreams, num_values, dest);
+}
+
+inline void byte_stream_split_decode_scalar_dynamic(const uint8_t* src, int 
width, int64_t offset,
+                                                    int64_t num_values, 
int64_t stride,
+                                                    uint8_t* dest) {
+    std::vector<const uint8_t*> src_streams;
+    src_streams.resize(width);
+    for (int stream = 0; stream < width; ++stream) {
+        src_streams[stream] = &src[stream * stride + offset];
+    }
+    do_merge_streams(src_streams.data(), width, num_values, dest);
+}
+
+// TODO: optimize using simd: https://github.com/apache/arrow/pull/38529
+void byte_stream_split_decode(const uint8_t* src, int width, int64_t offset, 
int64_t num_values,
+                              int64_t stride, uint8_t* dest) {
+    switch (width) {
+    case 1:
+        memcpy(dest, src + offset * width, num_values);
+        return;
+    case 2:
+        return byte_stream_split_decode_scalar<2>(src, width, offset, 
num_values, stride, dest);
+    case 4:
+        return byte_stream_split_decode_scalar<4>(src, width, offset, 
num_values, stride, dest);
+    case 8:
+        return byte_stream_split_decode_scalar<8>(src, width, offset, 
num_values, stride, dest);
+    case 16:
+        return byte_stream_split_decode_scalar<16>(src, width, offset, 
num_values, stride, dest);
+    }
+    return byte_stream_split_decode_scalar_dynamic(src, width, offset, 
num_values, stride, dest);
+}
+
+} // namespace doris
diff --git a/be/src/util/byte_stream_split.h b/be/src/util/byte_stream_split.h
new file mode 100644
index 00000000000..4b016e2e692
--- /dev/null
+++ b/be/src/util/byte_stream_split.h
@@ -0,0 +1,37 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+
+namespace doris {
+
+/**
+ * @brief Decode BYTE_STREAM_SPLIT encoded data back into the original
+ * value-interleaved byte layout.
+ *
+ * @param src The encoded data (width concatenated byte streams).
+ * @param width The byte width of the value type (= the number of streams).
+ * @param offset The offset, in values, into each stream at which to start decoding.
+ * @param num_values The number of values to decode.
+ * @param stride The length in bytes of each stream.
+ * @param dest The buffer to store the decoded data.
+ */
+void byte_stream_split_decode(const uint8_t* src, int width, int64_t offset, 
int64_t num_values,
+                              int64_t stride, uint8_t* dest);
+
+} // namespace doris
diff --git a/be/src/vec/exec/format/parquet/byte_stream_split_decoder.cpp 
b/be/src/vec/exec/format/parquet/byte_stream_split_decoder.cpp
new file mode 100644
index 00000000000..cc01166dbaf
--- /dev/null
+++ b/be/src/vec/exec/format/parquet/byte_stream_split_decoder.cpp
@@ -0,0 +1,95 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "byte_stream_split_decoder.h"
+
+#include <cstdint>
+
+#include "util/byte_stream_split.h"
+
+namespace doris::vectorized {
+
+Status ByteStreamSplitDecoder::decode_values(MutableColumnPtr& doris_column, 
DataTypePtr& data_type,
+                                             ColumnSelectVector& select_vector,
+                                             bool is_dict_filter) {
+    if (select_vector.has_filter()) {
+        return _decode_values<true>(doris_column, data_type, select_vector, 
is_dict_filter);
+    } else {
+        return _decode_values<false>(doris_column, data_type, select_vector, 
is_dict_filter);
+    }
+}
+
+template <bool has_filter>
+Status ByteStreamSplitDecoder::_decode_values(MutableColumnPtr& doris_column,
+                                              DataTypePtr& data_type,
+                                              ColumnSelectVector& 
select_vector,
+                                              bool is_dict_filter) {
+    size_t non_null_size = select_vector.num_values() - 
select_vector.num_nulls();
+    if (UNLIKELY(_offset + non_null_size > _data->size)) {
+        return Status::IOError(
+                "Out-of-bounds access in parquet data decoder: offset = {}, 
non_null_size = "
+                "{},size = {}",
+                _offset, non_null_size, _data->size);
+    }
+
+    size_t primitive_length = 
remove_nullable(data_type)->get_size_of_value_in_memory();
+    size_t data_index = doris_column->size() * primitive_length;
+    size_t scale_size = (select_vector.num_values() - 
select_vector.num_filtered()) *
+                        (_type_length / primitive_length);
+    doris_column->resize(doris_column->size() + scale_size);
+    char* raw_data = const_cast<char*>(doris_column->get_raw_data().data);
+    ColumnSelectVector::DataReadType read_type;
+    DCHECK(_data->get_size() % _type_length == 0);
+    int64_t stride = _data->get_size() / _type_length;
+
+    while (size_t run_length = 
select_vector.get_next_run<has_filter>(&read_type)) {
+        switch (read_type) {
+        case ColumnSelectVector::CONTENT: {
+            byte_stream_split_decode(reinterpret_cast<const 
uint8_t*>(_data->get_data()),
+                                     _type_length, _offset / _type_length, 
run_length, stride,
+                                     reinterpret_cast<uint8_t*>(raw_data) + 
data_index);
+            _offset += run_length * _type_length;
+            data_index += run_length * _type_length;
+            break;
+        }
+        case ColumnSelectVector::NULL_DATA: {
+            data_index += run_length * _type_length;
+            break;
+        }
+        case ColumnSelectVector::FILTERED_CONTENT: {
+            _offset += _type_length * run_length;
+            break;
+        }
+        case ColumnSelectVector::FILTERED_NULL: {
+            // do nothing
+            break;
+        }
+        }
+    }
+    return Status::OK();
+}
+
+Status ByteStreamSplitDecoder::skip_values(size_t num_values) {
+    _offset += _type_length * num_values;
+    if (UNLIKELY(_offset > _data->size)) {
+        return Status::IOError(
+                "Out-of-bounds access in parquet data decoder: offset = {}, 
size = {}", _offset,
+                _data->size);
+    }
+    return Status::OK();
+}
+}; // namespace doris::vectorized
diff --git a/be/src/vec/exec/format/parquet/byte_stream_split_decoder.h 
b/be/src/vec/exec/format/parquet/byte_stream_split_decoder.h
new file mode 100644
index 00000000000..6b1bf242b09
--- /dev/null
+++ b/be/src/vec/exec/format/parquet/byte_stream_split_decoder.h
@@ -0,0 +1,38 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "vec/exec/format/parquet/decoder.h"
+
+namespace doris::vectorized {
+class ByteStreamSplitDecoder final : public Decoder {
+public:
+    ByteStreamSplitDecoder() = default;
+    ~ByteStreamSplitDecoder() override = default;
+
+    Status decode_values(MutableColumnPtr& doris_column, DataTypePtr& 
data_type,
+                         ColumnSelectVector& select_vector, bool 
is_dict_filter) override;
+
+    template <bool has_filter>
+    Status _decode_values(MutableColumnPtr& doris_column, DataTypePtr& 
data_type,
+                          ColumnSelectVector& select_vector, bool 
is_dict_filter);
+
+    Status skip_values(size_t num_values) override;
+};
+
+} // namespace doris::vectorized
diff --git a/be/src/vec/exec/format/parquet/decoder.cpp 
b/be/src/vec/exec/format/parquet/decoder.cpp
index 87daa781004..335f3029da4 100644
--- a/be/src/vec/exec/format/parquet/decoder.cpp
+++ b/be/src/vec/exec/format/parquet/decoder.cpp
@@ -24,10 +24,10 @@
 #include "vec/exec/format/parquet/bool_rle_decoder.h"
 #include "vec/exec/format/parquet/byte_array_dict_decoder.h"
 #include "vec/exec/format/parquet/byte_array_plain_decoder.h"
+#include "vec/exec/format/parquet/byte_stream_split_decoder.h"
 #include "vec/exec/format/parquet/delta_bit_pack_decoder.h"
 #include "vec/exec/format/parquet/fix_length_dict_decoder.hpp"
 #include "vec/exec/format/parquet/fix_length_plain_decoder.h"
-#include "vec/exec/format/parquet/schema_desc.h"
 
 namespace doris::vectorized {
 
@@ -118,6 +118,21 @@ Status Decoder::get_decoder(tparquet::Type::type type, 
tparquet::Encoding::type
             return Status::InternalError("DELTA_LENGTH_BYTE_ARRAY only 
supports BYTE_ARRAY.");
         }
         break;
+    case tparquet::Encoding::BYTE_STREAM_SPLIT:
+        switch (type) {
+        case tparquet::Type::INT32:
+        case tparquet::Type::INT64:
+        case tparquet::Type::INT96:
+        case tparquet::Type::FLOAT:
+        case tparquet::Type::DOUBLE:
+        case tparquet::Type::FIXED_LEN_BYTE_ARRAY:
+            decoder.reset(new ByteStreamSplitDecoder());
+            break;
+        default:
+            return Status::InternalError("Unsupported type {}(encoding={}) in 
parquet decoder",
+                                         tparquet::to_string(type), 
tparquet::to_string(encoding));
+        }
+        break;
     default:
         return Status::InternalError("Unsupported encoding {}(type={}) in 
parquet decoder",
                                      tparquet::to_string(encoding), 
tparquet::to_string(type));
diff --git a/be/test/util/byte_stream_split_test.cpp 
b/be/test/util/byte_stream_split_test.cpp
new file mode 100644
index 00000000000..d4a07bf4a73
--- /dev/null
+++ b/be/test/util/byte_stream_split_test.cpp
@@ -0,0 +1,95 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "util/byte_stream_split.h"
+
+#include <glog/logging.h>
+
+#include <algorithm>
+#include <cstdint>
+#include <limits>
+#include <vector>
+
+#include "gtest/gtest.h"
+
+namespace doris {
+
// Reference encoder used only by the tests: transpose the value-interleaved
// input into one contiguous stream per byte position.
void byte_stream_split_encode(const uint8_t* src, int64_t length, int width, uint8_t* dest) {
    const int nums = length / width;
    for (int byte_pos = 0; byte_pos < width; byte_pos++) {
        for (int value = 0; value < nums; value++) {
            dest[byte_pos * nums + value] = src[value * width + byte_pos];
        }
    }
}
+
+template <typename T>
+void test_byte_stream_split(const T* data, int64_t length) {
+    int width = sizeof(T);
+    int nums = length / width;
+    T* encoded_data = new T[nums];
+    T* decoded_data = new T[nums];
+    byte_stream_split_encode((const uint8_t*)data, length, width, 
(uint8_t*)encoded_data);
+    int offset = 0;
+    int run_length = 4;
+    while (offset < nums) {
+        int num_values = std::min(run_length, nums - offset);
+        byte_stream_split_decode((const uint8_t*)encoded_data, width, offset, 
num_values, nums,
+                                 (uint8_t*)decoded_data + offset * width);
+        offset += run_length;
+    }
+
+    for (int i = 0; i < nums; i++) {
+        EXPECT_EQ(data[i], decoded_data[i]);
+    }
+    delete[] encoded_data;
+    delete[] decoded_data;
+}
+
// Round-trip encode/decode across all supported value widths (1, 2, 4 and 8
// bytes) using boundary values of each type.
TEST(ByteStreamSplit, ByteStreamSplitDecode) {
    // width = 1: decoding degenerates to a plain memcpy.
    int8_t data1[] = {0, -1, 1, INT8_MIN / 2, INT8_MAX / 2, INT8_MIN, INT8_MAX};
    test_byte_stream_split(data1, sizeof(data1));
    // width = 2
    int16_t data2[] = {0, -1, 1, INT16_MIN / 2, INT16_MAX / 2, INT16_MIN, INT16_MAX};
    test_byte_stream_split(data2, sizeof(data2));
    // width = 4
    int32_t data3[] = {0, -1, 1, INT32_MIN / 2, INT32_MAX / 2, INT32_MIN, INT32_MAX};
    test_byte_stream_split(data3, sizeof(data3));
    // width = 8
    int64_t data4[] = {
            0,         -1,       1, INT64_MIN / 4, INT64_MAX / 4, INT64_MIN / 2, INT64_MAX / 2,
            INT64_MIN, INT64_MAX};
    test_byte_stream_split(data4, sizeof(data4));
    // width = 4 floating point, including extreme magnitudes.
    float data5[] = {0.0,
                     -1.0,
                     1.0,
                     0.0000001,
                     -0.0000001,
                     std::numeric_limits<float>::min() / 2,
                     std::numeric_limits<float>::max() / 2,
                     std::numeric_limits<float>::min(),
                     std::numeric_limits<float>::max()};
    test_byte_stream_split(data5, sizeof(data5));
    // width = 8 floating point.
    double data6[] = {0.0,
                      -1.0,
                      1.0,
                      0.000000000000001,
                      -0.000000000000001,
                      std::numeric_limits<double>::min() / 2,
                      std::numeric_limits<double>::max() / 2,
                      std::numeric_limits<double>::min(),
                      std::numeric_limits<double>::max()};
    test_byte_stream_split(data6, sizeof(data6));
}
+
+} // namespace doris
diff --git 
a/regression-test/data/external_table_p0/tvf/test_hdfs_parquet_group0.out 
b/regression-test/data/external_table_p0/tvf/test_hdfs_parquet_group0.out
index cccf17bb0bb..6b58c1478b1 100644
Binary files 
a/regression-test/data/external_table_p0/tvf/test_hdfs_parquet_group0.out and 
b/regression-test/data/external_table_p0/tvf/test_hdfs_parquet_group0.out differ
diff --git 
a/regression-test/data/external_table_p0/tvf/test_hdfs_parquet_group4.out 
b/regression-test/data/external_table_p0/tvf/test_hdfs_parquet_group4.out
index c323a62e5e2..c47e4c740de 100644
Binary files 
a/regression-test/data/external_table_p0/tvf/test_hdfs_parquet_group4.out and 
b/regression-test/data/external_table_p0/tvf/test_hdfs_parquet_group4.out differ
diff --git 
a/regression-test/suites/external_table_p0/tvf/test_hdfs_parquet_group0.groovy 
b/regression-test/suites/external_table_p0/tvf/test_hdfs_parquet_group0.groovy
index 1e1587baa90..2af8eef6cb5 100644
--- 
a/regression-test/suites/external_table_p0/tvf/test_hdfs_parquet_group0.groovy
+++ 
b/regression-test/suites/external_table_p0/tvf/test_hdfs_parquet_group0.groovy
@@ -55,13 +55,10 @@ 
suite("test_hdfs_parquet_group0","external,hive,tvf,external_docker") {
 
 
             uri = "${defaultFS}" + 
"/user/doris/tvf_data/test_hdfs_parquet/group0/byte_stream_split_extended.gzip.parquet"
-            test {
-                sql """ select * from HDFS(
+            order_qt_test_4 """ select * from HDFS(
                         "uri" = "${uri}",
                         "hadoop.username" = "${hdfsUserName}",
                         "format" = "parquet") limit 10; """
-                exception "Unsupported encoding 
BYTE_STREAM_SPLIT(type=FIXED_LEN_BYTE_ARRAY) in parquet decoder"
-            }
 
 
             uri = "${defaultFS}" + 
"/user/doris/tvf_data/test_hdfs_parquet/group0/nested_maps.snappy.parquet"
@@ -258,13 +255,10 @@ 
suite("test_hdfs_parquet_group0","external,hive,tvf,external_docker") {
 
 
             uri = "${defaultFS}" + 
"/user/doris/tvf_data/test_hdfs_parquet/group0/byte_stream_split.zstd.parquet"
-            test{
-                sql """ select * from HDFS(
+            order_qt_test_32 """ select * from HDFS(
                         "uri" = "${uri}",
                         "hadoop.username" = "${hdfsUserName}",
                         "format" = "parquet") limit 10; """
-                exception "Unsupported encoding BYTE_STREAM_SPLIT(type=FLOAT) 
in parquet decoder"
-            }
 
 
             uri = "${defaultFS}" + 
"/user/doris/tvf_data/test_hdfs_parquet/group0/rle-dict-snappy-checksum.parquet"
diff --git 
a/regression-test/suites/external_table_p0/tvf/test_hdfs_parquet_group4.groovy 
b/regression-test/suites/external_table_p0/tvf/test_hdfs_parquet_group4.groovy
index d8ca75515b0..9b6fd1e1c60 100644
--- 
a/regression-test/suites/external_table_p0/tvf/test_hdfs_parquet_group4.groovy
+++ 
b/regression-test/suites/external_table_p0/tvf/test_hdfs_parquet_group4.groovy
@@ -195,13 +195,10 @@ 
suite("test_hdfs_parquet_group4","external,hive,tvf,external_docker") {
 
 
             uri = "${defaultFS}" + 
"/user/doris/tvf_data/test_hdfs_parquet/group4/byte_stream_split_float_and_double.parquet"
-            test {
-                sql """ select * from HDFS(
+            order_qt_test_24 """ select * from HDFS(
                         "uri" = "${uri}",
                         "hadoop.username" = "${hdfsUserName}",
                         "format" = "parquet") limit 10; """
-                exception "Unsupported encoding BYTE_STREAM_SPLIT(type=FLOAT) 
in parquet decoder"
-            }
 
 
             uri = "${defaultFS}" + 
"/user/doris/tvf_data/test_hdfs_parquet/group4/part-00000-eebaa9a7-1a21-4e28-806c-24f24f8a0353-c000.snappy.parquet"


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to