This is an automated email from the ASF dual-hosted git repository.
yiguolei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-doris.git
The following commit(s) were added to refs/heads/master by this push:
new 73c4ec7167 Fix some typos in be/. (#9681)
73c4ec7167 is described below
commit 73c4ec7167ae0aa8746756aa28fa99b68e62a4f7
Author: Shuangchi He <[email protected]>
AuthorDate: Thu May 19 20:55:39 2022 +0800
Fix some typos in be/. (#9681)
---
be/src/common/config.h | 2 +-
be/src/exec/lzo_decompressor.cpp | 2 +-
be/src/exec/partitioned_aggregation_node.h | 4 ++--
be/src/gutil/strings/split.cc | 2 +-
be/src/olap/compaction.cpp | 2 +-
be/src/olap/memtable.cpp | 2 +-
be/src/olap/olap_index.cpp | 2 +-
be/src/olap/rowset/run_length_integer_writer.h | 2 +-
be/src/olap/rowset/segment_reader.cpp | 2 +-
be/src/olap/rowset/segment_v2/binary_dict_page.h | 2 +-
be/src/olap/rowset/segment_v2/segment_iterator.cpp | 4 ++--
be/src/runtime/datetime_value.h | 2 +-
be/src/runtime/disk_io_mgr.h | 4 ++--
be/src/runtime/mem_pool.h | 2 +-
be/src/runtime/spill_sorter.cc | 2 +-
be/src/util/cgroup_util.cpp | 2 +-
be/src/vec/olap/olap_data_convertor.cpp | 8 ++++----
be/src/vec/runtime/vdatetime_value.h | 2 +-
be/src/vec/sink/vtablet_sink.cpp | 2 +-
be/test/runtime/disk_io_mgr_test.cpp | 6 +++---
20 files changed, 28 insertions(+), 28 deletions(-)
diff --git a/be/src/common/config.h b/be/src/common/config.h
index d171af23ec..3f04a75ed9 100644
--- a/be/src/common/config.h
+++ b/be/src/common/config.h
@@ -704,7 +704,7 @@ CONF_Int32(max_minidump_file_number, "10");
// and the valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0.
CONF_String(kafka_broker_version_fallback, "0.10.0");
-// The the number of pool siz of routine load consumer.
+// The number of pool size of routine load consumer.
// If you meet the error describe in
https://github.com/edenhill/librdkafka/issues/3608
// Change this size to 0 to fix it temporarily.
CONF_Int32(routine_load_consumer_pool_size, "10");
diff --git a/be/src/exec/lzo_decompressor.cpp b/be/src/exec/lzo_decompressor.cpp
index 43c0a0474a..a2af9e94fd 100644
--- a/be/src/exec/lzo_decompressor.cpp
+++ b/be/src/exec/lzo_decompressor.cpp
@@ -384,7 +384,7 @@ std::string LzopDecompressor::debug_info() {
<< " header size: " << _header_info.header_size
<< " header checksum type: " << _header_info.header_checksum_type
<< " input checksum type: " << _header_info.input_checksum_type
- << " ouput checksum type: " << _header_info.output_checksum_type;
+ << " output checksum type: " << _header_info.output_checksum_type;
return ss.str();
}
#endif // DORIS_WITH_LZO
diff --git a/be/src/exec/partitioned_aggregation_node.h
b/be/src/exec/partitioned_aggregation_node.h
index fc3c486ce1..847c96a8a3 100644
--- a/be/src/exec/partitioned_aggregation_node.h
+++ b/be/src/exec/partitioned_aggregation_node.h
@@ -405,7 +405,7 @@ private:
void Close(bool finalize_rows);
/// Spill this partition. 'more_aggregate_rows' = true means that more
aggregate rows
- /// may be appended to the the partition before appending unaggregated
rows. On
+ /// may be appended to the partition before appending unaggregated
rows. On
/// success, one of the streams is left with a write iterator: the
aggregated stream
/// if 'more_aggregate_rows' is true or the unaggregated stream
otherwise.
Status Spill(bool more_aggregate_rows);
@@ -528,7 +528,7 @@ private:
/// GetNext() using the agg fn evaluators' Serialize() or Finalize().
/// For the Finalize() case if the output tuple is different from the
intermediate
/// tuple, then a new tuple is allocated from 'pool' to hold the final
result.
- /// Grouping values are copied into the output tuple and the the output
tuple holding
+ /// Grouping values are copied into the output tuple and the output tuple
holding
/// the finalized/serialized aggregate values is returned.
/// TODO: Coordinate the allocation of new tuples with the release of
memory
/// so as not to make memory consumption blow up.
diff --git a/be/src/gutil/strings/split.cc b/be/src/gutil/strings/split.cc
index 62a6d33d56..846635e876 100644
--- a/be/src/gutil/strings/split.cc
+++ b/be/src/gutil/strings/split.cc
@@ -950,7 +950,7 @@ bool SplitStringIntoKeyValuePairs(const string& line, const
string& key_value_de
// values; just record that our split failed.
success = false;
}
- // we expect atmost one value because we passed in an empty vsep to
+ // we expect at most one value because we passed in an empty vsep to
// SplitStringIntoKeyValues
DCHECK_LE(value.size(), 1);
kv_pairs->push_back(make_pair(key, value.empty() ? "" : value[0]));
diff --git a/be/src/olap/compaction.cpp b/be/src/olap/compaction.cpp
index db197eb683..245a0fd4eb 100644
--- a/be/src/olap/compaction.cpp
+++ b/be/src/olap/compaction.cpp
@@ -205,7 +205,7 @@ void Compaction::gc_output_rowset() {
}
}
-// Find the longest consecutive version path in "rowset", from begining.
+// Find the longest consecutive version path in "rowset", from beginning.
// Two versions before and after the missing version will be saved in
missing_version,
// if missing_version is not null.
Status
Compaction::find_longest_consecutive_version(std::vector<RowsetSharedPtr>*
rowsets,
diff --git a/be/src/olap/memtable.cpp b/be/src/olap/memtable.cpp
index 769169a8ac..c7f94ead4f 100644
--- a/be/src/olap/memtable.cpp
+++ b/be/src/olap/memtable.cpp
@@ -245,7 +245,7 @@ void MemTable::_aggregate_two_row_in_block(RowInBlock*
new_row, RowInBlock* row_
vectorized::Block MemTable::_collect_vskiplist_results() {
VecTable::Iterator it(_vec_skip_list.get());
vectorized::Block in_block = _input_mutable_block.to_block();
- // TODO: should try to insert data by column, not by row. to opt the the
code
+ // TODO: should try to insert data by column, not by row. to opt the code
if (_keys_type == KeysType::DUP_KEYS) {
for (it.SeekToFirst(); it.Valid(); it.Next()) {
_output_mutable_block.add_row(&in_block, it.key()->_row_pos);
diff --git a/be/src/olap/olap_index.cpp b/be/src/olap/olap_index.cpp
index a674572d90..02d8a7d4eb 100644
--- a/be/src/olap/olap_index.cpp
+++ b/be/src/olap/olap_index.cpp
@@ -377,7 +377,7 @@ const OLAPIndexOffset MemIndex::find(const RowCursor& k,
RowCursor* helper_curso
BinarySearchIterator index_fin(_meta[off].count());
if (index_comparator.set_segment_id(off) != Status::OK()) {
- throw "index of of range";
+ throw "index out of range";
}
if (!find_last) {
diff --git a/be/src/olap/rowset/run_length_integer_writer.h
b/be/src/olap/rowset/run_length_integer_writer.h
index a8f544f793..f4008d2816 100644
--- a/be/src/olap/rowset/run_length_integer_writer.h
+++ b/be/src/olap/rowset/run_length_integer_writer.h
@@ -260,7 +260,7 @@ private:
EncodingType _encoding;
uint16_t _num_literals;
int64_t _zig_zag_literals[MAX_SCOPE]; // for direct encoding
- int64_t _base_reduced_literals[MAX_SCOPE]; // for for patched base encoding
+ int64_t _base_reduced_literals[MAX_SCOPE]; // for patched base encoding
int64_t _adj_deltas[MAX_SCOPE - 1]; // for delta encoding
int64_t _fixed_delta;
uint32_t _zz_bits_90p;
diff --git a/be/src/olap/rowset/segment_reader.cpp
b/be/src/olap/rowset/segment_reader.cpp
index 02b4097cbd..1883e4bd01 100644
--- a/be/src/olap/rowset/segment_reader.cpp
+++ b/be/src/olap/rowset/segment_reader.cpp
@@ -400,7 +400,7 @@ Status SegmentReader::_pick_delete_row_groups(uint32_t
first_block, uint32_t las
if (true == del_not_satisfied || 0 ==
delete_condition.del_cond->columns().size()) {
//if state is DEL_PARTIAL_SATISFIED last_time, cannot be set
as DEL_NOT_SATISFIED
- //it is special for for delete condition
+ //it is special for delete condition
if (DEL_PARTIAL_SATISFIED == _include_blocks[j]) {
continue;
} else {
diff --git a/be/src/olap/rowset/segment_v2/binary_dict_page.h
b/be/src/olap/rowset/segment_v2/binary_dict_page.h
index 5fc4636c91..e1dcdba49b 100644
--- a/be/src/olap/rowset/segment_v2/binary_dict_page.h
+++ b/be/src/olap/rowset/segment_v2/binary_dict_page.h
@@ -47,7 +47,7 @@ enum { BINARY_DICT_PAGE_HEADER_SIZE = 4 };
// Either header + embedded codeword page, which can be encoded with any
// int PageBuilder, when mode_ = DICT_ENCODING.
// Or header + embedded BinaryPlainPage, when mode_ = PLAIN_ENCODING.
-// Data pages start with mode_ = DICT_ENCODING, when the the size of dictionary
+// Data pages start with mode_ = DICT_ENCODING, when the size of dictionary
// page go beyond the option_->dict_page_size, the subsequent data pages will
switch
// to string plain page automatically.
class BinaryDictPageBuilder : public PageBuilder {
diff --git a/be/src/olap/rowset/segment_v2/segment_iterator.cpp
b/be/src/olap/rowset/segment_v2/segment_iterator.cpp
index c8dbc44891..6d3bfcd5ec 100644
--- a/be/src/olap/rowset/segment_v2/segment_iterator.cpp
+++ b/be/src/olap/rowset/segment_v2/segment_iterator.cpp
@@ -797,7 +797,7 @@ void SegmentIterator::_init_current_block(
auto cid = _schema.column_id(i);
auto column_desc = _schema.column(cid);
- // the column in in block must clear() here to insert new data
+ // the column in block must clear() here to insert new data
if (_is_pred_column[cid] ||
i >= block->columns()) { //todo(wb) maybe we can release it after
output block
current_columns[cid]->clear();
@@ -818,7 +818,7 @@ void
SegmentIterator::_output_non_pred_columns(vectorized::Block* block) {
SCOPED_RAW_TIMER(&_opts.stats->output_col_ns);
for (auto cid : _non_predicate_columns) {
auto loc = _schema_block_id_map[cid];
- // if loc < block->block->columns() means the the column is delete
column and should
+ // if loc < block->block->columns() means the column is delete column
and should
// not output by block, so just skip the column.
if (loc < block->columns()) {
block->replace_by_position(loc,
std::move(_current_return_columns[cid]));
diff --git a/be/src/runtime/datetime_value.h b/be/src/runtime/datetime_value.h
index e0d7f7b8cc..a970e615c1 100644
--- a/be/src/runtime/datetime_value.h
+++ b/be/src/runtime/datetime_value.h
@@ -412,7 +412,7 @@ public:
// WEEK_YEAR (1)
// If not set:
// Week is in range 0-53
- // Week 0 is returned for the the last week of the previous year (for
+ // Week 0 is returned for the last week of the previous year (for
// a date at start of january) In this case one can get 53 for the
// first week of next year. This flag ensures that the week is
// relevant for the given year. Note that this flag is only
diff --git a/be/src/runtime/disk_io_mgr.h b/be/src/runtime/disk_io_mgr.h
index 0706b234a3..1b47d6f139 100644
--- a/be/src/runtime/disk_io_mgr.h
+++ b/be/src/runtime/disk_io_mgr.h
@@ -166,7 +166,7 @@ class MemTracker;
// the cached buffer is returned (BufferDescriptor::Return()).
//
// Remote filesystem support (e.g. S3):
-// Remote filesystems are modeled as "remote disks". That is, there is a
seperate disk
+// Remote filesystems are modeled as "remote disks". That is, there is a
separate disk
// queue for each supported remote filesystem type. In order to maximize
throughput,
// multiple connections are opened in parallel by having multiple threads
running per
// queue. Also note that reading from a remote filesystem service can be more
CPU
@@ -226,7 +226,7 @@ public:
};
// Buffer struct that is used by the caller and IoMgr to pass read buffers.
- // It is is expected that only one thread has ownership of this object at a
+ // It is expected that only one thread has ownership of this object at a
// time.
class BufferDescriptor {
public:
diff --git a/be/src/runtime/mem_pool.h b/be/src/runtime/mem_pool.h
index 6a211fcbcf..769e5d67ae 100644
--- a/be/src/runtime/mem_pool.h
+++ b/be/src/runtime/mem_pool.h
@@ -101,7 +101,7 @@ public:
~MemPool();
/// Allocates a section of memory of 'size' bytes with DEFAULT_ALIGNMENT
at the end
- /// of the the current chunk. Creates a new chunk if there aren't any
chunks
+ /// of the current chunk. Creates a new chunk if there aren't any chunks
/// with enough capacity.
uint8_t* allocate(int64_t size, Status* rst = nullptr) {
return allocate<false>(size, DEFAULT_ALIGNMENT, rst);
diff --git a/be/src/runtime/spill_sorter.cc b/be/src/runtime/spill_sorter.cc
index faab00f4d6..c86a9eaebe 100644
--- a/be/src/runtime/spill_sorter.cc
+++ b/be/src/runtime/spill_sorter.cc
@@ -121,7 +121,7 @@ private:
// into output_batch.
// If this run was unpinned, one block (2 if there are var-len slots) is
pinned while
// rows are filled into output_batch. The block is unpinned before the
next block is
- // pinned. Atmost 1 (2) block(s) will be pinned at any time.
+ // pinned. At most 1 (2) block(s) will be pinned at any time.
// If the run was pinned, the blocks are not unpinned (SpillSorter holds
on to the memory).
// In either case, all rows in output_batch will have their fixed and
var-len data from
// the same block.
diff --git a/be/src/util/cgroup_util.cpp b/be/src/util/cgroup_util.cpp
index e4ee9f5538..9258df1556 100644
--- a/be/src/util/cgroup_util.cpp
+++ b/be/src/util/cgroup_util.cpp
@@ -91,7 +91,7 @@ static Status read_cgroup_value(const string&
limit_file_path, int64_t* val) {
strings::Substitute("Error reading $0: $1", limit_file_path,
get_str_err_msg()));
}
StringParser::ParseResult pr;
- // Parse into an an int64_t If it overflows, returning the max value of
int64_t is ok because that
+ // Parse into an int64_t. If it overflows, returning the max value of
int64_t is ok because that
// is effectively unlimited.
*val = StringParser::string_to_int<int64_t>(line.c_str(), line.size(),
&pr);
if ((pr != StringParser::PARSE_SUCCESS && pr !=
StringParser::PARSE_OVERFLOW)) {
diff --git a/be/src/vec/olap/olap_data_convertor.cpp
b/be/src/vec/olap/olap_data_convertor.cpp
index 0dbd95a2ae..10fe57174a 100644
--- a/be/src/vec/olap/olap_data_convertor.cpp
+++ b/be/src/vec/olap/olap_data_convertor.cpp
@@ -230,7 +230,7 @@ Status
OlapBlockDataConvertor::OlapColumnDataConvertorBitMap::convert_to_olap()
slice->size = slice_size;
raw_data += slice_size;
} else {
- // TODO: this may not be neccessary, check and remove later
+ // TODO: this may not be necessary, check and remove later
slice->data = nullptr;
slice->size = 0;
}
@@ -307,7 +307,7 @@ Status
OlapBlockDataConvertor::OlapColumnDataConvertorHLL::convert_to_olap() {
slice->size = slice_size;
raw_data += slice_size;
} else {
- // TODO: this may not be neccessary, check and remove later
+ // TODO: this may not be necessary, check and remove later
slice->data = nullptr;
slice->size = 0;
}
@@ -388,7 +388,7 @@ Status
OlapBlockDataConvertor::OlapColumnDataConvertorChar::convert_to_olap() {
slice->data = (char*)char_data + string_offset;
slice->size = string_length;
} else {
- // TODO: this may not be neccessary, check and remove later
+ // TODO: this may not be necessary, check and remove later
slice->data = nullptr;
slice->size = 0;
}
@@ -475,7 +475,7 @@ Status
OlapBlockDataConvertor::OlapColumnDataConvertorVarChar::convert_to_olap()
"`string_type_length_soft_limit_bytes` in vec
engine.");
}
} else {
- // TODO: this may not be neccessary, check and remove later
+ // TODO: this may not be necessary, check and remove later
slice->data = nullptr;
slice->size = 0;
}
diff --git a/be/src/vec/runtime/vdatetime_value.h
b/be/src/vec/runtime/vdatetime_value.h
index 728556186c..3f2fa3a24b 100644
--- a/be/src/vec/runtime/vdatetime_value.h
+++ b/be/src/vec/runtime/vdatetime_value.h
@@ -411,7 +411,7 @@ public:
// WEEK_YEAR (1)
// If not set:
// Week is in range 0-53
- // Week 0 is returned for the the last week of the previous year (for
+ // Week 0 is returned for the last week of the previous year (for
// a date at start of january) In this case one can get 53 for the
// first week of next year. This flag ensures that the week is
// relevant for the given year. Note that this flag is only
diff --git a/be/src/vec/sink/vtablet_sink.cpp b/be/src/vec/sink/vtablet_sink.cpp
index 16edd09e77..bd048a3438 100644
--- a/be/src/vec/sink/vtablet_sink.cpp
+++ b/be/src/vec/sink/vtablet_sink.cpp
@@ -561,7 +561,7 @@ Status VOlapTableSink::_validate_data(RuntimeState* state,
vectorized::Block* bl
break;
}
- // Dispose the the column should do not contain the NULL value
+ // Dispose the column should do not contain the NULL value
// Only tow case:
// 1. column is nullable but the desc is not nullable
// 2. desc->type is BITMAP
diff --git a/be/test/runtime/disk_io_mgr_test.cpp
b/be/test/runtime/disk_io_mgr_test.cpp
index f370aed7ff..36c5cfda9c 100644
--- a/be/test/runtime/disk_io_mgr_test.cpp
+++ b/be/test/runtime/disk_io_mgr_test.cpp
@@ -625,13 +625,13 @@ TEST_F(DiskIoMgrTest, SingleReaderCancel) {
EXPECT_TRUE(status.ok());
std::atomic<int> num_ranges_processed;
- int num_succesful_ranges = ranges.size() / 2;
+ int num_successful_ranges = ranges.size() / 2;
// Read half the ranges
- for (int i = 0; i < num_succesful_ranges; ++i) {
+ for (int i = 0; i < num_successful_ranges; ++i) {
scan_range_thread(&io_mgr, reader, data, strlen(data),
Status::OK(), 1,
&num_ranges_processed);
}
- EXPECT_EQ(num_ranges_processed, num_succesful_ranges);
+ EXPECT_EQ(num_ranges_processed, num_successful_ranges);
// Start up some threads and then cancel
ThreadGroup threads;
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]