This is an automated email from the ASF dual-hosted git repository.
morningman pushed a commit to branch clucene
in repository https://gitbox.apache.org/repos/asf/doris-thirdparty.git
The following commit(s) were added to refs/heads/clucene by this push:
new eb3c0f7 [Fix](chinese analyzer) fix memory leak in chinese analyzer (#45)
eb3c0f7 is described below
commit eb3c0f7a684408d7d72bce1f05505860f4f6cc56
Author: airborne12 <[email protected]>
AuthorDate: Thu Mar 23 19:31:36 2023 +0800
[Fix](chinese analyzer) fix memory leak in chinese analyzer (#45)
1. Failure to implement the reusableTokenStream method meant a new token
stream was created on every call, which can cause memory leaks (see the
caller-side sketch below).
2. Refactored the Jieba instance into a singleton.
3. Replaced the norms array with nullptr in TermScorer to save memory.
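
As a caller-side illustration of point 1 (the analyzer/reader variables and
the _T("content") field name are hypothetical, not part of this commit):
tokenStream() allocates a fresh tokenizer chain per document, whereas
reusableTokenStream() returns the analyzer-owned cached stream and merely
re-points it at the new input.

    // Hypothetical indexing loop sketching why reuse avoids the leak.
    lucene::analysis::TokenStream* ts =
            analyzer->reusableTokenStream(_T("content"), reader);
    lucene::analysis::Token tok;
    while (ts->next(&tok) != nullptr) {
        // feed the token to the index writer
    }
    // ts is owned and cached by the analyzer; the caller must not delete it.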
---
.../CLucene/analysis/LanguageBasedAnalyzer.cpp | 50 +++++++++++++++++++---
.../CLucene/analysis/LanguageBasedAnalyzer.h | 20 +++++----
.../CLucene/analysis/jieba/ChineseTokenizer.cpp | 18 +++-----
.../CLucene/analysis/jieba/ChineseTokenizer.h | 22 ++++++++--
src/core/CLucene/search/IndexSearcher.cpp | 4 +-
src/core/CLucene/search/TermQuery.cpp | 2 +-
src/core/CLucene/util/bkd/bkd_writer.cpp | 5 ++-
7 files changed, 84 insertions(+), 37 deletions(-)
diff --git a/src/contribs-lib/CLucene/analysis/LanguageBasedAnalyzer.cpp b/src/contribs-lib/CLucene/analysis/LanguageBasedAnalyzer.cpp
index 1af3a9c..2db2e94 100644
--- a/src/contribs-lib/CLucene/analysis/LanguageBasedAnalyzer.cpp
+++ b/src/contribs-lib/CLucene/analysis/LanguageBasedAnalyzer.cpp
@@ -6,13 +6,13 @@
------------------------------------------------------------------------------*/
#include "CLucene/_ApiHeader.h"
-#include "LanguageBasedAnalyzer.h"
#include "CLucene/analysis/Analyzers.h"
#include "CLucene/analysis/cjk/CJKAnalyzer.h"
#include "CLucene/analysis/jieba/ChineseTokenizer.h"
#include "CLucene/analysis/standard/StandardFilter.h"
#include "CLucene/analysis/standard/StandardTokenizer.h"
#include "CLucene/snowball/SnowballFilter.h"
+#include "LanguageBasedAnalyzer.h"
CL_NS_USE(util)
CL_NS_USE2(analysis, cjk)
@@ -29,19 +29,57 @@ LanguageBasedAnalyzer::LanguageBasedAnalyzer(const TCHAR *language, bool stem) {
_tcsncpy(lang, language, 100);
this->stem = stem;
}
-LanguageBasedAnalyzer::~LanguageBasedAnalyzer() {
-}
+
+LanguageBasedAnalyzer::~LanguageBasedAnalyzer() = default;
void LanguageBasedAnalyzer::setLanguage(const TCHAR *language) {
_tcsncpy(lang, language, 100);
}
-void LanguageBasedAnalyzer::setStem(bool stem) {
- this->stem = stem;
+
+void LanguageBasedAnalyzer::setStem(bool s) {
+ this->stem = s;
}
-void LanguageBasedAnalyzer::initDict(const std::string& dictPath) {
+
+void LanguageBasedAnalyzer::initDict(const std::string &dictPath) {
if (_tcscmp(lang, _T("chinese")) == 0) {
CL_NS2(analysis, jieba)::ChineseTokenizer::init(dictPath);
}
}
+
+TokenStream *LanguageBasedAnalyzer::reusableTokenStream(const TCHAR * /*fieldName*/, CL_NS(util)::Reader *reader) {
+ TokenStream *tokenizer = getPreviousTokenStream();
+ if (tokenizer == nullptr) {
+ if (_tcscmp(lang, _T("cjk")) == 0) {
+ tokenizer = _CLNEW CL_NS2(analysis, cjk)::CJKTokenizer(reader);
+ } else if (_tcscmp(lang, _T("chinese")) == 0) {
+            tokenizer = _CLNEW CL_NS2(analysis, jieba)::ChineseTokenizer(reader);
+ } else {
+ BufferedReader *bufferedReader = reader->__asBufferedReader();
+ if (bufferedReader == NULL)
+                tokenizer = _CLNEW StandardTokenizer(_CLNEW FilteredBufferedReader(reader, false), true);
+ else
+ tokenizer = _CLNEW StandardTokenizer(bufferedReader);
+
+ tokenizer = _CLNEW StandardFilter(tokenizer, true);
+
+ if (stem)
+                tokenizer = _CLNEW SnowballFilter(tokenizer, lang, true);//todo: should check whether snowball supports the language
+
+            if (stem) //hmm... this could be configured separately from stem
+                tokenizer = _CLNEW ISOLatin1AccentFilter(tokenizer, true);//todo: this should really only be applied to latin languages...
+
+ //lower case after the latin1 filter
+ tokenizer = _CLNEW LowerCaseFilter(tokenizer, true);
+ }
+ setPreviousTokenStream(tokenizer);
+ } else {
+ auto t = dynamic_cast<Tokenizer *>(tokenizer);
+ if (t != nullptr) {
+ t->reset(reader);
+ }
+ }
+ return tokenizer;
+}
+
TokenStream *LanguageBasedAnalyzer::tokenStream(const TCHAR *fieldName, Reader *reader) {
TokenStream *ret = NULL;
if (_tcscmp(lang, _T("cjk")) == 0) {
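
A subtlety in the reuse path above: for the cjk and chinese branches the
cached stream is the Tokenizer itself, so the dynamic_cast succeeds and
reset(reader) re-points it at the new input; for the standard branch the
cached object is the outermost filter, the cast yields nullptr, and the
chain is returned without a reset. Reduced to its core (condensed from the
diff above, not extra code in the commit):

    TokenStream* cached = getPreviousTokenStream();
    if (auto* t = dynamic_cast<Tokenizer*>(cached)) {
        t->reset(reader);  // cjk/chinese: the cached stream is the tokenizer
    }                      // filter chains fall through without a reset
    return cached;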
diff --git a/src/contribs-lib/CLucene/analysis/LanguageBasedAnalyzer.h b/src/contribs-lib/CLucene/analysis/LanguageBasedAnalyzer.h
index 147dc1e..22fc3dd 100644
--- a/src/contribs-lib/CLucene/analysis/LanguageBasedAnalyzer.h
+++ b/src/contribs-lib/CLucene/analysis/LanguageBasedAnalyzer.h
@@ -11,16 +11,18 @@
CL_NS_DEF(analysis)
-class CLUCENE_CONTRIBS_EXPORT LanguageBasedAnalyzer: public CL_NS(analysis)::Analyzer{
- TCHAR lang[100];
- bool stem;
+class CLUCENE_CONTRIBS_EXPORT LanguageBasedAnalyzer : public CL_NS(analysis)::Analyzer {
+ TCHAR lang[100]{};
+ bool stem;
+
public:
-    explicit LanguageBasedAnalyzer(const TCHAR* language=nullptr, bool stem=true);
- ~LanguageBasedAnalyzer() override;
- void setLanguage(const TCHAR* language);
- void setStem(bool stem);
- void initDict(const std::string& dictPath);
-    TokenStream* tokenStream(const TCHAR* fieldName, CL_NS(util)::Reader* reader) override;
+    explicit LanguageBasedAnalyzer(const TCHAR *language = nullptr, bool stem = true);
+ ~LanguageBasedAnalyzer() override;
+ void setLanguage(const TCHAR *language);
+ void setStem(bool s);
+ void initDict(const std::string &dictPath);
+    TokenStream *tokenStream(const TCHAR *fieldName, CL_NS(util)::Reader *reader) override;
+    TokenStream *reusableTokenStream(const TCHAR * /*fieldName*/, CL_NS(util)::Reader *reader) override;
};
CL_NS_END
diff --git a/src/contribs-lib/CLucene/analysis/jieba/ChineseTokenizer.cpp b/src/contribs-lib/CLucene/analysis/jieba/ChineseTokenizer.cpp
index 35aea7b..60c9eaf 100644
--- a/src/contribs-lib/CLucene/analysis/jieba/ChineseTokenizer.cpp
+++ b/src/contribs-lib/CLucene/analysis/jieba/ChineseTokenizer.cpp
@@ -3,38 +3,30 @@
#include "CLucene/util/CLStreams.h"
#include <memory>
-CL_NS_DEF2(analysis,jieba)
+CL_NS_DEF2(analysis, jieba)
CL_NS_USE(analysis)
CL_NS_USE(util)
-std::unique_ptr<cppjieba::Jieba> ChineseTokenizer::cppjieba = nullptr;
ChineseTokenizer::ChineseTokenizer(lucene::util::Reader *reader) : Tokenizer(reader) {
buffer[0] = 0;
}
void ChineseTokenizer::init(const std::string &dictPath) {
- if(cppjieba == nullptr) {
- cppjieba = std::make_unique<cppjieba::Jieba>(
- dictPath + "/" + "jieba.dict.utf8",
- dictPath + "/" + "hmm_model.utf8",
- dictPath + "/" + "user.dict.utf8",
- dictPath + "/" + "idf.utf8",
- dictPath + "/" + "stop_words.utf8");
- }
+ JiebaSingleton::getInstance(dictPath);
}
CL_NS(analysis)::Token *ChineseTokenizer::next(lucene::analysis::Token *token) {
// try to read all words
- if (dataLen == 0) {
+ if (dataLen == 0 || bufferIndex >= dataLen) {
auto bufferLen = input->read((const void **) &ioBuffer, 1, 0);
if (bufferLen == -1) {
dataLen = 0;
+ bufferIndex = 0;
return NULL;
}
char tmp_buffer[4 * bufferLen];
lucene_wcsntoutf8(tmp_buffer, ioBuffer, bufferLen, 4 * bufferLen);
- init();
- cppjieba->Cut(tmp_buffer, tokens_text, true);
+ JiebaSingleton::getInstance().Cut(tmp_buffer, tokens_text, true);
dataLen = tokens_text.size();
}
if (bufferIndex < dataLen) {
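
The widened refill condition above is what makes reuse work: with the old
test (dataLen == 0 alone), a tokenizer reset onto a new reader would never
read again once it had produced one batch, because dataLen stayed nonzero.
Condensed from the diff above:

    // Refill only when the previous batch of tokens has been fully
    // handed out, so a reused (reset) tokenizer reads the new input
    // instead of reporting end-of-stream forever.
    if (dataLen == 0 || bufferIndex >= dataLen) {
        // read from input and Cut the new text into tokens_text
    }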
diff --git a/src/contribs-lib/CLucene/analysis/jieba/ChineseTokenizer.h b/src/contribs-lib/CLucene/analysis/jieba/ChineseTokenizer.h
index 61ab100..e642be8 100644
--- a/src/contribs-lib/CLucene/analysis/jieba/ChineseTokenizer.h
+++ b/src/contribs-lib/CLucene/analysis/jieba/ChineseTokenizer.h
@@ -10,6 +10,21 @@
CL_NS_DEF2(analysis,jieba)
+class JiebaSingleton {
+public:
+ static cppjieba::Jieba& getInstance(const std::string& dictPath = "") {
+ static cppjieba::Jieba instance(dictPath + "/" + "jieba.dict.utf8",
+ dictPath + "/" + "hmm_model.utf8",
+ dictPath + "/" + "user.dict.utf8",
+ dictPath + "/" + "idf.utf8",
+ dictPath + "/" + "stop_words.utf8");
+ return instance;
+ }
+
+private:
+ JiebaSingleton() = default;
+};
+
class ChineseTokenizer : public lucene::analysis::Tokenizer {
private:
/** word offset, used to imply which character(in ) is parsed */
@@ -33,19 +48,18 @@ private:
*/
const TCHAR* ioBuffer{};
std::vector<std::string> tokens_text;
- std::vector<std::unique_ptr<Token>> tokens;
+ //std::vector<std::unique_ptr<Token>> tokens;
public:
- static std::unique_ptr<cppjieba::Jieba> cppjieba;
// Constructor
explicit ChineseTokenizer(lucene::util::Reader *reader);
static void init(const std::string& dictPath="");
// Destructor
- ~ChineseTokenizer() override {}
+ ~ChineseTokenizer() override = default;
// Override the next method to tokenize Chinese text using Jieba
- lucene::analysis::Token* next(lucene::analysis::Token* token);
+ lucene::analysis::Token* next(lucene::analysis::Token* token) override;
};
CL_NS_END2
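
JiebaSingleton above is a Meyers singleton: the function-local static is
constructed exactly once, and since C++11 that initialization is
thread-safe. The practical consequence is that dictPath is only honored on
the first call (the path below is a placeholder):

    ChineseTokenizer::init("/path/to/dict");  // first call loads the dictionaries
    cppjieba::Jieba& a = JiebaSingleton::getInstance();           // same instance
    cppjieba::Jieba& b = JiebaSingleton::getInstance("ignored");  // argument ignored

So init(dictPath) must run before the first tokenization; otherwise the
singleton is constructed with empty dictionary paths.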
diff --git a/src/core/CLucene/search/IndexSearcher.cpp b/src/core/CLucene/search/IndexSearcher.cpp
index 1461ea7..b2db731 100644
--- a/src/core/CLucene/search/IndexSearcher.cpp
+++ b/src/core/CLucene/search/IndexSearcher.cpp
@@ -378,8 +378,8 @@ CL_NS_DEF(search)
Scorer* scorer = NULL;
try
{
- Weight* weight = query->weight(this);
- Scorer* scorer = weight->scorer(reader);
+ weight = query->weight(this);
+ scorer = weight->scorer(reader);
if (scorer == NULL) {
Query* wq = weight->getQuery();
if (wq != query) // query was rewritten
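
The IndexSearcher change fixes a shadowing bug: the old code re-declared
weight and scorer inside the try block, so the outer NULL-initialized
pointers that the cleanup code sees were never assigned and the allocations
leaked. A generic illustration of the pattern (not code from this commit):

    struct Foo {};
    Foo* p = nullptr;
    try {
        Foo* p = new Foo();  // BUG: shadows the outer p, which stays nullptr
        // ... work that may throw ...
    } catch (...) {
        delete p;            // outer p is still nullptr; the Foo leaks
        throw;
    }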
diff --git a/src/core/CLucene/search/TermQuery.cpp b/src/core/CLucene/search/TermQuery.cpp
index 79b4fd5..afc9a4d 100644
--- a/src/core/CLucene/search/TermQuery.cpp
+++ b/src/core/CLucene/search/TermQuery.cpp
@@ -147,7 +147,7 @@ CL_NS_DEF(search)
return NULL;
return _CLNEW TermScorer(this, termDocs, similarity,
-                                          reader->norms(_term->field()));
+ nullptr);
}
Explanation* TermWeight::explain(IndexReader* reader, int32_t doc){
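
Passing nullptr here constructs the scorer without the per-field norms
array (one byte per document), which is where the memory saving in point 3
comes from. This relies on TermScorer tolerating a null norms pointer; the
usual Lucene-family guard looks like the following sketch (an assumption
about the scorer's internals, not code shown in this diff):

    // Assumed guard: treat a missing norms array as a neutral norm of 1.0f.
    float norm = (norms == nullptr) ? 1.0f : Similarity::decodeNorm(norms[doc]);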
diff --git a/src/core/CLucene/util/bkd/bkd_writer.cpp b/src/core/CLucene/util/bkd/bkd_writer.cpp
index d4e6d16..dc58315 100644
--- a/src/core/CLucene/util/bkd/bkd_writer.cpp
+++ b/src/core/CLucene/util/bkd/bkd_writer.cpp
@@ -53,8 +53,9 @@ namespace bkd {
bytes_per_doc_ = packed_bytes_length_ + 4 + 4;
}
-        max_points_sort_in_heap_ = (int32_t) (0.5 * (maxMBSortInHeap * 1024 * 1024) / (bytes_per_doc_ * numDataDims));
-
+        // because offline sort is not implemented yet, we just use heap for all points
+ max_points_sort_in_heap_ = totalPointCount;
+        //max_points_sort_in_heap_ = (int32_t) (0.5 * (maxMBSortInHeap * 1024 * 1024) / (bytes_per_doc_ * numDataDims));
        // Finally, we must be able to hold at least the leaf node in heap during build:
if (max_points_sort_in_heap_ < maxPointsInLeafNode) {
            auto msg = "maxMBSortInHeap=" + std::to_string(maxMBSortInHeap) + " only allows for maxPointsSortInHeap=" + std::to_string(max_points_sort_in_heap_) + ", but this is less than maxPointsInLeafNode=" + std::to_string(maxPointsInLeafNode) + "; either increase maxMBSortInHeap or decrease maxPointsInLeafNode";
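
For scale: under the old formula, a 16 MB sort budget with bytes_per_doc_ =
16 and a single data dimension capped the heap sort at 0.5 * 16 * 1024 *
1024 / 16 = 524288 points; the new code instead admits all totalPointCount
points, trading memory for not needing the unimplemented offline sort. A
worked check with illustrative numbers (not values from this commit):

    double maxMBSortInHeap = 16.0;
    int32_t bytes_per_doc = 16, numDataDims = 1;
    auto old_cap = (int32_t) (0.5 * (maxMBSortInHeap * 1024 * 1024)
                              / (bytes_per_doc * numDataDims));  // == 524288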
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]