 accessibility/source/standard/vclxaccessibletabcontrol.cxx | 11
 bridges/Module_bridges.mk | 2
 configure.ac | 38
 cui/source/factory/init.cxx | 2
 distro-configs/LibreOfficeEmscripten.conf | 54
 download.lst | 2
 external/icu/ExternalProject_icu.mk | 10
 external/icu/UnpackedTarball_icu.mk | 10
 external/icu/clang-cl.patch.0 | 26
 external/icu/icu-ubsan.patch.0 | 11
 external/icu/icu.changeset_36724.patch.1 | 39
 external/icu/icu.changeset_36727.patch.1 | 55
 external/icu/icu.changeset_36801.patch.1 | 1222
 external/icu/icu4c-changeset-39671.patch.1 | 189
 external/icu/icu4c-emscripten.patch.1 | 116
 external/icu/icu4c-icu11451.patch.1 | 11
 external/icu/khmerbreakengine.patch | 1110
 external/icu/khmerdict.dict |binary
 filter/qa/cppunit/data/dxf/pass/loop-1.dxf |17320 +++++++++++++
 filter/qa/cppunit/data/dxf/pass/loop-2.dxf |13974 ++++++++++
 filter/qa/cppunit/filters-eps-test.cxx | 4
 filter/source/graphicfilter/icgm/cgm.cxx | 8
 filter/source/graphicfilter/icgm/class1.cxx | 17
 filter/source/graphicfilter/icgm/class4.cxx | 25
 filter/source/graphicfilter/idxf/dxf2mtf.cxx | 6
 filter/source/graphicfilter/idxf/dxfentrd.cxx | 15
 filter/source/graphicfilter/idxf/dxfentrd.hxx | 25
 filter/source/graphicfilter/idxf/dxfgrprd.cxx | 4
 filter/source/graphicfilter/idxf/dxfgrprd.hxx | 1
 filter/source/graphicfilter/idxf/dxfreprd.cxx | 7
 filter/source/graphicfilter/idxf/dxfreprd.hxx | 2
 filter/source/graphicfilter/ieps/ieps.cxx | 259
 filter/source/graphicfilter/ios2met/ios2met.cxx | 4
 filter/source/graphicfilter/ipsd/ipsd.cxx | 14
 filter/source/graphicfilter/itiff/ccidecom.cxx | 5
 filter/source/graphicfilter/itiff/itiff.cxx | 85
 filter/source/msfilter/msdffimp.cxx | 48
 filter/source/msfilter/svdfppt.cxx | 45
 hwpfilter/Library_hwp.mk | 1
 hwpfilter/inc/pch/precompiled_hwp.hxx | 58
 hwpfilter/source/attributes.cxx | 9
 hwpfilter/source/attributes.hxx | 25
 hwpfilter/source/cspline.cxx | 23
 hwpfilter/source/datecode.h | 23
 hwpfilter/source/drawdef.h | 8
 hwpfilter/source/drawing.h | 56
 hwpfilter/source/fontmap.cxx | 26
 hwpfilter/source/formula.cxx | 29
 hwpfilter/source/formula.h | 5
 hwpfilter/source/grammar.cxx | 75
 hwpfilter/source/hbox.cxx | 108
 hwpfilter/source/hbox.h | 197
 hwpfilter/source/hcode.cxx | 67
 hwpfilter/source/hfont.cxx | 20
 hwpfilter/source/hfont.h | 4
 hwpfilter/source/hgzip.cxx | 24
 hwpfilter/source/hgzip.h | 2
 hwpfilter/source/hinfo.cxx | 137
 hwpfilter/source/hinfo.h | 10
 hwpfilter/source/hiodev.cxx | 66
 hwpfilter/source/hiodev.h | 76
 hwpfilter/source/hpara.cxx | 151
 hwpfilter/source/hpara.h | 52
 hwpfilter/source/hstream.cxx | 21
 hwpfilter/source/hstream.hxx | 11
 hwpfilter/source/hstyle.cxx | 17
 hwpfilter/source/hstyle.h | 2
 hwpfilter/source/htags.cxx | 33
 hwpfilter/source/htags.h | 10
 hwpfilter/source/hutil.cxx | 9
 hwpfilter/source/hutil.h | 4
 hwpfilter/source/hwpeq.cxx | 608
 hwpfilter/source/hwpfile.cxx | 195
 hwpfilter/source/hwpfile.h | 39
 hwpfilter/source/hwplib.h | 15
 hwpfilter/source/hwpread.cxx | 91
 hwpfilter/source/hwpreader.cxx | 1336 -
 hwpfilter/source/hwpreader.hxx | 58
 hwpfilter/source/lexer.cxx | 29
 hwpfilter/source/list.hxx | 294
 hwpfilter/source/mapping.h | 2
 hwpfilter/source/mzstring.cxx | 46
 hwpfilter/source/mzstring.h | 12
 hwpfilter/source/nodes.h | 14
 hwpfilter/source/solver.cxx | 59
 i18npool/source/breakiterator/breakiterator_unicode.cxx | 4
 include/osl/endian.h | 10
 include/sal/alloca.h | 2
 include/sal/config.h | 10
 include/sal/log-areas.dox | 1
 lotuswordpro/source/filter/lwpfilter.cxx | 5
 lotuswordpro/source/filter/lwpframelayout.cxx | 7
 lotuswordpro/source/filter/lwpframelayout.hxx | 1
 lotuswordpro/source/filter/lwpgrfobj.cxx | 8
 lotuswordpro/source/filter/lwpidxmgr.cxx | 4
 lotuswordpro/source/filter/lwpobjstrm.cxx | 9
 lotuswordpro/source/filter/lwpobjstrm.hxx | 1
 sal/osl/unx/socket.cxx | 2
 sal/osl/unx/system.hxx | 5
 sal/textenc/tcvtjp6.tab | 6
 sal/textenc/tcvtkr6.tab | 2
 sc/source/filter/excel/xiescher.cxx | 2
 sd/source/filter/ppt/pptin.cxx | 11
 sd/source/filter/ppt/pptin.hxx | 10
 sd/source/ui/view/drviews3.cxx | 351
 solenv/gbuild/platform/EMSCRIPTEN_INTEL_emcc.mk | 18
 svx/source/sdr/contact/viewcontactofsdrpathobj.cxx | 39
 svx/source/table/tablelayouter.cxx | 35
 sw/source/filter/ww8/ww8graf.cxx | 10
 sw/source/filter/ww8/ww8par.hxx | 2
 sw/source/filter/ww8/ww8par2.cxx | 44
 sw/source/filter/ww8/ww8par6.cxx | 2
 sw/source/filter/ww8/ww8scan.cxx | 5
 sw/source/filter/ww8/ww8struc.hxx | 2
 tools/inc/poly.h | 2
 tools/source/generic/poly.cxx | 13
 vcl/Library_vcl.mk | 6
 vcl/source/control/edit.cxx | 2
 vcl/source/filter/ixbm/xbmread.cxx | 4
 vcl/source/filter/ixpm/xpmread.cxx | 4
 vcl/source/filter/wmf/emfwr.cxx | 1
 vcl/source/filter/wmf/winmtf.cxx | 4
 vcl/source/filter/wmf/winwmf.cxx | 5
 vcl/source/gdi/cvtsvm.cxx | 111
 vcl/source/gdi/metaact.cxx | 2
 vcl/source/gdi/pngread.cxx | 43
 wizards/com/sun/star/wizards/web/BackgroundsDialog.py | 20
 writerfilter/source/dmapper/DomainMapper_Impl.cxx | 16
 128 files changed, 35497 insertions(+), 4277 deletions(-)
New commits: commit 7ce74a8546340434582d0c4482dcf526e9c0abd9 Author: Stephan Bergmann <[email protected]> Date: Tue Nov 3 14:48:35 2015 +0100 Use newly introduced gb_DEBUGINFO_FLAGS instead of hardcoded -g Change-Id: Iaaa80d997fa7babb9212787653c149b72d842a6c (cherry picked from commit c6822f927a73e5b2ee72208b139b6808116fe1e0) diff --git a/external/icu/ExternalProject_icu.mk b/external/icu/ExternalProject_icu.mk index 5817a6d4a7f0..0442997d869a 100644 --- a/external/icu/ExternalProject_icu.mk +++ b/external/icu/ExternalProject_icu.mk @@ -55,14 +55,14 @@ icu_CFLAGS:=" \ $(if $(debug),$(gb_DEBUG_CFLAGS),$(gb_COMPILEROPTFLAGS)) \ $(if $(ENABLE_LTO),$(gb_LTOFLAGS)) \ $(if $(filter GCC,$(COM)),-fno-strict-aliasing) \ - $(if $(filter $(true),$(gb_SYMBOL)),-g) \ + $(if $(filter $(true),$(gb_SYMBOL)),$(gb_DEBUGINFO_FLAGS)) \ $(if $(filter ANDROID,$(OS)),-fvisibility=hidden -fno-omit-frame-pointer)" icu_CXXFLAGS:="$(CXXFLAGS) $(CXXFLAGS_CXX11) \ $(if $(filter IOS,$(OS)),-DUCONFIG_NO_FILE_IO) \ $(if $(debug),$(gb_DEBUG_CFLAGS),$(gb_COMPILEROPTFLAGS)) \ $(if $(ENABLE_LTO),$(gb_LTOFLAGS)) \ $(if $(filter GCC,$(COM)),-fno-strict-aliasing) \ - $(if $(filter $(true),$(gb_SYMBOL)),-g) \ + $(if $(filter $(true),$(gb_SYMBOL)),$(gb_DEBUGINFO_FLAGS)) \ $(if $(filter ANDROID,$(OS)),-fvisibility=hidden -fno-omit-frame-pointer)" icu_LDFLAGS:=" \ $(if $(ENABLE_LTO),$(gb_LTOFLAGS)) \ commit 4666ee0c89209fb0bd6bcb85e8fcb073ba90b08f Author: Eike Rathke <[email protected]> Date: Thu Apr 20 22:06:23 2017 +0200 add ICU changeset-39671 fix for CVE-2017-7867 CVE-2017-7868 http://bugs.icu-project.org/trac/changeset/39671 https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=213 https://bugzilla.redhat.com/show_bug.cgi?id=1444101 Reviewed-on: https://gerrit.libreoffice.org/36754 Reviewed-by: Eike Rathke <[email protected]> Tested-by: Jenkins <[email protected]> (cherry picked from commit c7de8233d15ed0c90fef6c49a54d60cf10119f58) Backported to older MSVC using the UGLY_SIZEOF_MAPTOUCHARS macro instead of sizeof(UTF8Buf::mapToUChars). 
Change-Id: I4e776ad4fe63c77057b0c823f8672a2b6703346f Reviewed-on: https://gerrit.libreoffice.org/36776 Tested-by: Jenkins <[email protected]> Reviewed-by: Michael Stahl <[email protected]> (cherry picked from commit 91f5d002884cae1a60768e9caa9d182f41fb7be6) (cherry picked from commit 3cdac6bb2defce45342dff04400c7a37bb8a2453) diff --git a/external/icu/UnpackedTarball_icu.mk b/external/icu/UnpackedTarball_icu.mk index 4a6a11477af3..86369ef8e43b 100644 --- a/external/icu/UnpackedTarball_icu.mk +++ b/external/icu/UnpackedTarball_icu.mk @@ -28,6 +28,7 @@ $(eval $(call gb_UnpackedTarball_add_patches,icu,\ $(if $(filter-out ANDROID,$(OS)),external/icu/icu4c-icudata-stdlibs.diff) \ $(if $(filter EMSCRIPTEN,$(OS)),external/icu/icu4c-emscripten.patch.1) \ external/icu/khmerbreakengine.patch \ + external/icu/icu4c-changeset-39671.patch.1 \ )) $(eval $(call gb_UnpackedTarball_add_file,icu,source/data/brkitr/khmerdict.dict,external/icu/khmerdict.dict)) diff --git a/external/icu/icu4c-changeset-39671.patch.1 b/external/icu/icu4c-changeset-39671.patch.1 new file mode 100644 index 000000000000..b8ac1385364e --- /dev/null +++ b/external/icu/icu4c-changeset-39671.patch.1 @@ -0,0 +1,189 @@ +diff -ur icu.org/source/common/utext.cpp icu/source/common/utext.cpp +--- icu.org/source/common/utext.cpp 2016-06-15 20:58:17.000000000 +0200 ++++ icu/source/common/utext.cpp 2017-04-21 16:38:15.993398034 +0200 +@@ -847,9 +847,15 @@ + //------------------------------------------------------------------------------ + + // Chunk size. +-// Must be less than 85, because of byte mapping from UChar indexes to native indexes. +-// Worst case is three native bytes to one UChar. (Supplemenaries are 4 native bytes +-// to two UChars.) ++// Must be less than 42 (256/6), because of byte mapping from UChar indexes to native indexes. ++// Worst case there are six UTF-8 bytes per UChar. ++// obsolete 6 byte form fd + 5 trails maps to fffd ++// obsolete 5 byte form fc + 4 trails maps to fffd ++// non-shortest 4 byte forms maps to fffd ++// normal supplementaries map to a pair of utf-16, two utf8 bytes per utf-16 unit ++// mapToUChars array size must allow for the worst case, 6. ++// This could be brought down to 4, by treating fd and fc as pure illegal, ++// rather than obsolete lead bytes. But that is not compatible with the utf-8 access macros. + // + enum { UTF8_TEXT_CHUNK_SIZE=32 }; + +@@ -867,6 +873,15 @@ + // pair. Doing this is simpler than checking for the edge case. + // + ++// erAck: older MSVC used on libreoffice-5-3 and 5-2 bails out with ++// error C2070: 'unknown': illegal sizeof operand ++// for sizeof(UTF8Buf::mapToUChars) ++// so have an ugly workaround: ++// First define a macro of the original size expression, so a follow-up patch ++// on the original code would fail.. ++#define UGLY_MAPTOUCHARS_SIZE (UTF8_TEXT_CHUNK_SIZE*6+6) ++#define UGLY_SIZEOF_MAPTOUCHARS (sizeof(uint8_t)*(UGLY_MAPTOUCHARS_SIZE)) ++ + struct UTF8Buf { + int32_t bufNativeStart; // Native index of first char in UChar buf + int32_t bufNativeLimit; // Native index following last char in buf. +@@ -889,7 +904,7 @@ + // Requires two extra slots, + // one for a supplementary starting in the last normal position, + // and one for an entry for the buffer limit position. +- uint8_t mapToUChars[UTF8_TEXT_CHUNK_SIZE*3+6]; // Map native offset from bufNativeStart to ++ uint8_t mapToUChars[UGLY_MAPTOUCHARS_SIZE]; // Map native offset from bufNativeStart to + // correspoding offset in filled part of buf. 
+ int32_t align; + }; +@@ -1032,6 +1047,7 @@ + // Requested index is in this buffer. + u8b = (UTF8Buf *)ut->p; // the current buffer + mapIndex = ix - u8b->toUCharsMapStart; ++ U_ASSERT(mapIndex < (int32_t)UGLY_SIZEOF_MAPTOUCHARS); + ut->chunkOffset = u8b->mapToUChars[mapIndex] - u8b->bufStartIdx; + return TRUE; + +@@ -1298,6 +1314,10 @@ + // Can only do this if the incoming index is somewhere in the interior of the string. + // If index is at the end, there is no character there to look at. + if (ix != ut->b) { ++ // Note: this function will only move the index back if it is on a trail byte ++ // and there is a preceding lead byte and the sequence from the lead ++ // through this trail could be part of a valid UTF-8 sequence ++ // Otherwise the index remains unchanged. + U8_SET_CP_START(s8, 0, ix); + } + +@@ -1311,7 +1331,10 @@ + UChar *buf = u8b->buf; + uint8_t *mapToNative = u8b->mapToNative; + uint8_t *mapToUChars = u8b->mapToUChars; +- int32_t toUCharsMapStart = ix - (UTF8_TEXT_CHUNK_SIZE*3 + 1); ++ int32_t toUCharsMapStart = ix - UGLY_SIZEOF_MAPTOUCHARS + 1; ++ // Note that toUCharsMapStart can be negative. Happens when the remaining ++ // text from current position to the beginning is less than the buffer size. ++ // + 1 because mapToUChars must have a slot at the end for the bufNativeLimit entry. + int32_t destIx = UTF8_TEXT_CHUNK_SIZE+2; // Start in the overflow region + // at end of buffer to leave room + // for a surrogate pair at the +@@ -1338,6 +1361,7 @@ + if (c<0x80) { + // Special case ASCII range for speed. + buf[destIx] = (UChar)c; ++ U_ASSERT(toUCharsMapStart <= srcIx); + mapToUChars[srcIx - toUCharsMapStart] = (uint8_t)destIx; + mapToNative[destIx] = (uint8_t)(srcIx - toUCharsMapStart); + } else { +@@ -1367,6 +1391,7 @@ + do { + mapToUChars[sIx-- - toUCharsMapStart] = (uint8_t)destIx; + } while (sIx >= srcIx); ++ U_ASSERT(toUCharsMapStart <= (srcIx+1)); + + // Set native indexing limit to be the current position. + // We are processing a non-ascii, non-native-indexing char now; +@@ -1541,6 +1566,7 @@ + U_ASSERT(index>=ut->chunkNativeStart+ut->nativeIndexingLimit); + U_ASSERT(index<=ut->chunkNativeLimit); + int32_t mapIndex = index - u8b->toUCharsMapStart; ++ U_ASSERT(mapIndex < (int32_t)UGLY_SIZEOF_MAPTOUCHARS); + int32_t offset = u8b->mapToUChars[mapIndex] - u8b->bufStartIdx; + U_ASSERT(offset>=0 && offset<=ut->chunkLength); + return offset; +diff -ur icu.org/source/test/intltest/utxttest.cpp icu/source/test/intltest/utxttest.cpp +--- icu.org/source/test/intltest/utxttest.cpp 2016-06-15 20:58:17.000000000 +0200 ++++ icu/source/test/intltest/utxttest.cpp 2017-04-21 16:14:57.383814739 +0200 +@@ -67,6 +67,8 @@ + if (exec) Ticket10983(); break; + case 7: name = "Ticket12130"; + if (exec) Ticket12130(); break; ++ case 8: name = "Ticket12888"; ++ if (exec) Ticket12888(); break; + default: name = ""; break; + } + } +@@ -1583,3 +1585,63 @@ + } + utext_close(&ut); + } ++ ++// Ticket 12888: bad handling of illegal utf-8 containing many instances of the archaic, now illegal, ++// six byte utf-8 forms. Original implementation had an assumption that ++// there would be at most three utf-8 bytes per UTF-16 code unit. ++// The five and six byte sequences map to a single replacement character. 
++ ++void UTextTest::Ticket12888() { ++ const char *badString = ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80" ++ "\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80\xfd\x80\x80\x80\x80\x80"; ++ ++ UErrorCode status = U_ZERO_ERROR; ++ LocalUTextPointer ut(utext_openUTF8(NULL, badString, -1, &status)); ++ TEST_SUCCESS(status); ++ for (;;) { ++ UChar32 c = utext_next32(ut.getAlias()); ++ if (c == U_SENTINEL) { ++ break; ++ } ++ } ++ int32_t endIdx = utext_getNativeIndex(ut.getAlias()); ++ if (endIdx != (int32_t)strlen(badString)) { ++ errln("%s:%d expected=%d, actual=%d", __FILE__, __LINE__, strlen(badString), endIdx); ++ return; ++ } ++ ++ for (int32_t prevIndex = endIdx; prevIndex>0;) { ++ UChar32 c = utext_previous32(ut.getAlias()); ++ int32_t currentIndex = utext_getNativeIndex(ut.getAlias()); ++ if (c != 0xfffd) { ++ errln("%s:%d (expected, actual, index) = (%d, %d, %d)\n", ++ __FILE__, __LINE__, 0xfffd, c, currentIndex); ++ break; ++ } ++ if (currentIndex != prevIndex - 6) { ++ errln("%s:%d: wrong index. Expected, actual = %d, %d", ++ __FILE__, __LINE__, prevIndex - 6, currentIndex); ++ break; ++ } ++ prevIndex = currentIndex; ++ } ++} +diff -ur icu.org/source/test/intltest/utxttest.h icu/source/test/intltest/utxttest.h +--- icu.org/source/test/intltest/utxttest.h 2016-06-15 20:58:17.000000000 +0200 ++++ icu/source/test/intltest/utxttest.h 2017-04-21 16:14:57.383814739 +0200 +@@ -38,6 +38,7 @@ + void Ticket10562(); + void Ticket10983(); + void Ticket12130(); ++ void Ticket12888(); + + private: + struct m { // Map between native indices & code points. 
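The MSVC limitation mentioned in the backport note above (error C2070 on sizeof applied to a non-static data member such as sizeof(UTF8Buf::mapToUChars)) can be illustrated with a small standalone sketch. The names CHUNK_SIZE, MAP_SIZE, SIZEOF_MAP and Buf below are illustrative stand-ins, not the actual ICU code; only the size expression mirrors the UTF8_TEXT_CHUNK_SIZE*6+6 / UGLY_SIZEOF_MAPTOUCHARS scheme used in the patch:

    // Minimal sketch, assuming a standalone C++ translation unit (not ICU code).
    // The point: sizeof(Buf::mapToUChars) names a non-static data member without
    // an object, which older MSVC rejects with C2070, so the patch routes the
    // size through a macro instead.
    #include <cstdint>
    #include <cstdio>

    enum { CHUNK_SIZE = 32 };                        // stand-in for UTF8_TEXT_CHUNK_SIZE

    #define MAP_SIZE (CHUNK_SIZE * 6 + 6)            // worst case: 6 UTF-8 bytes per UChar
    #define SIZEOF_MAP (sizeof(std::uint8_t) * (MAP_SIZE))

    struct Buf {
        std::uint8_t mapToUChars[MAP_SIZE];          // map native offsets to UChar offsets
    };

    int main() {
        // sizeof(Buf::mapToUChars) is the construct the old compiler chokes on;
        // the macro yields the same value (198 here) on any compiler.
        std::printf("%u\n", static_cast<unsigned>(SIZEOF_MAP));
        return 0;
    }

Compilers that reject the member-sizeof spelling still accept the macro form, which is why the backport defines UGLY_SIZEOF_MAPTOUCHARS rather than changing the array itself.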
commit baf5bc046f148c931fe3229f270273f3c5480f69 Author: Martin Hosken <[email protected]> Date: Fri Apr 15 20:26:08 2016 +0200 reactivate ICU Khmer patch Patch has been upstreamed with https://ssl.icu-project.org/trac/ticket/12504 Change-Id: I1f3ddad87a2a6568ced3f9d2b2df3e0af0ee18aa Reviewed-on: https://gerrit.libreoffice.org/24117 Tested-by: Jenkins <[email protected]> Reviewed-by: Martin Hosken <[email protected]> Reviewed-by: Eike Rathke <[email protected]> Tested-by: Eike Rathke <[email protected]> (cherry picked from commit 4e066825d43400969041669c82d8a4e0bfd91adf) (cherry picked from commit 0f849dd2b9d789bc01890b9e22772dac2b5d74b3) diff --git a/external/icu/UnpackedTarball_icu.mk b/external/icu/UnpackedTarball_icu.mk index c48d02556d2d..4a6a11477af3 100644 --- a/external/icu/UnpackedTarball_icu.mk +++ b/external/icu/UnpackedTarball_icu.mk @@ -27,6 +27,9 @@ $(eval $(call gb_UnpackedTarball_add_patches,icu,\ external/icu/clang-cl.patch.0 \ $(if $(filter-out ANDROID,$(OS)),external/icu/icu4c-icudata-stdlibs.diff) \ $(if $(filter EMSCRIPTEN,$(OS)),external/icu/icu4c-emscripten.patch.1) \ + external/icu/khmerbreakengine.patch \ )) +$(eval $(call gb_UnpackedTarball_add_file,icu,source/data/brkitr/khmerdict.dict,external/icu/khmerdict.dict)) + # vim: set noet sw=4 ts=4: diff --git a/external/icu/khmerbreakengine.patch b/external/icu/khmerbreakengine.patch index 0687645e8790..8f81f315da3e 100644 --- a/external/icu/khmerbreakengine.patch +++ b/external/icu/khmerbreakengine.patch @@ -478,9 +478,9 @@ index f1c874d..3ad1b3f 100644 - UChar32 uc; - int32_t chars = 0; - for (;;) { -- int32_t pcIndex = utext_getNativeIndex(text); +- int32_t pcIndex = (int32_t)utext_getNativeIndex(text); - pc = utext_next32(text); -- int32_t pcSize = utext_getNativeIndex(text) - pcIndex; +- int32_t pcSize = (int32_t)utext_getNativeIndex(text) - pcIndex; - chars += pcSize; - remaining -= pcSize; - if (remaining <= 0) { @@ -1000,10 +1000,10 @@ index cb594c6..82f2e77 100644 + int32_t *prefix, UnicodeSet const* ignoreSet, int32_t minLength) const { UCharsTrie uct(characters); - int32_t startingTextIndex = utext_getNativeIndex(text); + int32_t startingTextIndex = (int32_t)utext_getNativeIndex(text); @@ -53,7 +53,13 @@ int32_t UCharsDictionaryMatcher::matches(UText *text, int32_t maxLength, int32_t UStringTrieResult result = (codePointsMatched == 0) ? uct.first(c) : uct.next(c); - int32_t lengthMatched = utext_getNativeIndex(text) - startingTextIndex; + int32_t lengthMatched = (int32_t)utext_getNativeIndex(text) - startingTextIndex; codePointsMatched += 1; + if (ignoreSet != NULL && ignoreSet->contains(c)) { + continue; @@ -1022,11 +1022,11 @@ index cb594c6..82f2e77 100644 - int32_t *prefix) const { + int32_t *prefix, UnicodeSet const* ignoreSet, int32_t minLength) const { BytesTrie bt(characters); - int32_t startingTextIndex = utext_getNativeIndex(text); + int32_t startingTextIndex = (int32_t)utext_getNativeIndex(text); int32_t wordCount = 0; @@ -120,7 +126,13 @@ int32_t BytesDictionaryMatcher::matches(UText *text, int32_t maxLength, int32_t UStringTrieResult result = (codePointsMatched == 0) ? 
bt.first(transform(c)) : bt.next(transform(c)); - int32_t lengthMatched = utext_getNativeIndex(text) - startingTextIndex; + int32_t lengthMatched = (int32_t)utext_getNativeIndex(text) - startingTextIndex; codePointsMatched += 1; + if (ignoreSet != NULL && ignoreSet->contains(c)) { + continue; @@ -1081,7 +1081,7 @@ diff --git a/source/data/Makefile.in b/source/data/Makefile.in index 816c82d..c637d70 100644 --- misc/icu/source/data/Makefile.in +++ build/icu/source/data/Makefile.in -@@ -179,7 +179,7 @@ endif +@@ -181,7 +181,7 @@ endif endif endif @@ -1090,17 +1090,17 @@ index 816c82d..c637d70 100644 ifneq ($(ENABLE_STATIC),) ifeq ($(PKGDATA_MODE),dll) $(PKGDATA_INVOKE) $(PKGDATA) -e $(ICUDATA_ENTRY_POINT) -T $(OUTTMPDIR) -p $(ICUDATA_NAME) $(PKGDATA_LIBSTATICNAME) -m static $(PKGDATA_VERSIONING) $(PKGDATA_LIST) -@@ -563,8 +563,14 @@ $(BRKBLDDIR)/burmesedict.dict: $(TOOLBINDIR)/gendict$(TOOLEXEEXT) $(DAT_FILES) - $(INVOKE) $(TOOLBINDIR)/gendict --bytes --transform offset-0x1000 -c -i $(BUILDDIR) $(BRKSRCDIR)/burmesedict.txt $(BRKBLDDIR)/burmesedict.dict +@@ -564,8 +564,14 @@ $(BRKBLDDIR)/burmesedict.dict: $(TOOLBINDIR)/gendict$(TOOLEXEEXT) $(DAT_FILES) + $(INVOKE) $(TOOLBINDIR)/gendict --bytes --transform offset-0x1000 -c -i $(BUILDDIR) $(DICTSRCDIR)/burmesedict.txt $(BRKBLDDIR)/burmesedict.dict # TODO: figure out why combining characters are here? -$(BRKBLDDIR)/khmerdict.dict: $(TOOLBINDIR)/gendict$(TOOLEXEEXT) $(DAT_FILES) -- $(INVOKE) $(TOOLBINDIR)/gendict --bytes --transform offset-0x1780 -c -i $(BUILDDIR) $(BRKSRCDIR)/khmerdict.txt $(BRKBLDDIR)/khmerdict.dict +- $(INVOKE) $(TOOLBINDIR)/gendict --bytes --transform offset-0x1780 -c -i $(BUILDDIR) $(DICTSRCDIR)/khmerdict.txt $(BRKBLDDIR)/khmerdict.dict +#$(BRKBLDDIR)/khmerdict.dict: $(TOOLBINDIR)/gendict$(TOOLEXEEXT) $(DAT_FILES) -+# $(INVOKE) $(TOOLBINDIR)/gendict --bytes --transform offset-0x1780 -c -i $(BUILDDIR) $(BRKSRCDIR)/khmerdict.txt $(BRKBLDDIR)/khmerdict.dict ++# $(INVOKE) $(TOOLBINDIR)/gendict --bytes --transform offset-0x1780 -c -i $(BUILDDIR) $(DICTSRCDIR)/khmerdict.txt $(BRKBLDDIR)/khmerdict.dict + +#$(MAINBUILDDIR)/khmerdict.stamp: $(TOOLBINDIR)/gendict$(TOOLEXEEXT) $(BRKSRCDIR)/khmerdict.txt build-local -+# $(INVOKE) $(TOOLBINDIR)/gendict --bytes --transform offset-0x1780 -c -i $(BUILDDIR) $(BRKSRCDIR)/khmerdict.txt $(BRKBLDDIR)/khmerdict.dict ++# $(INVOKE) $(TOOLBINDIR)/gendict --bytes --transform offset-0x1780 -c -i $(BUILDDIR) $(DICTSRCDIR)/khmerdict.txt $(BRKBLDDIR)/khmerdict.dict +$(MAINBUILDDIR)/khmerdict.stamp: $(BRKSRCDIR)/khmerdict.dict build-local + cp $< $(BRKBLDDIR) + echo "timestamp" > $@ commit 0a6ff6a3d24f29b045bfecc39d78b7c7a8af389d Author: Eike Rathke <[email protected]> Date: Wed Apr 13 22:24:25 2016 +0200 upgrade to ICU 57 This does not apply patches external/icu/khmerbreakengine.patch external/icu/khmerdict.dict anymore, as the khmerbreakengine.patch failed to apply with several hunks of which one was 16k. Asking the patch contributor to follow-up on this. 
Change-Id: I78d4371d04a7b03417d402a222bcd384f02a619e Reviewed-on: https://gerrit.libreoffice.org/24067 Tested-by: Jenkins <[email protected]> Reviewed-by: Eike Rathke <[email protected]> Tested-by: Eike Rathke <[email protected]> (cherry picked from commit c58655c5a221d986fa3c3eed2f28810269205721) (cherry picked from commit 05d9fd7b3416ef24e94973c1bdef458636ccaf7f) diff --git a/configure.ac b/configure.ac index fdaaf4b57381..0e1a0533470b 100644 --- a/configure.ac +++ b/configure.ac @@ -8935,7 +8935,7 @@ SYSTEM_GENBRK= SYSTEM_GENCCODE= SYSTEM_GENCMN= -ICU_MAJOR=56 +ICU_MAJOR=57 ICU_MINOR=1 ICU_RECLASSIFIED_CLOSE_PARENTHESIS="TRUE" ICU_RECLASSIFIED_PREPEND_SET_EMPTY="TRUE" diff --git a/download.lst b/download.lst index 21f2b25845e3..c993c7e81f6e 100644 --- a/download.lst +++ b/download.lst @@ -58,7 +58,7 @@ export HARFBUZZ_TARBALL := harfbuzz-0.9.40.tar.bz2 export HSQLDB_TARBALL := 17410483b5b5f267aa18b7e00b65e6e0-hsqldb_1_8_0.zip export HUNSPELL_TARBALL := 4967da60b23413604c9e563beacc63b4-hunspell-1.3.3.tar.gz export HYPHEN_TARBALL := 5ade6ae2a99bc1e9e57031ca88d36dad-hyphen-2.8.8.tar.gz -export ICU_TARBALL := c4a2d71ff56aec5ebfab2a3f059be99d-icu4c-56_1-src.tgz +export ICU_TARBALL := 976734806026a4ef8bdd17937c8898b9-icu4c-57_1-src.tgz export JFREEREPORT_FLOW_ENGINE_TARBALL := ba2930200c9f019c2d93a8c88c651a0f-flow-engine-0.9.4.zip export JFREEREPORT_FLUTE_TARBALL := d8bd5eed178db6e2b18eeed243f85aa8-flute-1.1.6.zip export JFREEREPORT_LIBBASE_TARBALL := eeb2c7ddf0d302fba4bfc6e97eac9624-libbase-1.1.6.zip diff --git a/external/icu/UnpackedTarball_icu.mk b/external/icu/UnpackedTarball_icu.mk index 4a6a11477af3..c48d02556d2d 100644 --- a/external/icu/UnpackedTarball_icu.mk +++ b/external/icu/UnpackedTarball_icu.mk @@ -27,9 +27,6 @@ $(eval $(call gb_UnpackedTarball_add_patches,icu,\ external/icu/clang-cl.patch.0 \ $(if $(filter-out ANDROID,$(OS)),external/icu/icu4c-icudata-stdlibs.diff) \ $(if $(filter EMSCRIPTEN,$(OS)),external/icu/icu4c-emscripten.patch.1) \ - external/icu/khmerbreakengine.patch \ )) -$(eval $(call gb_UnpackedTarball_add_file,icu,source/data/brkitr/khmerdict.dict,external/icu/khmerdict.dict)) - # vim: set noet sw=4 ts=4: commit 042586d8986429751c4c60e7bdab6efcd1dba483 Author: Martin Hosken <[email protected]> Date: Tue Mar 22 11:26:52 2016 +0700 Fix wrong pattern definitions in khmer dictionary breaker Change-Id: I0132196744046391759a6e5110d054feee3deea3 Reviewed-on: https://gerrit.libreoffice.org/23420 Tested-by: Jenkins <[email protected]> Reviewed-by: Martin Hosken <[email protected]> (cherry picked from commit 7f36f4ce9f9f3d430009ba472d275d038abecb16) (cherry picked from commit aa4b3ec51803ade29323273668a516e7f18bdf95) diff --git a/external/icu/khmerbreakengine.patch b/external/icu/khmerbreakengine.patch index bc0d287929b0..0687645e8790 100644 --- a/external/icu/khmerbreakengine.patch +++ b/external/icu/khmerbreakengine.patch @@ -15,8 +15,8 @@ index f1c874d..3ad1b3f 100644 + fViramaSet.applyPattern(UNICODE_STRING_SIMPLE("[[:ccc=VR:]]"), status); + + // note Skip Sets contain fIgnoreSet characters too. 
-+ fSkipStartSet.applyPattern(UNICODE_STRING_SIMPLE("[[:lb=OP:][:lb=QU:]]\\u200C\\u200D\\u2060"), status); -+ fSkipEndSet.applyPattern(UNICODE_STRING_SIMPLE("[[:lb=CP:][:lb=QU:][:lb=EX:][:lb=CL:]]\\u200C\\u200D\\u2060"), status); ++ fSkipStartSet.applyPattern(UNICODE_STRING_SIMPLE("[[:lb=OP:][:lb=QU:]\\u200C\\u200D\\u2060]"), status); ++ fSkipEndSet.applyPattern(UNICODE_STRING_SIMPLE("[[:lb=CP:][:lb=QU:][:lb=EX:][:lb=CL:]\\u200C\\u200D\\u2060]"), status); + fNBeforeSet.applyPattern(UNICODE_STRING_SIMPLE("[[:lb=CR:][:lb=LF:][:lb=NL:][:lb=SP:][:lb=ZW:][:lb=IS:][:lb=BA:][:lb=NS:]]"), status); } @@ -332,10 +332,10 @@ index f1c874d..3ad1b3f 100644 + startZwsp = scanBeforeStart(text, scanStart, breakStart); + } + utext_setNativeIndex(text, rangeStart); -+ scanFwdClusters(text, rangeStart, initAfter); ++ scanFwdClusters(text, rangeEnd, initAfter); + bool endZwsp = scanAfterEnd(text, utext_nativeLength(text), scanEnd, breakEnd); + utext_setNativeIndex(text, rangeEnd - 1); -+ scanBackClusters(text, rangeEnd, finalBefore); ++ scanBackClusters(text, rangeStart, finalBefore); + if (finalBefore < initAfter) { // the whole run is tented so no breaks + if (breakStart || fTypes < UBRK_LINE) + foundBreaks.push(rangeStart, status); @@ -539,7 +539,7 @@ index f1c874d..3ad1b3f 100644 + int32_t ln = lengths.elementAti(j); + utext_setNativeIndex(text, ln+ix); + int32_t c = utext_current32(text); -+ while (fPuncSet.contains(c) || fIgnoreSet.contains(c)) { ++ while ((fPuncSet.contains(c) || fIgnoreSet.contains(c)) && ln + i < numCodePts) { + ++ln; + utext_next32(text); + c = utext_current32(text); commit 3da9c44f67a0f4e8ac31ca2b64871ff58ab9c9cb Author: Martin Hosken <[email protected]> Date: Thu Mar 17 09:57:35 2016 +0700 Fix bug in khmr linebreaking and update dictionary Change-Id: I2b776925c2c95cb56ccd592d036823c26054e059 Reviewed-on: https://gerrit.libreoffice.org/23316 Tested-by: Jenkins <[email protected]> Reviewed-by: Martin Hosken <[email protected]> (cherry picked from commit a976a19ca82661d8b459b85f5514b0e4c9222d47) (cherry picked from commit 55dece94611e1b2a8a1974d11c10050d8d74b5f7) diff --git a/external/icu/khmerbreakengine.patch b/external/icu/khmerbreakengine.patch index ba3e392a27f3..bc0d287929b0 100644 --- a/external/icu/khmerbreakengine.patch +++ b/external/icu/khmerbreakengine.patch @@ -2,7 +2,7 @@ diff --git a/source/common/dictbe.cpp b/source/common/dictbe.cpp index f1c874d..3ad1b3f 100644 --- misc/icu/source/common/dictbe.cpp +++ build/icu/source/common/dictbe.cpp -@@ -27,8 +27,16 @@ U_NAMESPACE_BEGIN +@@ -27,8 +27,17 @@ U_NAMESPACE_BEGIN ****************************************************************** */ @@ -14,13 +14,14 @@ index f1c874d..3ad1b3f 100644 fTypes = breakTypes; + fViramaSet.applyPattern(UNICODE_STRING_SIMPLE("[[:ccc=VR:]]"), status); + ++ // note Skip Sets contain fIgnoreSet characters too. 
+ fSkipStartSet.applyPattern(UNICODE_STRING_SIMPLE("[[:lb=OP:][:lb=QU:]]\\u200C\\u200D\\u2060"), status); + fSkipEndSet.applyPattern(UNICODE_STRING_SIMPLE("[[:lb=CP:][:lb=QU:][:lb=EX:][:lb=CL:]]\\u200C\\u200D\\u2060"), status); + fNBeforeSet.applyPattern(UNICODE_STRING_SIMPLE("[[:lb=CR:][:lb=LF:][:lb=NL:][:lb=SP:][:lb=ZW:][:lb=IS:][:lb=BA:][:lb=NS:]]"), status); } DictionaryBreakEngine::~DictionaryBreakEngine() { -@@ -90,7 +98,7 @@ DictionaryBreakEngine::findBreaks( UText *text, +@@ -90,7 +99,7 @@ DictionaryBreakEngine::findBreaks( UText *text, result = divideUpDictionaryRange(text, rangeStart, rangeEnd, foundBreaks); utext_setNativeIndex(text, current); } @@ -29,7 +30,7 @@ index f1c874d..3ad1b3f 100644 return result; } -@@ -101,6 +109,163 @@ DictionaryBreakEngine::setCharacters( const UnicodeSet &set ) { +@@ -101,6 +110,169 @@ DictionaryBreakEngine::setCharacters( const UnicodeSet &set ) { fSet.compact(); } @@ -87,6 +88,8 @@ index f1c874d..3ad1b3f 100644 + } + for (int i = 0; i < clusterLimit; ++i) { // scan backwards clusterLimit clusters + while (start > textStart) { ++ while (fIgnoreSet.contains(c)) ++ c = utext_previous32(text); + if (!fMarkSet.contains(c)) { + if (fBaseSet.contains(c)) { + c = utext_previous32(text); @@ -125,6 +128,10 @@ index f1c874d..3ad1b3f 100644 + ++end; + } + for (int i = 0; i < clusterLimit; ++i) { // scan forwards clusterLimit clusters ++ while (fIgnoreSet.contains(c)) { ++ utext_next32(text); ++ c = utext_current32(text); ++ } + if (fBaseSet.contains(c)) { + while (end < textEnd) { + utext_next32(text); @@ -193,7 +200,7 @@ index f1c874d..3ad1b3f 100644 /* ****************************************************************** * PossibleWord -@@ -128,35 +293,35 @@ private: +@@ -128,35 +302,35 @@ private: public: PossibleWord() : count(0), prefix(0), offset(-1), mark(0), current(0) {}; ~PossibleWord() {}; @@ -238,242 +245,7 @@ index f1c874d..3ad1b3f 100644 // Dictionary leaves text after longest prefix, not longest word. Back up. if (count <= 0) { utext_setNativeIndex(text, start); -@@ -261,16 +426,16 @@ ThaiBreakEngine::divideUpDictionaryRange( UText *text, - int32_t current; - UErrorCode status = U_ZERO_ERROR; - PossibleWord words[THAI_LOOKAHEAD]; -- -+ - utext_setNativeIndex(text, rangeStart); -- -+ - while (U_SUCCESS(status) && (current = (int32_t)utext_getNativeIndex(text)) < rangeEnd) { - cpWordLength = 0; - cuWordLength = 0; - - // Look for candidate words at the current position - int32_t candidates = words[wordsFound%THAI_LOOKAHEAD].candidates(text, fDictionary, rangeEnd); -- -+ - // If we found exactly one, use that - if (candidates == 1) { - cuWordLength = words[wordsFound % THAI_LOOKAHEAD].acceptMarked(text); -@@ -291,12 +456,12 @@ ThaiBreakEngine::divideUpDictionaryRange( UText *text, - words[wordsFound%THAI_LOOKAHEAD].markCurrent(); - wordsMatched = 2; - } -- -+ - // If we're already at the end of the range, we're done - if ((int32_t)utext_getNativeIndex(text) >= rangeEnd) { - goto foundBest; - } -- -+ - // See if any of the possible second words is followed by a third word - do { - // If we find a third word, stop right away -@@ -315,13 +480,13 @@ foundBest: - cpWordLength = words[wordsFound % THAI_LOOKAHEAD].markedCPLength(); - wordsFound += 1; - } -- -+ - // We come here after having either found a word or not. We look ahead to the - // next word. If it's not a dictionary word, we will combine it with the word we - // just found (if there is one), but only if the preceding word does not exceed - // the threshold. 
- // The text iterator should now be positioned at the end of the word we found. -- -+ - UChar32 uc = 0; - if ((int32_t)utext_getNativeIndex(text) < rangeEnd && cpWordLength < THAI_ROOT_COMBINE_THRESHOLD) { - // if it is a dictionary word, do nothing. If it isn't, then if there is -@@ -357,12 +522,12 @@ foundBest: - } - } - } -- -+ - // Bump the word count if there wasn't already one - if (cuWordLength <= 0) { - wordsFound += 1; - } -- -+ - // Update the length with the passed-over characters - cuWordLength += chars; - } -@@ -371,14 +536,14 @@ foundBest: - utext_setNativeIndex(text, current+cuWordLength); - } - } -- -+ - // Never stop before a combining mark. - int32_t currPos; - while ((currPos = (int32_t)utext_getNativeIndex(text)) < rangeEnd && fMarkSet.contains(utext_current32(text))) { - utext_next32(text); - cuWordLength += (int32_t)utext_getNativeIndex(text) - currPos; - } -- -+ - // Look ahead for possible suffixes if a dictionary word does not follow. - // We do this in code rather than using a rule so that the heuristic - // resynch continues to function. For example, one of the suffix characters -@@ -496,16 +661,16 @@ LaoBreakEngine::divideUpDictionaryRange( UText *text, - int32_t current; - UErrorCode status = U_ZERO_ERROR; - PossibleWord words[LAO_LOOKAHEAD]; -- -+ - utext_setNativeIndex(text, rangeStart); -- -+ - while (U_SUCCESS(status) && (current = (int32_t)utext_getNativeIndex(text)) < rangeEnd) { - cuWordLength = 0; - cpWordLength = 0; - - // Look for candidate words at the current position - int32_t candidates = words[wordsFound%LAO_LOOKAHEAD].candidates(text, fDictionary, rangeEnd); -- -+ - // If we found exactly one, use that - if (candidates == 1) { - cuWordLength = words[wordsFound % LAO_LOOKAHEAD].acceptMarked(text); -@@ -526,12 +691,12 @@ LaoBreakEngine::divideUpDictionaryRange( UText *text, - words[wordsFound%LAO_LOOKAHEAD].markCurrent(); - wordsMatched = 2; - } -- -+ - // If we're already at the end of the range, we're done - if ((int32_t)utext_getNativeIndex(text) >= rangeEnd) { - goto foundBest; - } -- -+ - // See if any of the possible second words is followed by a third word - do { - // If we find a third word, stop right away -@@ -549,7 +714,7 @@ foundBest: - cpWordLength = words[wordsFound % LAO_LOOKAHEAD].markedCPLength(); - wordsFound += 1; - } -- -+ - // We come here after having either found a word or not. We look ahead to the - // next word. If it's not a dictionary word, we will combine it withe the word we - // just found (if there is one), but only if the preceding word does not exceed -@@ -587,12 +752,12 @@ foundBest: - } - } - } -- -+ - // Bump the word count if there wasn't already one - if (cuWordLength <= 0) { - wordsFound += 1; - } -- -+ - // Update the length with the passed-over characters - cuWordLength += chars; - } -@@ -601,14 +766,14 @@ foundBest: - utext_setNativeIndex(text, current + cuWordLength); - } - } -- -+ - // Never stop before a combining mark. - int32_t currPos; - while ((currPos = (int32_t)utext_getNativeIndex(text)) < rangeEnd && fMarkSet.contains(utext_current32(text))) { - utext_next32(text); - cuWordLength += (int32_t)utext_getNativeIndex(text) - currPos; - } -- -+ - // Look ahead for possible suffixes if a dictionary word does not follow. - // We do this in code rather than using a rule so that the heuristic - // resynch continues to function. 
For example, one of the suffix characters -@@ -689,16 +854,16 @@ BurmeseBreakEngine::divideUpDictionaryRange( UText *text, - int32_t current; - UErrorCode status = U_ZERO_ERROR; - PossibleWord words[BURMESE_LOOKAHEAD]; -- -+ - utext_setNativeIndex(text, rangeStart); -- -+ - while (U_SUCCESS(status) && (current = (int32_t)utext_getNativeIndex(text)) < rangeEnd) { - cuWordLength = 0; - cpWordLength = 0; - - // Look for candidate words at the current position - int32_t candidates = words[wordsFound%BURMESE_LOOKAHEAD].candidates(text, fDictionary, rangeEnd); -- -+ - // If we found exactly one, use that - if (candidates == 1) { - cuWordLength = words[wordsFound % BURMESE_LOOKAHEAD].acceptMarked(text); -@@ -719,12 +884,12 @@ BurmeseBreakEngine::divideUpDictionaryRange( UText *text, - words[wordsFound%BURMESE_LOOKAHEAD].markCurrent(); - wordsMatched = 2; - } -- -+ - // If we're already at the end of the range, we're done - if ((int32_t)utext_getNativeIndex(text) >= rangeEnd) { - goto foundBest; - } -- -+ - // See if any of the possible second words is followed by a third word - do { - // If we find a third word, stop right away -@@ -742,7 +907,7 @@ foundBest: - cpWordLength = words[wordsFound % BURMESE_LOOKAHEAD].markedCPLength(); - wordsFound += 1; - } -- -+ - // We come here after having either found a word or not. We look ahead to the - // next word. If it's not a dictionary word, we will combine it withe the word we - // just found (if there is one), but only if the preceding word does not exceed -@@ -780,12 +945,12 @@ foundBest: - } - } - } -- -+ - // Bump the word count if there wasn't already one - if (cuWordLength <= 0) { - wordsFound += 1; - } -- -+ - // Update the length with the passed-over characters - cuWordLength += chars; - } -@@ -794,14 +959,14 @@ foundBest: - utext_setNativeIndex(text, current + cuWordLength); - } - } -- -+ - // Never stop before a combining mark. - int32_t currPos; - while ((currPos = (int32_t)utext_getNativeIndex(text)) < rangeEnd && fMarkSet.contains(utext_current32(text))) { - utext_next32(text); - cuWordLength += (int32_t)utext_getNativeIndex(text) - currPos; - } -- -+ - // Look ahead for possible suffixes if a dictionary word does not follow. - // We do this in code rather than using a rule so that the heuristic - // resynch continues to function. 
For example, one of the suffix characters -@@ -828,51 +993,28 @@ foundBest: +@@ -828,51 +1002,28 @@ foundBest: * KhmerBreakEngine */ @@ -536,7 +308,7 @@ index f1c874d..3ad1b3f 100644 } KhmerBreakEngine::~KhmerBreakEngine() { -@@ -884,180 +1027,204 @@ KhmerBreakEngine::divideUpDictionaryRange( UText *text, +@@ -884,180 +1036,204 @@ KhmerBreakEngine::divideUpDictionaryRange( UText *text, int32_t rangeStart, int32_t rangeEnd, UStack &foundBreaks ) const { @@ -560,10 +332,10 @@ index f1c874d..3ad1b3f 100644 + startZwsp = scanBeforeStart(text, scanStart, breakStart); + } + utext_setNativeIndex(text, rangeStart); -+ scanFwdClusters(text, rangeEnd, initAfter); ++ scanFwdClusters(text, rangeStart, initAfter); + bool endZwsp = scanAfterEnd(text, utext_nativeLength(text), scanEnd, breakEnd); + utext_setNativeIndex(text, rangeEnd - 1); -+ scanBackClusters(text, rangeStart, finalBefore); ++ scanBackClusters(text, rangeEnd, finalBefore); + if (finalBefore < initAfter) { // the whole run is tented so no breaks + if (breakStart || fTypes < UBRK_LINE) + foundBreaks.push(rangeStart, status); @@ -715,7 +487,7 @@ index f1c874d..3ad1b3f 100644 + if (count == 0) { + utext_setNativeIndex(text, ix); + int32_t c = utext_current32(text); -+ if (fPuncSet.contains(c) || c == ZWSP || c == WJ) { ++ if (fPuncSet.contains(c) || fIgnoreSet.contains(c) || c == ZWSP) { + values.setElementAt(0, count); + lengths.setElementAt(1, count++); + } else if (fBaseSet.contains(c)) { @@ -767,7 +539,7 @@ index f1c874d..3ad1b3f 100644 + int32_t ln = lengths.elementAti(j); + utext_setNativeIndex(text, ln+ix); + int32_t c = utext_current32(text); -+ while (fPuncSet.contains(c)) { ++ while (fPuncSet.contains(c) || fIgnoreSet.contains(c)) { + ++ln; + utext_next32(text); + c = utext_current32(text); @@ -887,71 +659,6 @@ index f1c874d..3ad1b3f 100644 } #if !UCONFIG_NO_NORMALIZATION -@@ -1121,7 +1288,7 @@ static inline int32_t utext_i32_flag(int32_t bitIndex) { - return (int32_t)1 << bitIndex; - } - -- -+ - /* - * @param text A UText representing the text - * @param rangeStart The start of the range of dictionary characters -@@ -1129,7 +1296,7 @@ static inline int32_t utext_i32_flag(int32_t bitIndex) { - * @param foundBreaks Output of C array of int32_t break positions, or 0 - * @return The number of breaks found - */ --int32_t -+int32_t - CjkBreakEngine::divideUpDictionaryRange( UText *inText, - int32_t rangeStart, - int32_t rangeEnd, -@@ -1192,7 +1359,7 @@ CjkBreakEngine::divideUpDictionaryRange( UText *inText, - if (U_FAILURE(status)) { - return 0; - } -- -+ - UnicodeString fragment; - UnicodeString normalizedFragment; - for (int32_t srcI = 0; srcI < inString.length();) { // Once per normalization chunk -@@ -1261,7 +1428,7 @@ CjkBreakEngine::divideUpDictionaryRange( UText *inText, - } - } - } -- -+ - // bestSnlp[i] is the snlp of the best segmentation of the first i - // code points in the range to be matched. - UVector32 bestSnlp(numCodePts + 1, status); -@@ -1271,7 +1438,7 @@ CjkBreakEngine::divideUpDictionaryRange( UText *inText, - } - - -- // prev[i] is the index of the last CJK code point in the previous word in -+ // prev[i] is the index of the last CJK code point in the previous word in - // the best segmentation of the first i characters. - UVector32 prev(numCodePts + 1, status); - for(int32_t i = 0; i <= numCodePts; i++){ -@@ -1305,8 +1472,8 @@ CjkBreakEngine::divideUpDictionaryRange( UText *inText, - // Note: lengths is filled with code point lengths - // The NULL parameter is the ignored code unit lengths. 
- -- // if there are no single character matches found in the dictionary -- // starting with this charcter, treat character as a 1-character word -+ // if there are no single character matches found in the dictionary -+ // starting with this charcter, treat character as a 1-character word - // with the highest value possible, i.e. the least likely to occur. - // Exclude Korean characters from this treatment, as they should be left - // together by default. -@@ -1380,7 +1547,7 @@ CjkBreakEngine::divideUpDictionaryRange( UText *inText, - numBreaks++; - } - -- // Now that we're done, convert positions in t_boundary[] (indices in -+ // Now that we're done, convert positions in t_boundary[] (indices in - // the normalized input string) back to indices in the original input UText - // while reversing t_boundary and pushing values to foundBreaks. - for (int32_t i = numBreaks-1; i >= 0; i--) { diff --git a/source/common/dictbe.h b/source/common/dictbe.h index d3488cd..26caa75 100644 --- misc/icu/source/common/dictbe.h diff --git a/external/icu/khmerdict.dict b/external/icu/khmerdict.dict index c935cd088659..52605b65469d 100644 Binary files a/external/icu/khmerdict.dict and b/external/icu/khmerdict.dict differ commit 3ad820385901e9bcd976d1c90f74e77bc17a7cc8 Author: Martin Hosken <[email protected]> Date: Fri Jan 8 16:41:52 2016 +0700 Fix applying external dict to icu, and khmer break engine fixes Change-Id: Ib897e5fa5e80f75f501694dbf874aabd92253b25 Reviewed-on: https://gerrit.libreoffice.org/21247 Tested-by: Jenkins <[email protected]> Reviewed-by: Martin Hosken <[email protected]> (cherry picked from commit 39b718dd655220110523b7013e65ea4f821aedf7) (cherry picked from commit 15b4bad58196d19239d1dff615fa61fe7f15a07f) diff --git a/external/icu/khmerbreakengine.patch b/external/icu/khmerbreakengine.patch index 03e6079b19f0..ba3e392a27f3 100644 --- a/external/icu/khmerbreakengine.patch +++ b/external/icu/khmerbreakengine.patch @@ -14,8 +14,8 @@ index f1c874d..3ad1b3f 100644 fTypes = breakTypes; + fViramaSet.applyPattern(UNICODE_STRING_SIMPLE("[[:ccc=VR:]]"), status); + -+ fSkipStartSet.applyPattern(UNICODE_STRING_SIMPLE("[[:lb=OP:][:lb=QU:]]"), status); -+ fSkipEndSet.applyPattern(UNICODE_STRING_SIMPLE("[[:lb=CP:][:lb=QU:][:lb=EX:][:lb=CL:]]"), status); ++ fSkipStartSet.applyPattern(UNICODE_STRING_SIMPLE("[[:lb=OP:][:lb=QU:]]\\u200C\\u200D\\u2060"), status); ++ fSkipEndSet.applyPattern(UNICODE_STRING_SIMPLE("[[:lb=CP:][:lb=QU:][:lb=EX:][:lb=CL:]]\\u200C\\u200D\\u2060"), status); + fNBeforeSet.applyPattern(UNICODE_STRING_SIMPLE("[[:lb=CR:][:lb=LF:][:lb=NL:][:lb=SP:][:lb=ZW:][:lb=IS:][:lb=BA:][:lb=NS:]]"), status); } @@ -473,7 +473,7 @@ index f1c874d..3ad1b3f 100644 // Look ahead for possible suffixes if a dictionary word does not follow. // We do this in code rather than using a rule so that the heuristic // resynch continues to function. 
For example, one of the suffix characters -@@ -828,51 +993,29 @@ foundBest: +@@ -828,51 +993,28 @@ foundBest: * KhmerBreakEngine */ @@ -506,7 +506,7 @@ index f1c874d..3ad1b3f 100644 setCharacters(fKhmerWordSet); } fMarkSet.applyPattern(UNICODE_STRING_SIMPLE("[[:Khmr:]&[:LineBreak=SA:]&[:M:]]"), status); - fMarkSet.add(0x0020); +- fMarkSet.add(0x0020); - fEndWordSet = fKhmerWordSet; - fBeginWordSet.add(0x1780, 0x17B3); - //fBeginWordSet.add(0x17A3, 0x17A4); // deprecated vowels @@ -522,7 +522,7 @@ index f1c874d..3ad1b3f 100644 -// fSuffixSet.add(THAI_MAIYAMOK); + fIgnoreSet.add(0x2060); // WJ + fIgnoreSet.add(0x200C, 0x200D); // ZWJ, ZWNJ -+ fBaseSet.applyPattern(UNICODE_STRING_SIMPLE("[[:Khmr:]&[:^M:]]"), status); ++ fBaseSet.applyPattern(UNICODE_STRING_SIMPLE("[[:Khmr:]&[:lb=SA:]&[:^M:]]"), status); + fPuncSet.applyPattern(UNICODE_STRING_SIMPLE("[\\u17D4\\u17D5\\u17D6\\u17D7\\u17D9:]"), status); // Compact for caching. @@ -750,7 +750,7 @@ index f1c874d..3ad1b3f 100644 - if (cuWordLength <= 0) { - wordsFound += 1; - } -+ } while (fMarkSet.contains(c)); ++ } while (fMarkSet.contains(c) || fIgnoreSet.contains(c)); + values.setElementAt(BADSNLP, count); + lengths.setElementAt(utext_getNativeIndex(text) - currix, count++); + } else { @@ -775,7 +775,7 @@ index f1c874d..3ad1b3f 100644 - else { - // Back up to where we were for next iteration - utext_setNativeIndex(text, current+cuWordLength); -+ int32_t ln_j_i = ln + i; ++ int32_t ln_j_i = ln + i; // yes really i! + if (newSnlp < bestSnlp.elementAti(ln_j_i)) { + if (v == BADSNLP) { + int32_t p = prev.elementAti(i); @@ -1395,7 +1395,7 @@ index 816c82d..c637d70 100644 +#$(MAINBUILDDIR)/khmerdict.stamp: $(TOOLBINDIR)/gendict$(TOOLEXEEXT) $(BRKSRCDIR)/khmerdict.txt build-local +# $(INVOKE) $(TOOLBINDIR)/gendict --bytes --transform offset-0x1780 -c -i $(BUILDDIR) $(BRKSRCDIR)/khmerdict.txt $(BRKBLDDIR)/khmerdict.dict +$(MAINBUILDDIR)/khmerdict.stamp: $(BRKSRCDIR)/khmerdict.dict build-local -+ cp $< $(MAINBUILDDIR) ++ cp $< $(BRKBLDDIR) + echo "timestamp" > $@ #################################################### CFU commit 526bb0b07133fcf779149947dcfa2850a27c0b25 Author: Martin Hosken <[email protected]> Date: Sat Dec 12 11:36:53 2015 +0700 Use .dict files since below the 500K limit Reviewed-on: https://gerrit.libreoffice.org/20748 Tested-by: Jenkins <[email protected]> Reviewed-by: Martin Hosken <[email protected]> (cherry picked from commit fbb00383d82da5ce375f1b034d3fb9ebdd9a8f0e) (cherry picked from commit 85d5174a862c78561c4cf85aa7c6ef2ba99d5352) Change-Id: Iec71ad4918cd333f0a44d372017ecee300e3aca9 diff --git a/external/icu/UnpackedTarball_icu.mk b/external/icu/UnpackedTarball_icu.mk index c48d02556d2d..4a6a11477af3 100644 --- a/external/icu/UnpackedTarball_icu.mk +++ b/external/icu/UnpackedTarball_icu.mk @@ -27,6 +27,9 @@ $(eval $(call gb_UnpackedTarball_add_patches,icu,\ external/icu/clang-cl.patch.0 \ $(if $(filter-out ANDROID,$(OS)),external/icu/icu4c-icudata-stdlibs.diff) \ $(if $(filter EMSCRIPTEN,$(OS)),external/icu/icu4c-emscripten.patch.1) \ + external/icu/khmerbreakengine.patch \ )) +$(eval $(call gb_UnpackedTarball_add_file,icu,source/data/brkitr/khmerdict.dict,external/icu/khmerdict.dict)) + # vim: set noet sw=4 ts=4: diff --git a/external/icu/khmerbreakengine.patch b/external/icu/khmerbreakengine.patch new file mode 100644 index 000000000000..03e6079b19f0 --- /dev/null +++ b/external/icu/khmerbreakengine.patch @@ -0,0 +1,1403 @@ +diff --git a/source/common/dictbe.cpp b/source/common/dictbe.cpp +index f1c874d..3ad1b3f 100644 +--- 
misc/icu/source/common/dictbe.cpp ++++ build/icu/source/common/dictbe.cpp +@@ -27,8 +27,16 @@ U_NAMESPACE_BEGIN + ****************************************************************** + */ + +-DictionaryBreakEngine::DictionaryBreakEngine(uint32_t breakTypes) { ++DictionaryBreakEngine::DictionaryBreakEngine(uint32_t breakTypes) : ++ clusterLimit(3) ++{ ++ UErrorCode status = U_ZERO_ERROR; + fTypes = breakTypes; ++ fViramaSet.applyPattern(UNICODE_STRING_SIMPLE("[[:ccc=VR:]]"), status); ++ ++ fSkipStartSet.applyPattern(UNICODE_STRING_SIMPLE("[[:lb=OP:][:lb=QU:]]"), status); ++ fSkipEndSet.applyPattern(UNICODE_STRING_SIMPLE("[[:lb=CP:][:lb=QU:][:lb=EX:][:lb=CL:]]"), status); ++ fNBeforeSet.applyPattern(UNICODE_STRING_SIMPLE("[[:lb=CR:][:lb=LF:][:lb=NL:][:lb=SP:][:lb=ZW:][:lb=IS:][:lb=BA:][:lb=NS:]]"), status); + } + + DictionaryBreakEngine::~DictionaryBreakEngine() { +@@ -90,7 +98,7 @@ DictionaryBreakEngine::findBreaks( UText *text, + result = divideUpDictionaryRange(text, rangeStart, rangeEnd, foundBreaks); + utext_setNativeIndex(text, current); + } +- ++ + return result; + } + +@@ -101,6 +109,163 @@ DictionaryBreakEngine::setCharacters( const UnicodeSet &set ) { + fSet.compact(); + } + ++bool ++DictionaryBreakEngine::scanBeforeStart(UText *text, int32_t& start, bool &doBreak) const { ++ UErrorCode status = U_ZERO_ERROR; ++ UText* ut = utext_clone(NULL, text, false, true, &status); ++ utext_setNativeIndex(ut, start); ++ UChar32 c = utext_current32(ut); ++ bool res = false; ++ doBreak = true; ++ while (start >= 0) { ++ if (!fSkipStartSet.contains(c)) { ++ res = (c == ZWSP); ++ break; ++ } ++ --start; ++ c = utext_previous32(ut); ++ doBreak = false; ++ } ++ utext_close(ut); ++ return res; ++} ++ ++bool ++DictionaryBreakEngine::scanAfterEnd(UText *text, int32_t textEnd, int32_t& end, bool &doBreak) const { ++ UErrorCode status = U_ZERO_ERROR; ++ UText* ut = utext_clone(NULL, text, false, true, &status); ++ utext_setNativeIndex(ut, end); ++ UChar32 c = utext_current32(ut); ++ bool res = false; ++ doBreak = !fNBeforeSet.contains(c); ++ while (end < textEnd) { ++ if (!fSkipEndSet.contains(c)) { ++ res = (c == ZWSP); ++ break; ++ } ++ ++end; ++ c = utext_next32(ut); ++ doBreak = false; ++ } ++ utext_close(ut); ++ return res; ++} ++ ++void ++DictionaryBreakEngine::scanBackClusters(UText *text, int32_t textStart, int32_t& start) const { ++ UChar32 c = 0; ++ start = utext_getNativeIndex(text); ++ while (start > textStart) { ++ c = utext_previous32(text); ++ --start; ++ if (!fSkipEndSet.contains(c)) ++ break; ++ } ++ for (int i = 0; i < clusterLimit; ++i) { // scan backwards clusterLimit clusters ++ while (start > textStart) { ++ if (!fMarkSet.contains(c)) { ++ if (fBaseSet.contains(c)) { ++ c = utext_previous32(text); ++ if (!fViramaSet.contains(c)) { // Virama (e.g. coeng) preceding base. Treat sequence as a mark ++ utext_next32(text); ++ c = utext_current32(text); ++ break; ++ } else { ++ --start; ++ } ++ } else { ++ break; ++ } ++ } ++ c = utext_previous32(text); ++ --start; ++ } ++ if (!fBaseSet.contains(c) || start < textStart) { // not a cluster start so finish ++ break; ++ } ++ c = utext_previous32(text); ++ --start; // go round again ++ } // ignore hitting previous inhibitor since scanning for it should have found us! 
++ ++start; // counteract --before ++} ++ ++void ++DictionaryBreakEngine::scanFwdClusters(UText *text, int32_t textEnd, int32_t& end) const { ++ UChar32 c = utext_current32(text); ++ end = utext_getNativeIndex(text); ++ while (end < textEnd) { ++ if (!fSkipStartSet.contains(c)) ++ break; ++ utext_next32(text); ++ c = utext_current32(text); ++ ++end; ++ } ++ for (int i = 0; i < clusterLimit; ++i) { // scan forwards clusterLimit clusters ++ if (fBaseSet.contains(c)) { ++ while (end < textEnd) { ++ utext_next32(text); ++ c = utext_current32(text); ++ ++end; ++ if (!fMarkSet.contains(c)) ++ break; ++ else if (fViramaSet.contains(c)) { // handle coeng + base as mark ++ utext_next32(text); ++ c = utext_current32(text); ++ ++end; ++ if (!fBaseSet.contains(c)) ++ break; ++ } ++ } ++ } else { ++ --end; // bad char so break after char before it ++ break; ++ } ++ } ++} ++ ++bool ++DictionaryBreakEngine::scanWJ(UText *text, int32_t &start, int32_t end, int32_t &before, int32_t &after) const { ++ UErrorCode status = U_ZERO_ERROR; ++ UText* ut = utext_clone(NULL, text, false, true, &status); ++ int32_t nat = start; ++ utext_setNativeIndex(ut, nat); ++ bool foundFirst = true; ++ int32_t curr = start; ++ while (nat < end) { ++ UChar32 c = utext_current32(ut); ++ if (c == ZWSP || c == WJ) { ++ curr = nat + 1; ++ if (foundFirst) // only scan backwards for first inhibitor ++ scanBackClusters(ut, start, before); ++ foundFirst = false; // don't scan backwards if we go around again. Also marks found something ++ ++ utext_next32(ut); ++ scanFwdClusters(ut, end, after); ++ nat = after + 1; ++ ++ if (c == ZWSP || c == WJ) { // did we hit another one? ++ continue; ++ } else { ++ break; ++ } ++ } ++ ++ ++nat; // keep hunting ++ utext_next32(ut); ++ } ++ ++ utext_close(ut); ++ ++ if (nat >= end && foundFirst) { ++ start = before = after = nat; ++ return false; // failed to find anything ++ } ++ else { ++ start = curr; ++ } ++ return true; // yup hit one ++} ++ + /* + ****************************************************************** + * PossibleWord +@@ -128,35 +293,35 @@ private: + public: + PossibleWord() : count(0), prefix(0), offset(-1), mark(0), current(0) {}; + ~PossibleWord() {}; +- ++ + // Fill the list of candidates if needed, select the longest, and return the number found +- int32_t candidates( UText *text, DictionaryMatcher *dict, int32_t rangeEnd ); +- ++ int32_t candidates( UText *text, DictionaryMatcher *dict, int32_t rangeEnd, UnicodeSet const *ignoreSet = NULL, int32_t minLength = 0 ); ++ + // Select the currently marked candidate, point after it in the text, and invalidate self + int32_t acceptMarked( UText *text ); +- ++ + // Back up from the current candidate to the next shorter one; return TRUE if that exists + // and point the text after it + UBool backUp( UText *text ); +- ++ + // Return the longest prefix this candidate location shares with a dictionary word + // Return value is in code points. + int32_t longestPrefix() { return prefix; }; +- ++ + // Mark the current candidate as the one we like + void markCurrent() { mark = current; }; +- ++ + // Get length in code points of the marked word. 
+ int32_t markedCPLength() { return cpLengths[mark]; }; + }; + + +-int32_t PossibleWord::candidates( UText *text, DictionaryMatcher *dict, int32_t rangeEnd ) { ++int32_t PossibleWord::candidates( UText *text, DictionaryMatcher *dict, int32_t rangeEnd, UnicodeSet const *ignoreSet, int32_t minLength) { + // TODO: If getIndex is too slow, use offset < 0 and add discardAll() + int32_t start = (int32_t)utext_getNativeIndex(text); + if (start != offset) { + offset = start; +- count = dict->matches(text, rangeEnd-start, UPRV_LENGTHOF(cuLengths), cuLengths, cpLengths, NULL, &prefix); ++ count = dict->matches(text, rangeEnd-start, UPRV_LENGTHOF(cuLengths), cuLengths, cpLengths, NULL, &prefix, ignoreSet, minLength); + // Dictionary leaves text after longest prefix, not longest word. Back up. + if (count <= 0) { + utext_setNativeIndex(text, start); +@@ -261,16 +426,16 @@ ThaiBreakEngine::divideUpDictionaryRange( UText *text, + int32_t current; + UErrorCode status = U_ZERO_ERROR; + PossibleWord words[THAI_LOOKAHEAD]; +- ++ + utext_setNativeIndex(text, rangeStart); +- ++ + while (U_SUCCESS(status) && (current = (int32_t)utext_getNativeIndex(text)) < rangeEnd) { + cpWordLength = 0; + cuWordLength = 0; + + // Look for candidate words at the current position + int32_t candidates = words[wordsFound%THAI_LOOKAHEAD].candidates(text, fDictionary, rangeEnd); +- ++ + // If we found exactly one, use that + if (candidates == 1) { + cuWordLength = words[wordsFound % THAI_LOOKAHEAD].acceptMarked(text); +@@ -291,12 +456,12 @@ ThaiBreakEngine::divideUpDictionaryRange( UText *text, + words[wordsFound%THAI_LOOKAHEAD].markCurrent(); + wordsMatched = 2; + } +- ++ + // If we're already at the end of the range, we're done + if ((int32_t)utext_getNativeIndex(text) >= rangeEnd) { + goto foundBest; + } +- ++ + // See if any of the possible second words is followed by a third word + do { + // If we find a third word, stop right away +@@ -315,13 +480,13 @@ foundBest: + cpWordLength = words[wordsFound % THAI_LOOKAHEAD].markedCPLength(); + wordsFound += 1; + } +- ++ + // We come here after having either found a word or not. We look ahead to the + // next word. If it's not a dictionary word, we will combine it with the word we + // just found (if there is one), but only if the preceding word does not exceed + // the threshold. + // The text iterator should now be positioned at the end of the word we found. +- ++ + UChar32 uc = 0; + if ((int32_t)utext_getNativeIndex(text) < rangeEnd && cpWordLength < THAI_ROOT_COMBINE_THRESHOLD) { + // if it is a dictionary word, do nothing. If it isn't, then if there is +@@ -357,12 +522,12 @@ foundBest: + } + } + } +- ++ + // Bump the word count if there wasn't already one + if (cuWordLength <= 0) { + wordsFound += 1; + } +- ++ + // Update the length with the passed-over characters + cuWordLength += chars; + } +@@ -371,14 +536,14 @@ foundBest: + utext_setNativeIndex(text, current+cuWordLength); + } + } +- ++ + // Never stop before a combining mark. + int32_t currPos; + while ((currPos = (int32_t)utext_getNativeIndex(text)) < rangeEnd && fMarkSet.contains(utext_current32(text))) { + utext_next32(text); + cuWordLength += (int32_t)utext_getNativeIndex(text) - currPos; + } +- ++ + // Look ahead for possible suffixes if a dictionary word does not follow. + // We do this in code rather than using a rule so that the heuristic + // resynch continues to function. 
For example, one of the suffix characters +@@ -496,16 +661,16 @@ LaoBreakEngine::divideUpDictionaryRange( UText *text, + int32_t current; + UErrorCode status = U_ZERO_ERROR; + PossibleWord words[LAO_LOOKAHEAD]; +- ++ + utext_setNativeIndex(text, rangeStart); +- ++ + while (U_SUCCESS(status) && (current = (int32_t)utext_getNativeIndex(text)) < rangeEnd) { + cuWordLength = 0; + cpWordLength = 0; + + // Look for candidate words at the current position + int32_t candidates = words[wordsFound%LAO_LOOKAHEAD].candidates(text, fDictionary, rangeEnd); +- ++ + // If we found exactly one, use that + if (candidates == 1) { + cuWordLength = words[wordsFound % LAO_LOOKAHEAD].acceptMarked(text); +@@ -526,12 +691,12 @@ LaoBreakEngine::divideUpDictionaryRange( UText *text, + words[wordsFound%LAO_LOOKAHEAD].markCurrent(); + wordsMatched = 2; + } +- ++ + // If we're already at the end of the range, we're done + if ((int32_t)utext_getNativeIndex(text) >= rangeEnd) { + goto foundBest; + } +- ++ + // See if any of the possible second words is followed by a third word + do { + // If we find a third word, stop right away +@@ -549,7 +714,7 @@ foundBest: + cpWordLength = words[wordsFound % LAO_LOOKAHEAD].markedCPLength(); + wordsFound += 1; + } +- ++ + // We come here after having either found a word or not. We look ahead to the + // next word. If it's not a dictionary word, we will combine it withe the word we + // just found (if there is one), but only if the preceding word does not exceed +@@ -587,12 +752,12 @@ foundBest: + } + } + } +- ++ + // Bump the word count if there wasn't already one + if (cuWordLength <= 0) { + wordsFound += 1; + } +- ++ + // Update the length with the passed-over characters + cuWordLength += chars; + } +@@ -601,14 +766,14 @@ foundBest: + utext_setNativeIndex(text, current + cuWordLength); + } + } +- ++ + // Never stop before a combining mark. + int32_t currPos; + while ((currPos = (int32_t)utext_getNativeIndex(text)) < rangeEnd && fMarkSet.contains(utext_current32(text))) { + utext_next32(text); + cuWordLength += (int32_t)utext_getNativeIndex(text) - currPos; + } +- ++ + // Look ahead for possible suffixes if a dictionary word does not follow. + // We do this in code rather than using a rule so that the heuristic + // resynch continues to function. 
For example, one of the suffix characters +@@ -689,16 +854,16 @@ BurmeseBreakEngine::divideUpDictionaryRange( UText *text, + int32_t current; + UErrorCode status = U_ZERO_ERROR; + PossibleWord words[BURMESE_LOOKAHEAD]; +- ++ + utext_setNativeIndex(text, rangeStart); +- ++ + while (U_SUCCESS(status) && (current = (int32_t)utext_getNativeIndex(text)) < rangeEnd) { + cuWordLength = 0; + cpWordLength = 0; + + // Look for candidate words at the current position + int32_t candidates = words[wordsFound%BURMESE_LOOKAHEAD].candidates(text, fDictionary, rangeEnd); +- ++ + // If we found exactly one, use that + if (candidates == 1) { + cuWordLength = words[wordsFound % BURMESE_LOOKAHEAD].acceptMarked(text); +@@ -719,12 +884,12 @@ BurmeseBreakEngine::divideUpDictionaryRange( UText *text, + words[wordsFound%BURMESE_LOOKAHEAD].markCurrent(); + wordsMatched = 2; + } +- ++ + // If we're already at the end of the range, we're done + if ((int32_t)utext_getNativeIndex(text) >= rangeEnd) { + goto foundBest; + } +- ++ + // See if any of the possible second words is followed by a third word + do { + // If we find a third word, stop right away +@@ -742,7 +907,7 @@ foundBest: + cpWordLength = words[wordsFound % BURMESE_LOOKAHEAD].markedCPLength(); + wordsFound += 1; + } +- ++ + // We come here after having either found a word or not. We look ahead to the + // next word. If it's not a dictionary word, we will combine it withe the word we + // just found (if there is one), but only if the preceding word does not exceed +@@ -780,12 +945,12 @@ foundBest: + } + } + } +- ++ + // Bump the word count if there wasn't already one + if (cuWordLength <= 0) { + wordsFound += 1; + } +- ++ + // Update the length with the passed-over characters + cuWordLength += chars; + } +@@ -794,14 +959,14 @@ foundBest: + utext_setNativeIndex(text, current + cuWordLength); + } + } +- ++ + // Never stop before a combining mark. + int32_t currPos; + while ((currPos = (int32_t)utext_getNativeIndex(text)) < rangeEnd && fMarkSet.contains(utext_current32(text))) { + utext_next32(text); + cuWordLength += (int32_t)utext_getNativeIndex(text) - currPos; + } +- ++ + // Look ahead for possible suffixes if a dictionary word does not follow. + // We do this in code rather than using a rule so that the heuristic + // resynch continues to function. For example, one of the suffix characters +@@ -828,51 +993,29 @@ foundBest: + * KhmerBreakEngine + */ + +-// How many words in a row are "good enough"? 
+-static const int32_t KHMER_LOOKAHEAD = 3; +- +-// Will not combine a non-word with a preceding dictionary word longer than this +-static const int32_t KHMER_ROOT_COMBINE_THRESHOLD = 3; +- +-// Will not combine a non-word that shares at least this much prefix with a +-// dictionary word, with a preceding word +-static const int32_t KHMER_PREFIX_COMBINE_THRESHOLD = 3; +- +-// Minimum word size +-static const int32_t KHMER_MIN_WORD = 2; +- +-// Minimum number of characters for two words +-static const int32_t KHMER_MIN_WORD_SPAN = KHMER_MIN_WORD * 2; +- + KhmerBreakEngine::KhmerBreakEngine(DictionaryMatcher *adoptDictionary, UErrorCode &status) + : DictionaryBreakEngine((1 << UBRK_WORD) | (1 << UBRK_LINE)), + fDictionary(adoptDictionary) + { +- fKhmerWordSet.applyPattern(UNICODE_STRING_SIMPLE("[[:Khmr:]&[:LineBreak=SA:]]"), status); ++ ++ clusterLimit = 3; ++ ++ fKhmerWordSet.applyPattern(UNICODE_STRING_SIMPLE("[[:Khmr:]\\u2060\\u200C\\u200D]"), status); + if (U_SUCCESS(status)) { + setCharacters(fKhmerWordSet); + } + fMarkSet.applyPattern(UNICODE_STRING_SIMPLE("[[:Khmr:]&[:LineBreak=SA:]&[:M:]]"), status); + fMarkSet.add(0x0020); +- fEndWordSet = fKhmerWordSet; +- fBeginWordSet.add(0x1780, 0x17B3); +- //fBeginWordSet.add(0x17A3, 0x17A4); // deprecated vowels +- //fEndWordSet.remove(0x17A5, 0x17A9); // Khmer independent vowels that can't end a word +- //fEndWordSet.remove(0x17B2); // Khmer independent vowel that can't end a word +- fEndWordSet.remove(0x17D2); // KHMER SIGN COENG that combines some following characters +- //fEndWordSet.remove(0x17B6, 0x17C5); // Remove dependent vowels +-// fEndWordSet.remove(0x0E31); // MAI HAN-AKAT +-// fEndWordSet.remove(0x0E40, 0x0E44); // SARA E through SARA AI MAIMALAI +-// fBeginWordSet.add(0x0E01, 0x0E2E); // KO KAI through HO NOKHUK +-// fBeginWordSet.add(0x0E40, 0x0E44); // SARA E through SARA AI MAIMALAI +-// fSuffixSet.add(THAI_PAIYANNOI); +-// fSuffixSet.add(THAI_MAIYAMOK); ++ fIgnoreSet.add(0x2060); // WJ ++ fIgnoreSet.add(0x200C, 0x200D); // ZWJ, ZWNJ ++ fBaseSet.applyPattern(UNICODE_STRING_SIMPLE("[[:Khmr:]&[:^M:]]"), status); ++ fPuncSet.applyPattern(UNICODE_STRING_SIMPLE("[\\u17D4\\u17D5\\u17D6\\u17D7\\u17D9:]"), status); + + // Compact for caching. 
+ fMarkSet.compact(); +- fEndWordSet.compact(); +- fBeginWordSet.compact(); +-// fSuffixSet.compact(); ++ fIgnoreSet.compact(); ++ fBaseSet.compact(); ++ fPuncSet.compact(); + } + + KhmerBreakEngine::~KhmerBreakEngine() { +@@ -884,180 +1027,204 @@ KhmerBreakEngine::divideUpDictionaryRange( UText *text, + int32_t rangeStart, + int32_t rangeEnd, + UStack &foundBreaks ) const { +- if ((rangeEnd - rangeStart) < KHMER_MIN_WORD_SPAN) { +- return 0; // Not enough characters for two words ++ uint32_t wordsFound = foundBreaks.size(); ++ UErrorCode status = U_ZERO_ERROR; ++ int32_t before = 0; ++ int32_t after = 0; ++ int32_t finalBefore = 0; ++ int32_t initAfter = 0; ++ int32_t scanStart = rangeStart; ++ int32_t scanEnd = rangeEnd; ++ ++ bool startZwsp = false; ++ bool breakStart = false; ++ bool breakEnd = false; ++ ++ if (rangeStart > 0) { ++ --scanStart; ++ startZwsp = scanBeforeStart(text, scanStart, breakStart); ++ } ++ utext_setNativeIndex(text, rangeStart); ++ scanFwdClusters(text, rangeEnd, initAfter); ++ bool endZwsp = scanAfterEnd(text, utext_nativeLength(text), scanEnd, breakEnd); ++ utext_setNativeIndex(text, rangeEnd - 1); ++ scanBackClusters(text, rangeStart, finalBefore); ++ if (finalBefore < initAfter) { // the whole run is tented so no breaks ++ if (breakStart || fTypes < UBRK_LINE) ++ foundBreaks.push(rangeStart, status); ++ if (breakEnd || fTypes < UBRK_LINE) ++ foundBreaks.push(rangeEnd, status); ++ return foundBreaks.size() - wordsFound; + } + +- uint32_t wordsFound = 0; +- int32_t cpWordLength = 0; +- int32_t cuWordLength = 0; +- int32_t current; +- UErrorCode status = U_ZERO_ERROR; +- PossibleWord words[KHMER_LOOKAHEAD]; ++ scanStart = rangeStart; ++ scanWJ(text, scanStart, rangeEnd, before, after); ++ if (startZwsp || initAfter >= before) { ++ after = initAfter; ++ before = 0; ++ } ++ if (!endZwsp && after > finalBefore && after < rangeEnd) ++ endZwsp = true; ++ if (endZwsp && before > finalBefore) ++ before = finalBefore; + + utext_setNativeIndex(text, rangeStart); ++ int32_t numCodePts = rangeEnd - rangeStart; ++ // bestSnlp[i] is the snlp of the best segmentation of the first i ++ // code points in the range to be matched. ++ UVector32 bestSnlp(numCodePts + 1, status); ++ bestSnlp.addElement(0, status); ++ for(int32_t i = 1; i <= numCodePts; i++) { ++ bestSnlp.addElement(kuint32max, status); ++ } + +- while (U_SUCCESS(status) && (current = (int32_t)utext_getNativeIndex(text)) < rangeEnd) { +- cuWordLength = 0; +- cpWordLength = 0; ++ // prev[i] is the index of the last code point in the previous word in ++ // the best segmentation of the first i characters. Note negative implies ++ // that the code point is part of an unknown word. ++ UVector32 prev(numCodePts + 1, status); ++ for(int32_t i = 0; i <= numCodePts; i++) { ++ prev.addElement(kuint32max, status); ++ } + +- // Look for candidate words at the current position +- int32_t candidates = words[wordsFound%KHMER_LOOKAHEAD].candidates(text, fDictionary, rangeEnd); ++ const int32_t maxWordSize = 20; ++ UVector32 values(maxWordSize, status); ++ values.setSize(maxWordSize); ++ UVector32 lengths(maxWordSize, status); ++ lengths.setSize(maxWordSize); + +- // If we found exactly one, use that +- if (candidates == 1) { +- cuWordLength = words[wordsFound % KHMER_LOOKAHEAD].acceptMarked(text); +- cpWordLength = words[wordsFound % KHMER_LOOKAHEAD].markedCPLength(); +- wordsFound += 1; +- } ++ // Dynamic programming to find the best segmentation. 
+ +- // If there was more than one, see which one can take us forward the most words +- else if (candidates > 1) { +- // If we're already at the end of the range, we're done +- if ((int32_t)utext_getNativeIndex(text) >= rangeEnd) { +- goto foundBest; +- } +- do { +- int32_t wordsMatched = 1; +- if (words[(wordsFound + 1) % KHMER_LOOKAHEAD].candidates(text, fDictionary, rangeEnd) > 0) { +- if (wordsMatched < 2) { +- // Followed by another dictionary word; mark first word as a good candidate +- words[wordsFound % KHMER_LOOKAHEAD].markCurrent(); +- wordsMatched = 2; +- } ++ // In outer loop, i is the code point index, ++ // ix is the corresponding string (code unit) index. ++ // They differ when the string contains supplementary characters. ++ int32_t ix = rangeStart; ++ for (int32_t i = 0; i < numCodePts; ++i, utext_setNativeIndex(text, ++ix)) { ++ if ((uint32_t)bestSnlp.elementAti(i) == kuint32max) { ++ continue; ++ } + +- // If we're already at the end of the range, we're done +- if ((int32_t)utext_getNativeIndex(text) >= rangeEnd) { +- goto foundBest; +- } ++ int32_t count; ++ count = fDictionary->matches(text, numCodePts - i, maxWordSize, ++ NULL, lengths.getBuffer(), values.getBuffer(), NULL, &fIgnoreSet, 2); ++ // Note: lengths is filled with code point lengths ++ // The NULL parameter is the ignored code unit lengths. + +- // See if any of the possible second words is followed by a third word +- do { +- // If we find a third word, stop right away +- if (words[(wordsFound + 2) % KHMER_LOOKAHEAD].candidates(text, fDictionary, rangeEnd)) { +- words[wordsFound % KHMER_LOOKAHEAD].markCurrent(); +- goto foundBest; +- } +- } +- while (words[(wordsFound + 1) % KHMER_LOOKAHEAD].backUp(text)); +- } ++ for (int32_t j = 0; j < count; j++) { ++ int32_t ln = lengths.elementAti(j); ++ if (ln + i >= numCodePts) ++ continue; ++ utext_setNativeIndex(text, ln+ix); ++ int32_t c = utext_current32(text); ++ if (fMarkSet.contains(c) || c == 0x17D2) { // Coeng ++ lengths.removeElementAt(j); ++ values.removeElementAt(j); ++ --j; ++ --count; + } +- while (words[wordsFound % KHMER_LOOKAHEAD].backUp(text)); +-foundBest: +- cuWordLength = words[wordsFound % KHMER_LOOKAHEAD].acceptMarked(text); +- cpWordLength = words[wordsFound % KHMER_LOOKAHEAD].markedCPLength(); +- wordsFound += 1; + } +- +- // We come here after having either found a word or not. We look ahead to the +- // next word. If it's not a dictionary word, we will combine it with the word we +- // just found (if there is one), but only if the preceding word does not exceed +- // the threshold. +- // The text iterator should now be positioned at the end of the word we found. +- if ((int32_t)utext_getNativeIndex(text) < rangeEnd && cpWordLength < KHMER_ROOT_COMBINE_THRESHOLD) { +- // if it is a dictionary word, do nothing. 
If it isn't, then if there is +- // no preceding word, or the non-word shares less than the minimum threshold +- // of characters with a dictionary word, then scan to resynchronize +- if (words[wordsFound % KHMER_LOOKAHEAD].candidates(text, fDictionary, rangeEnd) <= 0 +- && (cuWordLength == 0 +- || words[wordsFound % KHMER_LOOKAHEAD].longestPrefix() < KHMER_PREFIX_COMBINE_THRESHOLD)) { +- // Look for a plausible word boundary +- int32_t remaining = rangeEnd - (current+cuWordLength); +- UChar32 pc; +- UChar32 uc; +- int32_t chars = 0; +- for (;;) { +- int32_t pcIndex = utext_getNativeIndex(text); +- pc = utext_next32(text); +- int32_t pcSize = utext_getNativeIndex(text) - pcIndex; +- chars += pcSize; +- remaining -= pcSize; +- if (remaining <= 0) { ++ if (count == 0) { ++ utext_setNativeIndex(text, ix); ++ int32_t c = utext_current32(text); ++ if (fPuncSet.contains(c) || c == ZWSP || c == WJ) { ++ values.setElementAt(0, count); ++ lengths.setElementAt(1, count++); ++ } else if (fBaseSet.contains(c)) { ++ int32_t currix = utext_getNativeIndex(text); ++ do { ++ utext_next32(text); ++ c = utext_current32(text); ++ if (utext_getNativeIndex(text) >= rangeEnd) + break; +- } +- uc = utext_current32(text); +- if (fEndWordSet.contains(pc) && fBeginWordSet.contains(uc)) { +- // Maybe. See if it's in the dictionary. +- int32_t candidates = words[(wordsFound + 1) % KHMER_LOOKAHEAD].candidates(text, fDictionary, rangeEnd); +- utext_setNativeIndex(text, current+cuWordLength+chars); +- if (candidates > 0) { ++ if (c == 0x17D2) { // Coeng ++ utext_next32(text); ++ c = utext_current32(text); ++ if (!fBaseSet.contains(c) || utext_getNativeIndex(text) >= rangeEnd) { + break; ++ } else { ++ utext_next32(text); ++ c = utext_current32(text); ++ if (utext_getNativeIndex(text) >= rangeEnd) ++ break; + } + } +- } +- +- // Bump the word count if there wasn't already one +- if (cuWordLength <= 0) { +- wordsFound += 1; +- } ++ } while (fMarkSet.contains(c)); ++ values.setElementAt(BADSNLP, count); ++ lengths.setElementAt(utext_getNativeIndex(text) - currix, count++); ++ } else { ++ values.setElementAt(BADSNLP, count); ++ lengths.setElementAt(1, count++); ++ } ++ } + +- // Update the length with the passed-over characters +- cuWordLength += chars; ++ for (int32_t j = 0; j < count; j++) { ++ uint32_t v = values.elementAti(j); ++ int32_t newSnlp = bestSnlp.elementAti(i) + v; ++ int32_t ln = lengths.elementAti(j); ++ utext_setNativeIndex(text, ln+ix); ++ int32_t c = utext_current32(text); ++ while (fPuncSet.contains(c)) { ++ ++ln; ++ utext_next32(text); ++ c = utext_current32(text); + } +- else { +- // Back up to where we were for next iteration +- utext_setNativeIndex(text, current+cuWordLength); ++ int32_t ln_j_i = ln + i; ++ if (newSnlp < bestSnlp.elementAti(ln_j_i)) { ++ if (v == BADSNLP) { ++ int32_t p = prev.elementAti(i); ++ if (p < 0) ++ prev.setElementAt(p, ln_j_i); ++ else ++ prev.setElementAt(-i, ln_j_i); ++ } ++ else ++ prev.setElementAt(i, ln_j_i); ++ bestSnlp.setElementAt(newSnlp, ln_j_i); + } + } ++ } ++ // Start pushing the optimal offset index into t_boundary (t for tentative). ++ // prev[numCodePts] is guaranteed to be meaningful. ++ // We'll first push in the reverse order, i.e., ++ // t_boundary[0] = numCodePts, and afterwards do a swap. ++ UVector32 t_boundary(numCodePts+1, status); + +- // Never stop before a combining mark. 
+- int32_t currPos; +- while ((currPos = (int32_t)utext_getNativeIndex(text)) < rangeEnd && fMarkSet.contains(utext_current32(text))) { +- utext_next32(text); +- cuWordLength += (int32_t)utext_getNativeIndex(text) - currPos; ++ int32_t numBreaks = 0; ++ // No segmentation found, set boundary to end of range ++ while (numCodePts >= 0 && (uint32_t)bestSnlp.elementAti(numCodePts) == kuint32max) { ++ --numCodePts; ++ } ++ if (numCodePts < 0) { ++ t_boundary.addElement(numCodePts, status); ++ numBreaks++; ++ } else { ++ for (int32_t i = numCodePts; (uint32_t)i != kuint32max; i = prev.elementAti(i)) { ++ if (i < 0) i = -i; ++ t_boundary.addElement(i, status); ++ numBreaks++; + } ++ U_ASSERT(prev.elementAti(t_boundary.elementAti(numBreaks - 1)) == 0); ++ } + +- // Look ahead for possible suffixes if a dictionary word does not follow. +- // We do this in code rather than using a rule so that the heuristic +- // resynch continues to function. For example, one of the suffix characters +- // could be a typo in the middle of a word. +-// if ((int32_t)utext_getNativeIndex(text) < rangeEnd && wordLength > 0) { +-// if (words[wordsFound%KHMER_LOOKAHEAD].candidates(text, fDictionary, rangeEnd) <= 0 +-// && fSuffixSet.contains(uc = utext_current32(text))) { +-// if (uc == KHMER_PAIYANNOI) { +-// if (!fSuffixSet.contains(utext_previous32(text))) { +-// // Skip over previous end and PAIYANNOI +-// utext_next32(text); +-// utext_next32(text); +-// wordLength += 1; // Add PAIYANNOI to word +-// uc = utext_current32(text); // Fetch next character +-// } +-// else { +-// // Restore prior position +-// utext_next32(text); +-// } +-// } +-// if (uc == KHMER_MAIYAMOK) { +-// if (utext_previous32(text) != KHMER_MAIYAMOK) { +-// // Skip over previous end and MAIYAMOK +-// utext_next32(text); +-// utext_next32(text); +-// wordLength += 1; // Add MAIYAMOK to word +-// } +-// else { +-// // Restore prior position +-// utext_next32(text); +-// } +-// } +-// } +-// else { +-// utext_setNativeIndex(text, current+wordLength); +-// } +-// } +- +- // Did we find a word on this iteration? If so, push it on the break stack +- if (cuWordLength > 0) { +- foundBreaks.push((current+cuWordLength), status); ++ // Now that we're done, convert positions in t_boundary[] (indices in ++ // the normalized input string) back to indices in the original input UText ++ // while reversing t_boundary and pushing values to foundBreaks. ++ for (int32_t i = numBreaks-1; i >= 0; i--) { ++ int32_t cpPos = t_boundary.elementAti(i); ++ if (cpPos == 0 && !breakStart && fTypes >= UBRK_LINE) continue; ++ int32_t utextPos = cpPos + rangeStart; ++ while (utextPos > after && scanWJ(text, utextPos, scanEnd, before, after)); ++ if (utextPos < before) { ++ // Boundaries are added to foundBreaks output in ascending order. ++ U_ASSERT(foundBreaks.size() == 0 ||foundBreaks.peeki() < utextPos); ++ foundBreaks.push(utextPos, status); + } + } +- ++ + // Don't return a break for the end of the dictionary range if there is one there. 
+- if (foundBreaks.peeki() >= rangeEnd) { ++ if (!breakEnd && fTypes >= UBRK_LINE && foundBreaks.peeki() >= rangeEnd) { + (void) foundBreaks.popi(); +- wordsFound -= 1; + } +- +- return wordsFound; ++ return foundBreaks.size() - wordsFound; + } + + #if !UCONFIG_NO_NORMALIZATION +@@ -1121,7 +1288,7 @@ static inline int32_t utext_i32_flag(int32_t bitIndex) { + return (int32_t)1 << bitIndex; + } + +- ++ + /* + * @param text A UText representing the text + * @param rangeStart The start of the range of dictionary characters +@@ -1129,7 +1296,7 @@ static inline int32_t utext_i32_flag(int32_t bitIndex) { + * @param foundBreaks Output of C array of int32_t break positions, or 0 + * @return The number of breaks found + */ +-int32_t ++int32_t + CjkBreakEngine::divideUpDictionaryRange( UText *inText, + int32_t rangeStart, + int32_t rangeEnd, +@@ -1192,7 +1359,7 @@ CjkBreakEngine::divideUpDictionaryRange( UText *inText, + if (U_FAILURE(status)) { + return 0; + } +- ++ + UnicodeString fragment; + UnicodeString normalizedFragment; + for (int32_t srcI = 0; srcI < inString.length();) { // Once per normalization chunk +@@ -1261,7 +1428,7 @@ CjkBreakEngine::divideUpDictionaryRange( UText *inText, + } + } + } +- ++ + // bestSnlp[i] is the snlp of the best segmentation of the first i + // code points in the range to be matched. + UVector32 bestSnlp(numCodePts + 1, status); +@@ -1271,7 +1438,7 @@ CjkBreakEngine::divideUpDictionaryRange( UText *inText, + } + + +- // prev[i] is the index of the last CJK code point in the previous word in ++ // prev[i] is the index of the last CJK code point in the previous word in + // the best segmentation of the first i characters. + UVector32 prev(numCodePts + 1, status); + for(int32_t i = 0; i <= numCodePts; i++){ +@@ -1305,8 +1472,8 @@ CjkBreakEngine::divideUpDictionaryRange( UText *inText, + // Note: lengths is filled with code point lengths + // The NULL parameter is the ignored code unit lengths. + +- // if there are no single character matches found in the dictionary +- // starting with this charcter, treat character as a 1-character word ++ // if there are no single character matches found in the dictionary ++ // starting with this charcter, treat character as a 1-character word + // with the highest value possible, i.e. the least likely to occur. + // Exclude Korean characters from this treatment, as they should be left + // together by default. +@@ -1380,7 +1547,7 @@ CjkBreakEngine::divideUpDictionaryRange( UText *inText, + numBreaks++; + } + +- // Now that we're done, convert positions in t_boundary[] (indices in ++ // Now that we're done, convert positions in t_boundary[] (indices in + // the normalized input string) back to indices in the original input UText + // while reversing t_boundary and pushing values to foundBreaks. 
+ for (int32_t i = numBreaks-1; i >= 0; i--) { +diff --git a/source/common/dictbe.h b/source/common/dictbe.h +index d3488cd..26caa75 100644 +--- misc/icu/source/common/dictbe.h ++++ build/icu/source/common/dictbe.h +@@ -32,6 +32,15 @@ class Normalizer2; + */ + class DictionaryBreakEngine : public LanguageBreakEngine { + private: ++ ++ /** ++ * <p>Default constructor.</p> ++ * ++ */ ++ DictionaryBreakEngine(); ++ ++ protected: ++ + /** + * The set of characters handled by this engine + * @internal +@@ -46,11 +55,63 @@ class DictionaryBreakEngine : public LanguageBreakEngine { + + uint32_t fTypes; + ++ const int32_t WJ = 0x2060; ++ const int32_t ZWSP = 0x200B; ++ + /** +- * <p>Default constructor.</p> +- * ++ * A Unicode set of all viramas ++ * @internal + */ +- DictionaryBreakEngine(); ++ UnicodeSet fViramaSet; ++ ++ /** ++ * A Unicode set of all base characters ++ * @internal ++ */ ++ UnicodeSet fBaseSet; ++ ++ /** ++ * A Unicode set of all marks ++ * @internal ++ */ ++ UnicodeSet fMarkSet; ++ ++ /** ++ * A Unicode set of all characters ignored ignored in dictionary matching ++ * @internal ++ */ ++ UnicodeSet fIgnoreSet; ++ ++ /** ++ * A Unicode set of all characters ignored ignored in dictionary matching ++ * @internal ++ */ ++ UnicodeSet fSkipStartSet; ++ ++ /** ++ * A Unicode set of all characters ignored ignored in dictionary matching ++ * @internal ++ */ ++ UnicodeSet fSkipEndSet; ++ ++ /** ++ * A Unicode set of all characters that should not be broken before ++ * @internal ++ */ ++ UnicodeSet fNBeforeSet; ++ ++ /** ++ * The number of clusters within which breaks are inhibited ++ * @internal ++ */ ++ int32_t clusterLimit; ++ ++ bool scanWJ(UText *text, int32_t &start, int32_t end, int32_t &before, int32_t &after) const; ++ ++ bool scanBeforeStart(UText *text, int32_t& start, bool &doBreak) const; ++ bool scanAfterEnd(UText *text, int32_t rangeEnd, int32_t& end, bool &doBreak) const; ++ void scanBackClusters(UText *text, int32_t textStart, int32_t& start) const; ++ void scanFwdClusters(UText *text, int32_t textEnd, int32_t& end) const; + + public: + +@@ -81,7 +142,7 @@ class DictionaryBreakEngine : public LanguageBreakEngine { + * <p>Find any breaks within a run in the supplied text.</p> + * + * @param text A UText representing the text. The iterator is left at +- * the end of the run of characters which the engine is capable of handling ++ * the end of the run of characters which the engine is capable of handling + * that starts from the first (or last) character in the range. + * @param startPos The start of the run within the supplied text. + * @param endPos The end of the run within the supplied text. 
+@@ -243,118 +304,120 @@ class LaoBreakEngine : public DictionaryBreakEngine { + + }; + +-/******************************************************************* +- * BurmeseBreakEngine +- */ +- +-/** +- * <p>BurmeseBreakEngine is a kind of DictionaryBreakEngine that uses a +- * DictionaryMatcher and heuristics to determine Burmese-specific breaks.</p> +- * +- * <p>After it is constructed a BurmeseBreakEngine may be shared between +- * threads without synchronization.</p> +- */ +-class BurmeseBreakEngine : public DictionaryBreakEngine { +- private: +- /** +- * The set of characters handled by this engine +- * @internal +- */ +- +- UnicodeSet fBurmeseWordSet; +- UnicodeSet fEndWordSet; +- UnicodeSet fBeginWordSet; +- UnicodeSet fMarkSet; +- DictionaryMatcher *fDictionary; +- +- public: +- +- /** +- * <p>Default constructor.</p> +- * +- * @param adoptDictionary A DictionaryMatcher to adopt. Deleted when the +- * engine is deleted. +- */ +- BurmeseBreakEngine(DictionaryMatcher *adoptDictionary, UErrorCode &status); +- +- /** +- * <p>Virtual destructor.</p> +- */ +- virtual ~BurmeseBreakEngine(); +- +- protected: +- /** +- * <p>Divide up a range of known dictionary characters.</p> +- * +- * @param text A UText representing the text +- * @param rangeStart The start of the range of dictionary characters +- * @param rangeEnd The end of the range of dictionary characters +- * @param foundBreaks Output of C array of int32_t break positions, or 0 +- * @return The number of breaks found +- */ +- virtual int32_t divideUpDictionaryRange( UText *text, +- int32_t rangeStart, +- int32_t rangeEnd, +- UStack &foundBreaks ) const; +- +-}; +- +-/******************************************************************* +- * KhmerBreakEngine +- */ +- +-/** +- * <p>KhmerBreakEngine is a kind of DictionaryBreakEngine that uses a +- * DictionaryMatcher and heuristics to determine Khmer-specific breaks.</p> +- * +- * <p>After it is constructed a KhmerBreakEngine may be shared between +- * threads without synchronization.</p> +- */ +-class KhmerBreakEngine : public DictionaryBreakEngine { +- private: +- /** +- * The set of characters handled by this engine +- * @internal +- */ +- +- UnicodeSet fKhmerWordSet; +- UnicodeSet fEndWordSet; +- UnicodeSet fBeginWordSet; +- UnicodeSet fMarkSet; +- DictionaryMatcher *fDictionary; +- +- public: +- +- /** +- * <p>Default constructor.</p> +- * +- * @param adoptDictionary A DictionaryMatcher to adopt. Deleted when the +- * engine is deleted. 
+- */ +- KhmerBreakEngine(DictionaryMatcher *adoptDictionary, UErrorCode &status); +- +- /** +- * <p>Virtual destructor.</p> +- */ +- virtual ~KhmerBreakEngine(); +- +- protected: +- /** +- * <p>Divide up a range of known dictionary characters.</p> +- * +- * @param text A UText representing the text +- * @param rangeStart The start of the range of dictionary characters +- * @param rangeEnd The end of the range of dictionary characters +- * @param foundBreaks Output of C array of int32_t break positions, or 0 +- * @return The number of breaks found +- */ +- virtual int32_t divideUpDictionaryRange( UText *text, +- int32_t rangeStart, +- int32_t rangeEnd, +- UStack &foundBreaks ) const; +- +-}; +- ++/******************************************************************* ++ * BurmeseBreakEngine ++ */ ++ ++/** ++ * <p>BurmeseBreakEngine is a kind of DictionaryBreakEngine that uses a ++ * DictionaryMatcher and heuristics to determine Burmese-specific breaks.</p> ++ * ++ * <p>After it is constructed a BurmeseBreakEngine may be shared between ++ * threads without synchronization.</p> ++ */ ++class BurmeseBreakEngine : public DictionaryBreakEngine { ++ private: ++ /** ++ * The set of characters handled by this engine ++ * @internal ++ */ ++ ++ UnicodeSet fBurmeseWordSet; ++ UnicodeSet fEndWordSet; ++ UnicodeSet fBeginWordSet; ++ UnicodeSet fMarkSet; ++ DictionaryMatcher *fDictionary; ++ ++ public: ++ ++ /** ++ * <p>Default constructor.</p> ++ * ++ * @param adoptDictionary A DictionaryMatcher to adopt. Deleted when the ++ * engine is deleted. ++ */ ++ BurmeseBreakEngine(DictionaryMatcher *adoptDictionary, UErrorCode &status); ++ ++ /** ++ * <p>Virtual destructor.</p> ++ */ ++ virtual ~BurmeseBreakEngine(); ++ ++ protected: ++ /** ++ * <p>Divide up a range of known dictionary characters.</p> ++ * ++ * @param text A UText representing the text ++ * @param rangeStart The start of the range of dictionary characters ++ * @param rangeEnd The end of the range of dictionary characters ++ * @param foundBreaks Output of C array of int32_t break positions, or 0 ++ * @return The number of breaks found ++ */ ++ virtual int32_t divideUpDictionaryRange( UText *text, ++ int32_t rangeStart, ++ int32_t rangeEnd, ++ UStack &foundBreaks ) const; ++ ++}; ++ ++/******************************************************************* ++ * KhmerBreakEngine ++ */ ++ ++/** ++ * <p>KhmerBreakEngine is a kind of DictionaryBreakEngine that uses a ++ * DictionaryMatcher and heuristics to determine Khmer-specific breaks.</p> ++ * ++ * <p>After it is constructed a KhmerBreakEngine may be shared between ++ * threads without synchronization.</p> ++ */ ++class KhmerBreakEngine : public DictionaryBreakEngine { ++ private: ++ /** ++ * The set of characters handled by this engine ++ * @internal ++ */ ++ ++ UnicodeSet fKhmerWordSet; ++ UnicodeSet fBeginWordSet; ++ UnicodeSet fPuncSet; ++ DictionaryMatcher *fDictionary; ++ ++ const uint32_t BADSNLP = 256 * 20; ++ const uint32_t kuint32max = 0x7FFFFFFF; ++ ++ public: ++ ++ /** ++ * <p>Default constructor.</p> ++ * ++ * @param adoptDictionary A DictionaryMatcher to adopt. Deleted when the ++ * engine is deleted. ++ */ ++ KhmerBreakEngine(DictionaryMatcher *adoptDictionary, UErrorCode &status); ++ ++ /** ... etc. - the rest is truncated _______________________________________________ Libreoffice-commits mailing list [email protected] https://lists.freedesktop.org/mailman/listinfo/libreoffice-commits
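For readers skimming the truncated patch above: the heart of the rewritten KhmerBreakEngine::divideUpDictionaryRange() is a dynamic-programming pass in which bestSnlp[i] holds the cheapest cost of segmenting the first i code points, prev[i] records where the word ending at i began, unknown runs are charged a fixed BADSNLP penalty, and the chosen boundaries are recovered by walking prev[] backwards into t_boundary. The sketch below is a minimal, self-contained C++ illustration of that recurrence only; it is not ICU code, and the toy dictionary, cost constants and helper name are invented for the example.

// Illustrative sketch only -- a miniature, hypothetical re-implementation of
// the bestSnlp/prev recurrence used in the patched KhmerBreakEngine above.
// The dictionary, the cost constants and the helper name are invented for
// this example; the real code drives an ICU DictionaryMatcher over a UText.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>
#include <string>
#include <unordered_set>
#include <vector>

static const uint32_t kInfinity = std::numeric_limits<uint32_t>::max();
static const uint32_t kWordCost = 100;   // assumed cost for a dictionary word
static const uint32_t kBadCost  = 1000;  // assumed penalty for an unknown character

// Returns the exclusive end index of each segmented word.
std::vector<int> segment(const std::string& text,
                         const std::unordered_set<std::string>& dict,
                         std::size_t maxWordLen)
{
    const std::size_t n = text.size();
    std::vector<uint32_t> bestSnlp(n + 1, kInfinity);  // best cost of the first i chars
    std::vector<int> prev(n + 1, -1);                  // start of the word ending at i
    bestSnlp[0] = 0;

    for (std::size_t i = 0; i < n; ++i) {
        if (bestSnlp[i] == kInfinity)
            continue;                                  // position i is unreachable
        for (std::size_t len = 1; len <= maxWordLen && i + len <= n; ++len) {
            uint32_t cost = dict.count(text.substr(i, len)) ? kWordCost : 0;
            if (cost == 0 && len > 1)
                continue;                              // only single chars get a fallback
            if (cost == 0)
                cost = kBadCost;                       // unknown single character
            if (bestSnlp[i] + cost < bestSnlp[i + len]) {
                bestSnlp[i + len] = bestSnlp[i] + cost;
                prev[i + len] = static_cast<int>(i);
            }
        }
    }

    // Walk prev[] back from the end and reverse, as the patch does with t_boundary.
    std::vector<int> breaks;
    for (int pos = static_cast<int>(n); pos > 0; pos = prev[pos])
        breaks.push_back(pos);
    std::reverse(breaks.begin(), breaks.end());
    return breaks;
}

int main() {
    const std::unordered_set<std::string> dict = {"break", "engine", "word"};
    for (int b : segment("breakengineword", dict, 6))
        std::cout << b << ' ';                         // prints: 5 11 15
    std::cout << '\n';
}

The real patch layers more on top of this recurrence: dictionary matches are filtered through fIgnoreSet, breaks are suppressed inside orthographic clusters via scanBackClusters()/scanFwdClusters(), ZWSP/WJ inhibitors are honoured through scanWJ() before a position is pushed to foundBreaks, and trailing punctuation from fPuncSet is absorbed into the preceding word.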
