This is an automated email from the ASF dual-hosted git repository.

sergeykamov pushed a commit to branch NLPCRAFT-287
in repository https://gitbox.apache.org/repos/asf/incubator-nlpcraft.git


commit a271ab884f460672eb0a322d95bc506e3c95637f
Author: Sergey Kamov <[email protected]>
AuthorDate: Tue Apr 6 14:39:38 2021 +0300

    WIP.
---
 .../org/apache/nlpcraft/common/nlp/NCNlpSentenceNote.scala |  9 +++++----
 .../probe/mgrs/nlp/enrichers/model/NCModelEnricher.scala   | 14 +++++---------
 2 files changed, 10 insertions(+), 13 deletions(-)

diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/common/nlp/NCNlpSentenceNote.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/common/nlp/NCNlpSentenceNote.scala
index 22b1b12..c0923ae 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/common/nlp/NCNlpSentenceNote.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/common/nlp/NCNlpSentenceNote.scala
@@ -174,10 +174,11 @@ class NCNlpSentenceNote(private val values: Map[String, java.io.Serializable]) e
         values.toSeq.sortBy(t ⇒ { // Don't show internal ID.
             val typeSort = t._1 match {
                 case "noteType" ⇒ 0
-                case "wordIndexes" ⇒ 1
-                case "direct" ⇒ 2
-                case "sparsity" ⇒ 3
-                case "parts" ⇒ 4
+                case "origText" ⇒ 1
+                case "wordIndexes" ⇒ 2
+                case "direct" ⇒ 3
+                case "sparsity" ⇒ 4
+                case "parts" ⇒ 5
 
                 case _ ⇒ 100
             }
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/nlp/enrichers/model/NCModelEnricher.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/nlp/enrichers/model/NCModelEnricher.scala
index 30f5084..4464a1b 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/nlp/enrichers/model/NCModelEnricher.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/nlp/enrichers/model/NCModelEnricher.scala
@@ -133,7 +133,7 @@ object NCModelEnricher extends NCProbeEnricher with DecorateAsScala {
         parts: Seq[TokType]
     ) extends Ordered[ElementMatch] {
         // Tokens sparsity.
-        lazy val sparsity = U.calcSparsity(tokens.map(_.index))
+        lazy val sparsity: Int = U.calcSparsity(tokens.map(_.index))
 
         // Number of tokens.
         lazy val length: Int = tokens.size
@@ -297,10 +297,7 @@ object NCModelEnricher extends NCProbeEnricher with DecorateAsScala {
     }
 
     private def mkCache(): Cache =
-        mutable.HashMap.empty[
-            String,
-            mutable.ArrayBuffer[Seq[Int]]
-        ].withDefault(_ ⇒ mutable.ArrayBuffer.empty[Seq[Int]])
+        mutable.HashMap.empty[String, mutable.ArrayBuffer[Seq[Int]]].withDefault(_ ⇒ mutable.ArrayBuffer.empty[Seq[Int]])
 
     private def toNlpTokens(tows: Seq[NCDslContent], ns: NCNlpSentence): Seq[NlpToken] =
         (
@@ -421,11 +418,11 @@ object NCModelEnricher extends NCProbeEnricher with DecorateAsScala {
             // Attempt to match each element.
             for (
                 elm ← mdl.elements.values;
-                elemId = elm.getId;
+                elemId = elm.getId
+                if !alreadyMarked(toks, elm.getId);
                 sparseEnabled = !cacheSparse(elemId).exists(_.containsSlice(indexes));
                 notSparseEnabled = !cacheNotSparse(elemId).exists(_.containsSlice(indexes))
-
-                if !alreadyMarked(toks, elm.getId) && (sparseEnabled || notSparseEnabled)
+                if sparseEnabled || notSparseEnabled
             ) {
                 var found = false
@@ -527,7 +524,6 @@ object NCModelEnricher extends NCProbeEnricher with DecorateAsScala {
 
                 val matchCnt = matchesNorm.size
 
-                // TODO:matchesNorm
                 // Add notes for all remaining (non-intersecting) matches.
                 for ((m, idx) ← matches.zipWithIndex) {
