This is an automated email from the ASF dual-hosted git repository.

aradzinski pushed a commit to branch scala-2.13
in repository https://gitbox.apache.org/repos/asf/incubator-nlpcraft.git


The following commit(s) were added to refs/heads/scala-2.13 by this push:
     new dc4936f  WIP.
dc4936f is described below

commit dc4936f1b0b75efcbcd02d6f1f704e2d9db52b45
Author: Aaron Radzinzski <[email protected]>
AuthorDate: Thu May 20 11:11:45 2021 -0700

    WIP.
---
 .../apache/nlpcraft/common/debug/NCLogHolder.scala |  2 +-
 .../nlpcraft/common/nlp/NCNlpSentenceNote.scala    | 10 +++---
 .../org/apache/nlpcraft/common/util/NCUtils.scala  |  2 +-
 .../apache/nlpcraft/model/impl/NCTokenLogger.scala |  1 -
 .../nlpcraft/probe/mgrs/NCProbeVariants.scala      | 38 +++++++++++-----------
 .../nlp/core/opennlp/NCOpenNlpNerEnricher.scala    | 26 +++++++--------
 .../nlp/enrichers/NCServerEnrichmentManager.scala  |  2 +-
 7 files changed, 39 insertions(+), 42 deletions(-)

diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/common/debug/NCLogHolder.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/common/debug/NCLogHolder.scala
index 6fb5f55..76f9b79 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/common/debug/NCLogHolder.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/common/debug/NCLogHolder.scala
@@ -23,7 +23,7 @@ import java.util
 import com.google.gson.Gson
 import org.apache.nlpcraft.model._
 
-import scala.jdk.CollectionConverters.{CollectionHasAsScala, IterableHasAsJava, MapHasAsJava, SeqHasAsJava}
+import scala.jdk.CollectionConverters._
 
 //
 // NOTE: these classes are specifically designed for JSON marshalling.
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/common/nlp/NCNlpSentenceNote.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/common/nlp/NCNlpSentenceNote.scala
index a29d3ed..7d6b860 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/common/nlp/NCNlpSentenceNote.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/common/nlp/NCNlpSentenceNote.scala
@@ -216,12 +216,12 @@ object NCNlpSentenceNote {
       * @param params Parameters.
       */
     def apply(
-        indexes: Seq[Int],
-        wordIndexesOpt: Option[Seq[Int]],
+        indexes: collection.Seq[Int],
+        wordIndexesOpt: Option[collection.Seq[Int]],
         typ: String,
         params: (String, Any)*
     ): NCNlpSentenceNote = {
-        def calc(seq: Seq[Int]): (Int, Int, Int, java.util.List[Int], Int) =
+        def calc(seq: collection.Seq[Int]): (Int, Int, Int, java.util.List[Int], Int) =
             (U.calcSparsity(seq), seq.min, seq.max, seq.asJava, seq.length)
 
         val (sparsity, tokMinIndex, tokMaxIndex, tokWordIndexes, len) = calc(wordIndexesOpt.getOrElse(indexes))
@@ -249,7 +249,7 @@ object NCNlpSentenceNote {
       * @param typ Type of the node.
       * @param params Parameters.
       */
-    def apply(indexes: Seq[Int], typ: String, params: (String, Any)*): NCNlpSentenceNote =
+    def apply(indexes: collection.Seq[Int], typ: String, params: (String, Any)*): NCNlpSentenceNote =
         apply(indexes, None, typ, params: _*)
 
     /**
@@ -260,6 +260,6 @@ object NCNlpSentenceNote {
       * @param typ Type of the node.
       * @param params Parameters.
       */
-    def apply(indexes: Seq[Int], wordIndexes: Seq[Int], typ: String, params: (String, Any)*): NCNlpSentenceNote =
+    def apply(indexes: collection.Seq[Int], wordIndexes: collection.Seq[Int], typ: String, params: (String, Any)*): NCNlpSentenceNote =
         apply(indexes, Some(wordIndexes), typ, params: _*)
 }
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/common/util/NCUtils.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/common/util/NCUtils.scala
index 6b3b25c..588af4f 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/common/util/NCUtils.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/common/util/NCUtils.scala
@@ -2095,7 +2095,7 @@ object NCUtils extends LazyLogging {
       * @param idx Sequence of indexes.
       * @return
       */
-    def calcSparsity(idx: Seq[Int]): Int =
+    def calcSparsity(idx: collection.Seq[Int]): Int =
         idx.zipWithIndex.tail.map { case (v, i) => Math.abs(v - idx(i - 1)) }.sum - idx.length + 1
 
     /**
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/model/impl/NCTokenLogger.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/model/impl/NCTokenLogger.scala
index 72f8a35..c3a83a6 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/model/impl/NCTokenLogger.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/model/impl/NCTokenLogger.scala
@@ -27,7 +27,6 @@ import org.apache.nlpcraft.model.NCToken
 import org.apache.nlpcraft.model.impl.NCTokenPimp._
 import org.apache.nlpcraft.common.ansi.NCAnsi._
 
-import scala.collection.mutable.ArrayBuffer
 import scala.jdk.CollectionConverters._
 
 /**
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/NCProbeVariants.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/NCProbeVariants.scala
index cd349ba..dcba4da 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/NCProbeVariants.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/NCProbeVariants.scala
@@ -17,8 +17,8 @@
 
 package org.apache.nlpcraft.probe.mgrs
 
+import org.apache.nlpcraft.common.nlp.{NCNlpSentence, NCNlpSentenceNote, NCNlpSentenceToken}
 import org.apache.nlpcraft.common.nlp.pos.NCPennTreebank
-import org.apache.nlpcraft.common.nlp.{NCNlpSentence => NlpSentence, NCNlpSentenceNote => NlpNote, NCNlpSentenceToken => NlpToken}
 import org.apache.nlpcraft.common.{NCE, TOK_META_ALIASES_KEY}
 import org.apache.nlpcraft.model.impl.{NCTokenImpl, NCTokenLogger, NCVariantImpl}
 import org.apache.nlpcraft.model.{NCToken, NCVariant}
@@ -26,8 +26,8 @@ import org.apache.nlpcraft.model.{NCToken, NCVariant}
 import java.io.{Serializable => JSerializable}
 import java.util
 import java.util.Collections.singletonList
-import scala.collection.JavaConverters._
 import scala.collection.{Seq, mutable}
+import scala.jdk.CollectionConverters._
 
 /**
   * Sentence to variants converter.
@@ -82,10 +82,10 @@ object NCProbeVariants {
       */
     private def findDeletedToken(
         key: Key,
-        delNotes: Map[NlpNote, Seq[NlpToken]],
+        delNotes: Map[NCNlpSentenceNote, Seq[NCNlpSentenceToken]],
         noteTypePred: String => Boolean
-    ): Option[NlpToken] =
-        delNotes.toStream.
+    ): Option[NCNlpSentenceNote] =
+        delNotes.to(LazyList).
             flatMap { case (delNote, delNoteToks) =>
                 if (noteTypePred(delNote.noteType)) {
                     val toks =
@@ -98,12 +98,12 @@ object NCProbeVariants {
                     toks.size match {
                         case 0 => None
                         case _ =>
-                            val artTok = NlpToken(IDX)
+                            val artTok = NCNlpSentenceToken(IDX)
 
                             artTok.add(mkNote(toks))
 
                             if (key.id != "nlpcraft:nlp") {
-                                val ps = mkNlpNoteParams()
+                                var ps = mkNlpNoteParams()
 
                                 delNote.noteType match {
                                     case "nlpcraft:relation" | "nlpcraft:limit" => ps += "indexes" -> IDXS
@@ -125,17 +125,17 @@ object NCProbeVariants {
       *
       * @return
       */
-    private def mkNlpNoteParams(): mutable.ArrayBuffer[(String, JSerializable)] =
-        mutable.ArrayBuffer.empty[(String, JSerializable)] ++ Seq("tokMinIndex" -> IDX, "tokMaxIndex" -> IDX)
+    private def mkNlpNoteParams(): Seq[(String, JSerializable)] =
+        Seq("tokMinIndex" -> IDX, "tokMaxIndex" -> IDX)
 
     /**
       *
       * @param srcToks
       * @return
       */
-    private def mkNote(srcToks: Seq[NlpToken]): NlpNote = {
+    private def mkNote(srcToks: Seq[NCNlpSentenceToken]): NCNlpSentenceNote = {
         // Note, it adds stop-words too.
-        def mkValue(get: NlpToken => String): String = {
+        def mkValue(get: NCNlpSentenceToken => String): String = {
             val buf = mutable.Buffer.empty[String]
 
             val n = srcToks.size - 1
@@ -150,10 +150,10 @@ object NCProbeVariants {
             buf.mkString
         }
 
-        def all(is: NlpToken => Boolean): Boolean = srcToks.forall(is)
-        def exists(is: NlpToken => Boolean): Boolean = srcToks.exists(is)
+        def all(is: NCNlpSentenceToken => Boolean): Boolean = srcToks.forall(is)
+        def exists(is: NCNlpSentenceToken => Boolean): Boolean = srcToks.exists(is)
 
-        val origText = mkValue((t: NlpToken) => t.origText)
+        val origText = mkValue((t: NCNlpSentenceToken) => t.origText)
 
         val params = Seq(
             "index" -> IDX,
@@ -175,7 +175,7 @@ object NCProbeVariants {
             "swear" -> exists(_.isSwearWord)
         )
 
-        NlpNote(Seq(IDX.intValue()), srcToks.flatMap(_.wordIndexes).distinct.sorted, "nlpcraft:nlp", params: _*)
+        NCNlpSentenceNote(Seq(IDX.intValue()), srcToks.flatMap(_.wordIndexes).distinct.sorted, "nlpcraft:nlp", params: _*)
     }
 
     /**
@@ -186,12 +186,12 @@ object NCProbeVariants {
       * @param nlpSens Sentences.
       * @param lastPhase Flag.
       */
-    def convert(srvReqId: String, mdl: NCProbeModel, nlpSens: Seq[NlpSentence], lastPhase: Boolean = false): Seq[NCVariant] = {
+    def convert(srvReqId: String, mdl: NCProbeModel, nlpSens: Seq[NCNlpSentence], lastPhase: Boolean = false): Seq[NCVariant] = {
         var vars =
             nlpSens.flatMap(nlpSen => {
                 var ok = true
 
-                def mkToken(nlpTok: NlpToken): NCTokenImpl = {
+                def mkToken(nlpTok: NCNlpSentenceToken): NCTokenImpl = {
                     val ncTok = NCTokenImpl(mdl, srvReqId, nlpTok)
 
                     nlpSen.addNlpToken(nlpTok)
@@ -202,7 +202,7 @@ object NCProbeVariants {
                 val toks = nlpSen.map(mkToken)
                 val keys2Toks = toks.map(t => Key(t) -> t).toMap
 
-                def process(tok: NCTokenImpl, tokNlp: NlpToken): Unit = {
+                def process(tok: NCTokenImpl, tokNlp: NCNlpSentenceToken): Unit = {
                     val optList: Option[util.List[util.HashMap[String, JSerializable]]] =
                         tokNlp.find(_.isUser) match {
                             case Some(u) => u.dataOpt("parts")
@@ -232,7 +232,7 @@ object NCProbeVariants {
                                             case None =>
                                                 nlpSen.getInitialNlpNote(key.from, key.to) match {
                                                     case Some(nlpNote) =>
-                                                        val artTok = NlpToken(IDX)
+                                                        val artTok = NCNlpSentenceToken(IDX)
 
                                                         artTok.add(nlpNote.clone(mkNlpNoteParams(): _*))
 
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/server/nlp/core/opennlp/NCOpenNlpNerEnricher.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/server/nlp/core/opennlp/NCOpenNlpNerEnricher.scala
index 1e95e6d..586641b 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/server/nlp/core/opennlp/NCOpenNlpNerEnricher.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/server/nlp/core/opennlp/NCOpenNlpNerEnricher.scala
@@ -124,20 +124,18 @@ object NCOpenNlpNerEnricher extends NCService with NCNlpNerEnricher with NCIgnit
     
             case class Holder(start: Int, end: Int, name: String, probability: Double)
     
-            val hs =
-                this.
-                    synchronized {
-                        val res = nerFinders.
-                            filter { case (_, tokName) => ebiTokens.contains(tokName)}.
-                            flatMap {
-                                case (finder, name) =>
-                                    finder.find(words).map(p => Holder(p.getStart, p.getEnd - 1, name, p.getProb))
-                            }
-    
-                            nerFinders.keys.foreach(_.clearAdaptiveData())
-    
-                            res
-                    }.toSeq
+            val hs = this. synchronized {
+                val res = nerFinders.
+                    filter { case (_, tokName) => ebiTokens.contains(tokName)}.toSeq.
+                    flatMap {
+                        case (finder, name) =>
+                            finder.find(words).map(p => Holder(p.getStart, p.getEnd - 1, name, p.getProb))
+                    }
+
+                    nerFinders.keys.foreach(_.clearAdaptiveData())
+
+                    res
+            }
     
             hs.
                 filter(h => ebiTokens.contains(h.name)).
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/server/nlp/enrichers/NCServerEnrichmentManager.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/server/nlp/enrichers/NCServerEnrichmentManager.scala
index dee1e6b..9fc7697 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/server/nlp/enrichers/NCServerEnrichmentManager.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/server/nlp/enrichers/NCServerEnrichmentManager.scala
@@ -205,7 +205,7 @@ object NCServerEnrichmentManager extends NCService with NCIgniteInstance {
                getOrElse(throw new NCE(s"Header not found for: ${hdr.noteType}"))._2
 
             (x._1 * 100) + x._2.indexOf(hdr.noteName)
-        })
+        }).toSeq
 
         val tbl = NCAsciiTable(headers.map(_.header): _*)
 
