Posted to commits@nlpcraft.apache.org by se...@apache.org on 2021/03/11 21:08:40 UTC
[incubator-nlpcraft] branch master updated: DSL synonyms processing refactoring.
This is an automated email from the ASF dual-hosted git repository.
sergeykamov pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-nlpcraft.git
The following commit(s) were added to refs/heads/master by this push:
new 3146084 DSL synonyms processing refactoring.
3146084 is described below
commit 314608446e9a6108c711d4b92d502d5ad47e827e
Author: Sergey Kamov <sk...@gmail.com>
AuthorDate: Fri Mar 12 00:07:55 2021 +0300
DSL synonyms processing refactoring.
---
.../nlpcraft/common/nlp/NCNlpSentenceToken.scala | 9 +-
.../nlpcraft/examples/sql/db/SqlAccess.scala | 2 +-
.../nlpcraft/probe/mgrs/NCProbeSynonym.scala | 16 +-
.../mgrs/nlp/enrichers/model/NCModelEnricher.scala | 273 +++++++++++++--------
.../scala/org/apache/nlpcraft/NCTestContext.scala | 2 +-
.../org/apache/nlpcraft/NCTestEnvironment.java | 6 +
.../nlpcraft/examples/sql/NCSqlExampleSpec.scala | 2 +-
.../nlpcraft/examples/sql/NCSqlModelSpec.scala | 2 +-
.../model/NCEnricherNestedModelSpec4.scala | 2 +-
9 files changed, 198 insertions(+), 116 deletions(-)
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/common/nlp/NCNlpSentenceToken.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/common/nlp/NCNlpSentenceToken.scala
index 7be0bee..6017a4b 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/common/nlp/NCNlpSentenceToken.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/common/nlp/NCNlpSentenceToken.scala
@@ -62,6 +62,13 @@ case class NCNlpSentenceToken(
def getNotes(noteType: String): Iterable[NCNlpSentenceNote] = notes.filter(_.noteType == noteType)
/**
+ *
+ * @param noteType Note type to check.
+ * @return `true` if this token has at least one note of the given type.
+ */
+ def exists(noteType: String): Boolean = notes.exists(_.noteType == noteType)
+
+ /**
* Clones note.
* Shallow copy.
*/
@@ -163,7 +170,7 @@ case class NCNlpSentenceToken(
*
* @param types Note type(s) to check.
*/
- def isTypeOf(types: String*): Boolean = types.exists(t ⇒ getNotes(t).nonEmpty)
+ def isTypeOf(types: String*): Boolean = types.exists(exists)
/**
* Adds element.
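
For reference, a minimal self-contained sketch of the exists/isTypeOf refactoring above. Note and Tok are simplified stand-ins for NCNlpSentenceNote and NCNlpSentenceToken (an assumption for illustration only):

    // Hypothetical stand-ins: only 'noteType' matters for this sketch.
    case class Note(noteType: String)

    case class Tok(notes: Seq[Note]) {
        def getNotes(noteType: String): Iterable[Note] = notes.filter(_.noteType == noteType)

        // New helper: tests presence directly instead of materializing a filtered collection.
        def exists(noteType: String): Boolean = notes.exists(_.noteType == noteType)

        // Before: types.exists(t => getNotes(t).nonEmpty)
        // After: same result via the eta-expanded helper, with no intermediate Iterable.
        def isTypeOf(types: String*): Boolean = types.exists(exists)
    }

    object ExistsDemo extends App {
        val tok = Tok(Seq(Note("nlpcraft:nlp"), Note("myElem")))

        assert(tok.isTypeOf("myElem"))                  // Matches one type.
        assert(tok.isTypeOf("missing", "nlpcraft:nlp")) // Any match suffices.
        assert(!tok.isTypeOf("missing"))                // No such note type.
    }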
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/examples/sql/db/SqlAccess.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/examples/sql/db/SqlAccess.scala
index b3dbb45..8ee31d1 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/examples/sql/db/SqlAccess.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/examples/sql/db/SqlAccess.scala
@@ -30,7 +30,7 @@ import resource.managed
* Ad-hoc querying for H2 Database. This is a simple, single thread implementation.
*/
object SqlAccess extends LazyLogging {
- private final val LOG_ROWS = 10
+ private final val LOG_ROWS = 3
private var conn: Connection = _
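
The only functional change in this file is the lower cap on logged rows (10 down to 3). A hedged sketch of how such a cap is typically applied, assuming LOG_ROWS limits how many result rows get echoed to the log (the actual logging code of SqlAccess is not part of this diff):

    object LogRowsDemo extends App {
        private final val LOG_ROWS = 3

        // Illustration only: print at most LOG_ROWS rows, then a summary line.
        def logRows(rows: Seq[String]): Unit = {
            rows.take(LOG_ROWS).foreach(println)
            if (rows.size > LOG_ROWS) println(s"... and ${rows.size - LOG_ROWS} more rows.")
        }

        logRows((1 to 5).map(i => s"row $i"))
    }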
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/NCProbeSynonym.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/NCProbeSynonym.scala
index b8b7dc6..6f7e35a 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/NCProbeSynonym.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/NCProbeSynonym.scala
@@ -19,6 +19,7 @@ package org.apache.nlpcraft.probe.mgrs
import org.apache.nlpcraft.common.nlp.{NCNlpSentenceToken, NCNlpSentenceTokenBuffer}
import org.apache.nlpcraft.model._
+import org.apache.nlpcraft.probe.mgrs.NCProbeSynonym.NCDslContent
import org.apache.nlpcraft.probe.mgrs.NCProbeSynonymChunkKind._
import scala.collection.mutable.ArrayBuffer
@@ -77,7 +78,10 @@ class NCProbeSynonym(
case (tok, chunk) ⇒
chunk.kind match {
case TEXT ⇒ chunk.wordStem == tok.stem
- case REGEX ⇒ chunk.regex.matcher(tok.origText).matches() || chunk.regex.matcher(tok.normText).matches()
+ case REGEX ⇒
+ val regex = chunk.regex
+
+ regex.matcher(tok.origText).matches() || regex.matcher(tok.normText).matches()
case DSL ⇒ throw new AssertionError()
case _ ⇒ throw new AssertionError()
}
@@ -92,17 +96,13 @@ class NCProbeSynonym(
* @param tows
* @return
*/
- def isMatch(tows: Seq[Either[NCToken, NCNlpSentenceToken]]): Boolean = {
+ def isMatch(tows: Seq[NCDslContent]): Boolean = {
require(tows != null)
- type Token = NCToken
- type Word = NCNlpSentenceToken
- type TokenOrWord = Either[Token, Word]
-
if (tows.length == length && tows.count(_.isLeft) >= dslChunks)
tows.zip(this).sortBy(p ⇒ getSort(p._2.kind)).forall {
case (tow, chunk) ⇒
- def get0[T](fromToken: Token ⇒ T, fromWord: Word ⇒ T): T =
+ def get0[T](fromToken: NCToken ⇒ T, fromWord: NCNlpSentenceToken ⇒ T): T =
if (tow.isLeft) fromToken(tow.left.get) else fromWord(tow.right.get)
chunk.kind match {
@@ -205,6 +205,8 @@ class NCProbeSynonym(
}
object NCProbeSynonym {
+ type NCDslContent = Either[NCToken, NCNlpSentenceToken]
+
/**
*
* @param isElementId
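
NCDslContent replaces the local Token/Word/TokenOrWord aliases that isMatch used to declare. A minimal sketch of handling such an Either-based content type, with Tok and Word as stand-ins for NCToken and NCNlpSentenceToken (assumed names for illustration):

    object DslContentDemo extends App {
        case class Tok(origText: String)
        case class Word(origText: String)

        // Same shape as NCDslContent: a resolved token on the left, a raw word on the right.
        type DslContent = Either[Tok, Word]

        // Mirrors get0 in isMatch: apply the accessor for whichever side is present.
        def get0[T](tow: DslContent, fromToken: Tok => T, fromWord: Word => T): T =
            tow.fold(fromToken, fromWord)

        val tows: Seq[DslContent] = Seq(Left(Tok("NYC")), Right(Word("in")))

        // The dslChunks pre-check counts the Left (token) entries the same way.
        assert(tows.count(_.isLeft) == 1)

        tows.foreach(t => println(get0(t, _.origText, _.origText)))
    }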
diff --git a/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/nlp/enrichers/model/NCModelEnricher.scala b/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/nlp/enrichers/model/NCModelEnricher.scala
index 0a11314..26821ca 100644
--- a/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/nlp/enrichers/model/NCModelEnricher.scala
+++ b/nlpcraft/src/main/scala/org/apache/nlpcraft/probe/mgrs/nlp/enrichers/model/NCModelEnricher.scala
@@ -21,13 +21,12 @@ import io.opencensus.trace.Span
import org.apache.nlpcraft.common._
import org.apache.nlpcraft.common.nlp.{NCNlpSentenceToken, NCNlpSentenceTokenBuffer, _}
import org.apache.nlpcraft.model._
-import org.apache.nlpcraft.model.impl.NCTokenLogger
+import org.apache.nlpcraft.probe.mgrs.NCProbeSynonym.NCDslContent
import org.apache.nlpcraft.probe.mgrs.NCProbeSynonymChunkKind.{NCSynonymChunkKind, TEXT}
import org.apache.nlpcraft.probe.mgrs.nlp.NCProbeEnricher
import org.apache.nlpcraft.probe.mgrs.nlp.impl.NCRequestImpl
import org.apache.nlpcraft.probe.mgrs.sentence.NCSentenceManager
import org.apache.nlpcraft.probe.mgrs.{NCProbeModel, NCProbeSynonym, NCProbeVariants}
-import org.apache.nlpcraft.probe.mgrs.{NCProbeModel, NCProbeSynonym, NCProbeVariants}
import java.io.Serializable
import java.util
@@ -41,25 +40,87 @@ import scala.compat.java8.OptionConverters._
* Model elements enricher.
*/
object NCModelEnricher extends NCProbeEnricher with DecorateAsScala {
- case class Complex(data: Either[NCToken, NCNlpSentenceToken]) {
- lazy val isToken: Boolean = data.isLeft
- lazy val isWord: Boolean = data.isRight
- lazy val token: NCToken = data.left.get
- lazy val word: NCNlpSentenceToken = data.right.get
- lazy val origText: String = if (isToken) token.origText else word.origText
- lazy val wordIndexes: Seq[Int] = if (isToken) token.wordIndexes else word.wordIndexes
+ object Complex {
+ def apply(t: NCToken): Complex =
+ Complex(
+ data = Left(t),
+ isToken = true,
+ isWord = false,
+ token = t,
+ word = null,
+ origText = t.origText,
+ wordIndexes = t.wordIndexes.toSet,
+ minIndex = t.wordIndexes.head,
+ maxIndex = t.wordIndexes.last
+ )
+
+ def apply(t: NCNlpSentenceToken): Complex =
+ Complex(
+ data = Right(t),
+ isToken = false,
+ isWord = true,
+ token = null,
+ word = t,
+ origText = t.origText,
+ wordIndexes = t.wordIndexes.toSet,
+ minIndex = t.wordIndexes.head,
+ maxIndex = t.wordIndexes.last
+ )
+ }
- private lazy val hash = if (isToken) token.hashCode() else word.hashCode()
+ case class Complex(
+ data: NCDslContent,
+ isToken: Boolean,
+ isWord: Boolean,
+ token: NCToken,
+ word: NCNlpSentenceToken,
+ origText: String,
+ wordIndexes: Set[Int],
+ minIndex: Int,
+ maxIndex: Int
+ ) {
+ private final val hash = if (isToken) Seq(wordIndexes, token.getId).hashCode() else wordIndexes.hashCode()
override def hashCode(): Int = hash
+
+ def isSubsetOf(minIndex: Int, maxIndex: Int, indexes: Set[Int]): Boolean =
+ if (this.minIndex > maxIndex || this.maxIndex < minIndex)
+ false
+ else
+ wordIndexes.subsetOf(indexes)
+
override def equals(obj: Any): Boolean = obj match {
- case x: Complex ⇒ isToken && x.isToken && token == x.token || isWord && x.isWord && word == x.word
+ case x: Complex ⇒
+ hash == x.hash && (isToken && x.isToken && token == x.token || isWord && x.isWord && word == x.word)
case _ ⇒ false
}
// Added for debug reasons.
- override def toString: String =
- if (isToken) s"Token: '${token.origText} (${token.getId})'" else s"Word: '${word.origText}'"
+ override def toString: String = {
+ val idxs = wordIndexes.mkString(",")
+
+ if (isToken) s"'$origText' (${token.getId}) [$idxs]]" else s"'$origText' [$idxs]"
+ }
+ }
+
+ object ComplexSeq {
+ def apply(all: Seq[Complex]): ComplexSeq = ComplexSeq(all.filter(_.isToken), all.flatMap(_.wordIndexes).toSet)
+ }
+
+ case class ComplexSeq(tokensComplexes: Seq[Complex], wordsIndexes: Set[Int]) {
+ private val (idxsSet: Set[Int], minIndex: Int, maxIndex: Int) = {
+ val seq = tokensComplexes.flatMap(_.wordIndexes).distinct.sorted
+
+ (seq.toSet, seq.head, seq.last)
+ }
+
+ def isIntersect(minIndex: Int, maxIndex: Int, idxsSet: Set[Int]): Boolean =
+ if (this.minIndex > maxIndex || this.maxIndex < minIndex)
+ false
+ else
+ this.idxsSet.exists(idxsSet.contains)
+
+ override def toString: String = tokensComplexes.mkString(" | ")
}
// Found-by-synonym model element.
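
isSubsetOf and isIntersect above both guard the set operation with a cheap [minIndex, maxIndex] interval test. A standalone sketch of that pattern (Span is a made-up name; the index data is arbitrary):

    object SpanDemo extends App {
        case class Span(wordIndexes: Set[Int]) {
            private val minIndex = wordIndexes.min
            private val maxIndex = wordIndexes.max

            def isSubsetOf(min: Int, max: Int, indexes: Set[Int]): Boolean =
                // Disjoint intervals reject immediately; subsetOf runs only on overlap.
                if (minIndex > max || maxIndex < min) false
                else wordIndexes.subsetOf(indexes)
        }

        val s = Span(Set(2, 3))

        assert(s.isSubsetOf(0, 5, Set(0, 1, 2, 3, 4, 5))) // Fully contained.
        assert(!s.isSubsetOf(4, 9, Set(4, 5, 6)))         // Fast reject: intervals disjoint.
    }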
@@ -76,8 +137,7 @@ object NCModelEnricher extends NCProbeEnricher with DecorateAsScala {
// Number of tokens.
lazy val length: Int = tokens.size
-
- private lazy val tokensSet = tokens.toSet
+ private lazy val tokensSet: Set[NCNlpSentenceToken] = tokens.toSet
def isSubSet(toks: Set[NCNlpSentenceToken]): Boolean = toks.subsetOf(tokensSet)
@@ -105,19 +165,19 @@ object NCModelEnricher extends NCProbeEnricher with DecorateAsScala {
}
/**
- *
- * @param parent Optional parent span.
- * @return
- */
+ *
+ * @param parent Optional parent span.
+ * @return
+ */
override def start(parent: Span = null): NCService = startScopedSpan("start", parent) { _ ⇒
ackStarting()
ackStarted()
}
/**
- *
- * @param parent Optional parent span.
- */
+ *
+ * @param parent Optional parent span.
+ */
override def stop(parent: Span = null): Unit = startScopedSpan("stop", parent) { _ ⇒
ackStopping()
ackStopped()
@@ -129,7 +189,7 @@ object NCModelEnricher extends NCProbeEnricher with DecorateAsScala {
*
* @param ns NLP sentence to jiggle.
* @param factor Distance of left or right jiggle, i.e. how far can an individual token move
- * left or right in the sentence.
+ * left or right in the sentence.
*/
private def jiggle(ns: NCNlpSentenceTokenBuffer, factor: Int): Iterator[NCNlpSentenceTokenBuffer] = {
require(factor >= 0)
@@ -269,44 +329,6 @@ object NCModelEnricher extends NCProbeEnricher with DecorateAsScala {
private def combos[T](toks: Seq[T]): Seq[Seq[T]] =
(for (n ← toks.size until 0 by -1) yield toks.sliding(n)).flatten.map(p ⇒ p)
- /**
- *
- * @param initialSen
- * @param collapsedSen
- * @param nlpToks
- */
- private def convert(
- initialSen: NCNlpSentence, collapsedSen: Seq[Seq[NCToken]], nlpToks: Seq[NCNlpSentenceToken]
- ): Seq[Seq[Complex]] = {
- val nlpWordIdxs = nlpToks.flatMap(_.wordIndexes)
-
- def in(t: NCToken): Boolean = t.wordIndexes.exists(nlpWordIdxs.contains)
- def inStrict(t: NCToken): Boolean = t.wordIndexes.forall(nlpWordIdxs.contains)
- def isSingleWord(t: NCToken): Boolean = t.wordIndexes.length == 1
-
- collapsedSen.
- map(_.filter(in)).
- filter(_.nonEmpty).flatMap(varToks ⇒
- // Tokens splitting.
- // For example sentence "A B C D E" (5 words) processed as 3 tokens on first phase after collapsing
- // 'A B' (2 words), 'C D' (2 words) and 'E' (1 word)
- // So, result combinations will be:
- // Token(AB) + Token(CD) + Token(E)
- // Token(AB) + Word(C) + Word(D) + Token(E)
- // Word(A) + Word(B) + Token(CD) + Token(E)
- // Word(A) + Word(B) + Word(C) + Word(D) + Token(E)
- combos(varToks).map(toksComb ⇒
- varToks.flatMap(t ⇒
- // Single word token is not split as words - token.
- // Partly (not strict in) token - word.
- if (inStrict(t) && (toksComb.contains(t) || isSingleWord(t)))
- Seq(Complex(Left(t)))
- else
- t.wordIndexes.filter(nlpWordIdxs.contains).map(i ⇒ Complex(Right(initialSen(i))))
- )
- ).filter(_.exists(_.isToken)) // Drops without tokens (DSL part works with tokens).
- ).distinct
- }
/**
*
@@ -335,34 +357,51 @@ object NCModelEnricher extends NCProbeEnricher with DecorateAsScala {
* Gets synonyms sorted in descending order by their weight (already prepared),
* i.e. first synonym in the sequence is the most important one.
*
- * @param fastMap
+ * @param fastMap {Element ID → {Synonym length → T}}
* @param elmId
* @param len
*/
- def fastAccess[T](
- fastMap: Map[String /*Element ID*/, Map[Int /*Synonym length*/, T]],
- elmId: String,
- len: Int
- ): Option[T] =
- fastMap.get(elmId) match {
- case Some(m) ⇒ m.get(len)
- case None ⇒ None
- }
+ def fastAccess[T](fastMap: Map[String, Map[Int, T]], elmId: String, len: Int): Option[T] =
+ fastMap.getOrElse(elmId, Map.empty[Int, T]).get(len)
/**
*
* @param toks
* @return
*/
- def tokString(toks: Seq[NCNlpSentenceToken]): String =
- toks.map(t ⇒ (t.origText, t.index)).mkString(" ")
+ def tokString(toks: Seq[NCNlpSentenceToken]): String = toks.map(t ⇒ (t.origText, t.index)).mkString(" ")
var permCnt = 0
- lazy val collapsedSens = NCProbeVariants.convert(
- ns.srvReqId,
- mdl,
- NCSentenceManager.collapse(mdl.model, ns.clone())
- ).map(_.asScala)
+
+ val collapsedSens =
+ NCProbeVariants.convert(ns.srvReqId, mdl, NCSentenceManager.collapse(mdl.model, ns.clone())).map(_.asScala)
+ val complexesWords = ns.map(Complex(_))
+ val complexes =
+ collapsedSens.
+ flatMap(sen ⇒
+ // Token splitting.
+ // For example, the sentence "A B C D E" (5 words) is collapsed into 3 tokens on the first phase:
+ // 'A B' (2 words), 'C D' (2 words) and 'E' (1 word).
+ // The resulting combinations are:
+ // Token(AB) + Token(CD) + Token(E)
+ // Token(AB) + Word(C) + Word(D) + Token(E)
+ // Word(A) + Word(B) + Token(CD) + Token(E)
+ // Word(A) + Word(B) + Word(C) + Word(D) + Token(E)
+ combos(sen).
+ map(senPartComb ⇒ {
+ sen.flatMap(t ⇒
+ // A single-word token is never split into words - it stays a token.
+ // Tokens outside the current combination are split back into words.
+ if (senPartComb.contains(t) || t.wordIndexes.length == 1)
+ Seq(Complex(t))
+ else
+ t.wordIndexes.map(complexesWords)
+ )
+ // Drop combinations without tokens (the DSL part works with tokens).
+ }).filter(_.exists(_.isToken)).map(ComplexSeq(_)).distinct
+ )
+
+ val tokIdxs = ns.map(t ⇒ t → t.wordIndexes).toMap
/**
*
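
The rewritten block above inlines the old convert method: combos enumerates every contiguous slice of a collapsed variant, and each slice fixes which multi-word tokens stay whole while the rest are split back into single words. A runnable sketch of that enumeration, with plain strings standing in for tokens (assumed data; the real code builds Complex instances):

    object CombosDemo extends App {
        // Same shape as the combos helper: all contiguous slices, longest first.
        def combos[T](toks: Seq[T]): Seq[Seq[T]] =
            (for (n <- toks.size until 0 by -1) yield toks.sliding(n).toSeq).flatten

        // The variant "A B C D E" collapsed into 3 tokens: "AB", "CD", "E".
        val sen = Seq("AB", "CD", "E")

        val splits =
            combos(sen).map(comb =>
                sen.flatMap(t =>
                    // Tokens in the slice, and single-word tokens, stay whole; others split.
                    if (comb.contains(t) || t.length == 1) Seq(s"Token($t)")
                    else t.map(c => s"Word($c)")
                )
            ).distinct

        splits.foreach(s => println(s.mkString(" + ")))
        // Token(AB) + Token(CD) + Token(E)
        // Word(A) + Word(B) + Token(CD) + Token(E)
        // Token(AB) + Word(C) + Word(D) + Token(E)
        // Word(A) + Word(B) + Word(C) + Word(D) + Token(E)
    }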
@@ -377,19 +416,49 @@ object NCModelEnricher extends NCProbeEnricher with DecorateAsScala {
if (!cache.contains(key)) {
cache += key
- lazy val dslCombs = convert(ns, collapsedSens, toks).groupBy(_.length)
+ val idxsSeq = toks.flatMap(tokIdxs)
+ val idxsSorted = idxsSeq.sorted
+ val idxs = idxsSeq.toSet
+ val idxMin = idxsSorted.head
+ val idxMax = idxsSorted.last
+
+ lazy val sorted = idxsSorted.zipWithIndex.toMap
+
+ lazy val dslCombs =
+ complexes.par.
+ flatMap(complexSeq ⇒ {
+ val rec = complexSeq.tokensComplexes.filter(_.isSubsetOf(idxMin, idxMax, idxs))
+
+ // Drop candidates without tokens (the DSL part works with tokens).
+ if (rec.nonEmpty)
+ Some(
+ rec ++
+ (
+ complexSeq.wordsIndexes.intersect(idxs) -- rec.flatMap(_.wordIndexes)
+
+ ).map(complexesWords)
+ )
+ else
+ None
+ }).
+ map(_.sortBy(p ⇒ sorted(p.wordIndexes.head))).seq.groupBy(_.length)
+
lazy val sparsity = U.calcSparsity(key)
+ lazy val tokStems = toks.map(_.stem).mkString(" ")
// Attempt to match each element.
for (elm ← mdl.elements.values if !alreadyMarked(toks, elm.getId)) {
var found = false
def addMatch(
- elm: NCElement, toks: Seq[NCNlpSentenceToken], syn: NCProbeSynonym, parts: Seq[(NCToken, NCSynonymChunkKind)]
+ elm: NCElement,
+ toks: Seq[NCNlpSentenceToken],
+ syn: NCProbeSynonym,
+ parts: Seq[(NCToken, NCSynonymChunkKind)]
): Unit =
if (
(elm.getJiggleFactor.isEmpty || elm.getJiggleFactor.get() >= sparsity) &&
- !matches.exists(m ⇒ m.element == elm && m.isSubSet(toks.toSet))
+ !matches.exists(m ⇒ m.element == elm && m.isSubSet(toks.toSet))
) {
found = true
@@ -400,10 +469,8 @@ object NCModelEnricher extends NCProbeEnricher with DecorateAsScala {
if (mdl.synonyms.nonEmpty && !ns.exists(_.isUser))
fastAccess(mdl.synonyms, elm.getId, toks.length) match {
case Some(h) ⇒
- val stems = toks.map(_.stem).mkString(" ")
-
def tryMap(synsMap: Map[String, NCProbeSynonym], notFound: () ⇒ Unit): Unit =
- synsMap.get(stems) match {
+ synsMap.get(tokStems) match {
case Some(syn) ⇒
addMatch(elm, toks, syn, Seq.empty)
@@ -437,9 +504,9 @@ object NCModelEnricher extends NCProbeEnricher with DecorateAsScala {
for (
(len, seq) ← dslCombs;
- syn ← fastAccess(mdl.synonymsDsl, elm.getId, len).getOrElse(Seq.empty);
- comb ← seq if !found;
- data = comb.map(_.data)
+ syn ← fastAccess(mdl.synonymsDsl, elm.getId, len).getOrElse(Seq.empty);
+ comb ← seq if !found;
+ data = comb.map(_.data)
)
if (syn.isMatch(data)) {
val parts = comb.zip(syn.map(_.kind)).flatMap {
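
fastAccess collapses the old pattern match into getOrElse over the nested {element ID -> {synonym length -> T}} map; the DSL loop above then pulls the synonyms bucket for each combination length. A small sketch of that lookup (the map contents are made-up example data):

    object FastAccessDemo extends App {
        def fastAccess[T](fastMap: Map[String, Map[Int, T]], elmId: String, len: Int): Option[T] =
            fastMap.getOrElse(elmId, Map.empty[Int, T]).get(len)

        // {Element ID -> {Synonym length -> synonyms}}; payload stubbed as Seq[String].
        val synonyms: Map[String, Map[Int, Seq[String]]] =
            Map("x:time" -> Map(1 -> Seq("time"), 2 -> Seq("current time")))

        assert(fastAccess(synonyms, "x:time", 2).contains(Seq("current time")))
        assert(fastAccess(synonyms, "x:time", 3).isEmpty) // No synonyms of that length.
        assert(fastAccess(synonyms, "x:date", 1).isEmpty) // Unknown element ID.
    }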
@@ -480,23 +547,23 @@ object NCModelEnricher extends NCProbeEnricher with DecorateAsScala {
// 0-3 will be deleted because for 0 and 3 tokens best variants found for same element with same tokens length.
val matchesNorm =
matches.
- flatMap(m ⇒ m.tokens.map(_ → m)).
- groupBy { case (t, m) ⇒ (m.element.getId, m.length, t) }.
- flatMap { case (_, seq) ⇒
- def perm[T](list: List[List[T]]): List[List[T]] =
- list match {
- case Nil ⇒ List(Nil)
- case head :: tail ⇒ for (h ← head; t ← perm(tail)) yield h :: t
- }
+ flatMap(m ⇒ m.tokens.map(_ → m)).
+ groupBy { case (t, m) ⇒ (m.element.getId, m.length, t) }.
+ flatMap { case (_, seq) ⇒
+ def perm[T](list: List[List[T]]): List[List[T]] =
+ list match {
+ case Nil ⇒ List(Nil)
+ case head :: tail ⇒ for (h ← head; t ← perm(tail)) yield h :: t
+ }
- // Optimization by sparsity sum for each tokens set for one element found with same tokens count.
- perm(
- seq.groupBy { case (tok, _) ⇒ tok }.
- map { case (_, seq) ⇒ seq.map { case (_, m) ⇒ m} .toList }.toList
- ).minBy(_.map(_.sparsity).sum)
- }.
- toSeq.
- distinct
+ // Optimization by sparsity sum for each tokens set for one element found with same tokens count.
+ perm(
+ seq.groupBy { case (tok, _) ⇒ tok }.
+ map { case (_, seq) ⇒ seq.map { case (_, m) ⇒ m }.toList }.toList
+ ).minBy(_.map(_.sparsity).sum)
+ }.
+ toSeq.
+ distinct
val matchCnt = matchesNorm.size
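
The reindented perm helper builds the Cartesian product of a list of lists, which is what lets the normalization step above pick, per element and token count, the match combination with the minimal sparsity sum. A standalone sketch with integers playing the sparsity role (assumed data):

    object PermDemo extends App {
        // Cartesian product: one element chosen from each inner list.
        def perm[T](list: List[List[T]]): List[List[T]] =
            list match {
                case Nil => List(Nil)
                case head :: tail => for (h <- head; t <- perm(tail)) yield h :: t
            }

        // Two tokens, each with two candidate matches of the given "sparsity".
        val candidates = List(List(1, 4), List(2, 3))

        // perm yields List(1,2), List(1,3), List(4,2), List(4,3);
        // minBy keeps the combination with the smallest summed sparsity.
        println(perm(candidates).minBy(_.sum)) // List(1, 2)
    }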
diff --git a/nlpcraft/src/test/scala/org/apache/nlpcraft/NCTestContext.scala b/nlpcraft/src/test/scala/org/apache/nlpcraft/NCTestContext.scala
index 76ab38d..42b140d 100644
--- a/nlpcraft/src/test/scala/org/apache/nlpcraft/NCTestContext.scala
+++ b/nlpcraft/src/test/scala/org/apache/nlpcraft/NCTestContext.scala
@@ -81,7 +81,7 @@ abstract class NCTestContext {
probeStarted = true
if (ann.startClient()) {
- cli = new NCTestClientBuilder().newBuilder.build
+ cli = new NCTestClientBuilder().newBuilder.setResponseLog(ann.clientLog()).build
cli.open(NCModelManager.getAllModels().head.model.getId)
}
diff --git a/nlpcraft/src/test/scala/org/apache/nlpcraft/NCTestEnvironment.java b/nlpcraft/src/test/scala/org/apache/nlpcraft/NCTestEnvironment.java
index 8388991..d7be5f9 100644
--- a/nlpcraft/src/test/scala/org/apache/nlpcraft/NCTestEnvironment.java
+++ b/nlpcraft/src/test/scala/org/apache/nlpcraft/NCTestEnvironment.java
@@ -42,4 +42,10 @@ public @interface NCTestEnvironment {
* @return
*/
boolean startClient() default false;
+
+ /**
+ *
+ * @return Whether the test client should log server responses.
+ */
+ boolean clientLog() default true;
}
\ No newline at end of file
diff --git a/nlpcraft/src/test/scala/org/apache/nlpcraft/examples/sql/NCSqlExampleSpec.scala b/nlpcraft/src/test/scala/org/apache/nlpcraft/examples/sql/NCSqlExampleSpec.scala
index 5505d3c..7536041 100644
--- a/nlpcraft/src/test/scala/org/apache/nlpcraft/examples/sql/NCSqlExampleSpec.scala
+++ b/nlpcraft/src/test/scala/org/apache/nlpcraft/examples/sql/NCSqlExampleSpec.scala
@@ -35,7 +35,7 @@ import scala.compat.java8.OptionConverters.RichOptionalGeneric
*
* @see SqlModel
*/
-@NCTestEnvironment(model = classOf[SqlModel], startClient = true)
+@NCTestEnvironment(model = classOf[SqlModel], startClient = true, clientLog = false)
class NCSqlExampleSpec extends NCTestContext {
private val GSON = new Gson
private val TYPE_RESP = new TypeToken[util.Map[String, Object]]() {}.getType
diff --git a/nlpcraft/src/test/scala/org/apache/nlpcraft/examples/sql/NCSqlModelSpec.scala b/nlpcraft/src/test/scala/org/apache/nlpcraft/examples/sql/NCSqlModelSpec.scala
index 40987f5..3483bd4 100644
--- a/nlpcraft/src/test/scala/org/apache/nlpcraft/examples/sql/NCSqlModelSpec.scala
+++ b/nlpcraft/src/test/scala/org/apache/nlpcraft/examples/sql/NCSqlModelSpec.scala
@@ -33,7 +33,7 @@ class NCSqlModelWrapper extends NCDefaultTestModel {
override def getMacros: util.Map[String, String] = delegate.getMacros
}
-@NCTestEnvironment(model = classOf[NCSqlModelWrapper], startClient = true)
+@NCTestEnvironment(model = classOf[NCSqlModelWrapper], startClient = true, clientLog = false)
class NCSqlModelSpec extends NCEnricherBaseSpec {
// org.apache.nlpcraft.examples.sql.SqlModel.SqlModel initialized via DB.
// (org.apache.nlpcraft.examples.sql.db.SqlValueLoader configured in its model yaml file.)
diff --git a/nlpcraft/src/test/scala/org/apache/nlpcraft/probe/mgrs/nlp/enrichers/model/NCEnricherNestedModelSpec4.scala b/nlpcraft/src/test/scala/org/apache/nlpcraft/probe/mgrs/nlp/enrichers/model/NCEnricherNestedModelSpec4.scala
index b354533..b240a47 100644
--- a/nlpcraft/src/test/scala/org/apache/nlpcraft/probe/mgrs/nlp/enrichers/model/NCEnricherNestedModelSpec4.scala
+++ b/nlpcraft/src/test/scala/org/apache/nlpcraft/probe/mgrs/nlp/enrichers/model/NCEnricherNestedModelSpec4.scala
@@ -49,5 +49,5 @@ class NCNestedTestModel4 extends NCModelAdapter(
@NCTestEnvironment(model = classOf[NCNestedTestModel4], startClient = true)
class NCEnricherNestedModelSpec4 extends NCTestContext {
@Test
- def test(): Unit = checkIntent("the a " * 8, "onE2")
+ def test(): Unit = checkIntent("the a " * 9, "onE2")
}
\ No newline at end of file