// File: OLED-master/src/main/scala/oled/functions/SingleCoreOLEDFunctions.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.functions
import app.runutils.{Globals, RunningOptions}
import com.mongodb.casbah.MongoClient
import com.typesafe.scalalogging.LazyLogging
import logic.Examples.Example
import logic.{AnswerSet, Clause, Theory}
import utils.ASP.getCoverageDirectives
import utils.DataUtils.{DataAsExamples, DataAsIntervals, DataFunction, TrainingSet}
import utils.Implicits._
import utils.{ASP, CaviarUtils, Database, Utils}
import scala.util.Random
/**
* Created by nkatz on 6/21/17.
*/
/**
*
* This object contains functionality used by the single-core version of OLED only.
*
*/
object SingleCoreOLEDFunctions extends CoreFunctions {
def processExample(topTheory: Theory, e: Example, targetClass: String,
inps: RunningOptions, logger: org.slf4j.Logger, learningWeights: Boolean = false) = {
var newTopTheory = topTheory
var scoringTime = 0.0
var newRuleTestTime = 0.0
var compressRulesTime = 0.0
var expandRulesTime = 0.0
var newRuleGenerationTime = 0.0
val initorterm: String =
if (targetClass == "initiated") "initiatedAt"
else if (targetClass == "terminated") "terminatedAt"
else inps.globals.MODEHS.head.varbed.tostring
val withInertia = Globals.glvalues("with-inertia").toBoolean
val startNew =
if (withInertia) {
/*-------------------------------------------------------------------*/
// This works, but it takes too long. The reason is that it tries
// to abduce at almost every example. See the comment above the isSat
// method in SingleCoreOLEDFunctions for more details. See also the related
// comment in Globals.scala.
//if (e.annotation.isEmpty) false else ! isSat(e, inps.globals, this.jep)
/*-------------------------------------------------------------------*/
// growNewRuleTest here works with inertia in both the initiation and the
// termination cases.
if (e.annotation.isEmpty) false else newTopTheory.growNewRuleTest(e, initorterm, inps.globals)
} else {
//if (inps.tryMoreRules && targetClass == "terminated") true // Always try to find extra termination rules, they are more rare.
if (inps.tryMoreRules) {
//val r = scala.util.Random
//val coinFlip = r.nextFloat
//if (coinFlip >= 0.5) true else false
true // Sometimes, depending on the order of the examples, it's necessary to try more even for initiation.
} else {
val r = Utils.time{ newTopTheory.growNewRuleTest(e, initorterm, inps.globals) }
if (inps.showStats) logger.info(s"grow new rule test time: ${r._2}")
newRuleTestTime += r._2
r._1
}
}
if (startNew) {
val newRules_ = Utils.time {
if (withInertia) {
getnerateNewBottomClauses_withInertia(topTheory, e, initorterm, inps.globals)
} else {
if (inps.tryMoreRules) {
// Don't use the current theory here to force the system to generate new rules
generateNewRules(Theory(), e, initorterm, inps.globals)
} else {
generateNewRules(topTheory, e, initorterm, inps.globals)
}
}
}
val newRules__ = newRules_._1
if (inps.showStats) logger.info(s"New rules generation time: ${newRules_._2}")
newRuleGenerationTime += newRules_._2
// Just to be on the safe side...
val newRules = newRules__.filter(x => x.head.functor == initorterm)
if (newRules.nonEmpty) logger.info(s"Generated ${newRules.length} new rules.")
val o1 = System.nanoTime()
if (inps.compressNewRules) {
newTopTheory = topTheory.clauses ++ filterTriedRules(topTheory, newRules, logger)
} else {
newTopTheory = topTheory.clauses ++ newRules
}
val o2 = System.nanoTime()
if (inps.showStats) logger.info(s"compressing rules time: ${(o2 - o1) / 1000000000.0}")
compressRulesTime += (o2 - o1) / 1000000000.0
}
if (newTopTheory.clauses.nonEmpty) {
// If we're learning weights rule scoring takes place on the weight learner side
// so that the scores correspond to the weighted versions of the rules.
if (!learningWeights) {
val t = Utils.time { newTopTheory.scoreRules(e, inps.globals) }
if (inps.showStats) logger.info(s"Scoring rules time: ${t._2}")
scoringTime += t._2
}
/*
val expanded = Utils.time { expandRules(newTopTheory, inps, logger) }
if (inps.showStats) logger.info(s"Expanding rules time: ${expanded._2}")
expandRulesTime += expanded._2
*/
///*
val expanded =
if (!learningWeights) {
Utils.time { expandRules(newTopTheory, inps, logger) }
} else {
((newTopTheory, false), 0.0)
}
//*/
if (inps.showStats) logger.info(s"Expanding rules time: ${expanded._2}")
expandRulesTime += expanded._2
if (inps.onlinePruning) {
//val pruned = pruneRules(expanded._1, inps, logger)
val pruned = pruneRulesNaive(expanded._1._1, inps, logger)
(pruned, scoringTime, newRuleTestTime, compressRulesTime, expandRulesTime, newRuleGenerationTime)
} else {
(expanded._1._1, scoringTime, newRuleTestTime, compressRulesTime, expandRulesTime, newRuleGenerationTime)
}
} else {
(newTopTheory, scoringTime, newRuleTestTime, compressRulesTime, expandRulesTime, newRuleGenerationTime)
}
}
def rightWay(parentRule: Clause, inps: RunningOptions) = {
if (true) { //parentRule.precision <= inps.preprune || parentRule.score <= inps.preprune
val (observedDiff, best, secondBest) = parentRule.meanDiff(inps.scoringFun)
val epsilon = Utils.hoeffding(inps.delta, parentRule.seenExmplsNum)
//println(parentRule.refinements.map(x => x.score))
//println(observedDiff, epsilon)
//logger.info(s"\n(observedDiff, epsilon, bestScore, secondBestScore): ($observedDiff, $epsilon, ${best.score}, ${secondBest.score})")
val passesTest = if (epsilon < observedDiff) true else false
//val tie = if (epsilon <= breakTiesThreshold && parentRule.seenExmplsNum >= minSeenExmpls) true else false
val tie = if (observedDiff < epsilon && epsilon < inps.breakTiesThreshold && parentRule.seenExmplsNum >= inps.minSeenExmpls) true else false
//println(s"best score: ${best.score} 2nd-best: ${secondBest.score} $observedDiff < $epsilon && $epsilon < ${inps.breakTiesThreshold} ${parentRule.seenExmplsNum} >= ${inps.minSeenExmpls} $tie")
val couldExpand =
if (inps.minTpsRequired != 0) {
// The best.mlnWeight >= parentRule.mlnWeight condition doesn't work of course...
(passesTest || tie) && (best.getTotalTPs >= parentRule.getTotalTPs * inps.minTpsRequired / 100.0) //&& best.mlnWeight >= parentRule.mlnWeight
} else {
// The best.mlnWeight >= parentRule.mlnWeight condition doesn't work of course...
passesTest || tie //&& best.mlnWeight >= parentRule.mlnWeight
}
(couldExpand, epsilon, observedDiff, best, secondBest)
} else {
(false, 0.0, 0.0, parentRule, parentRule)
}
}
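// A minimal sketch (an illustrative assumption, not the repo's actual Utils.hoeffding
// implementation) of the Hoeffding bound used in the test above: for n observations of a
// quantity with range R, the observed mean is within epsilon of the true mean with
// probability at least 1 - delta. This helper is not referenced by the surrounding code.
private def hoeffdingBoundSketch(delta: Double, n: Int, range: Double = 1.0): Double =
  math.sqrt((range * range * math.log(1.0 / delta)) / (2 * n))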
def expandRules(topTheory: Theory, inps: RunningOptions, logger: org.slf4j.Logger): (Theory, Boolean) = {
//val t0 = System.nanoTime()
var expanded = false
val out = topTheory.clauses flatMap { parentRule =>
val (couldExpand, epsilon, observedDiff, best, secondBest) = rightWay(parentRule, inps)
//println(best.score,best.tps, best.fps, best.fns, " ", secondBest.score, secondBest.tps, secondBest.fps, secondBest.fns)
couldExpand match {
case true =>
// This is the extra test that I added at Feedzai
val extraTest =
if (inps.scoringFun != "foilgain") {
if (secondBest != parentRule) (best.score > parentRule.score) && (best.score - parentRule.score > epsilon)
else best.score > parentRule.score
} else {
// We want the refinement to have some gain. We do not expand for no gain
best.score > 0 //true
}
extraTest match { //&& (1.0/best.body.size+1 > 1.0/parentRule.body.size+1) match {
case true =>
val refinedRule = best
logger.info(showInfo(parentRule, best, secondBest, epsilon, observedDiff, parentRule.seenExmplsNum, inps))
refinedRule.seenExmplsNum = 0 // zero the counter
refinedRule.supportSet = parentRule.supportSet // only one clause here
refinedRule.generateCandidateRefs(inps.globals)
expanded = true
List(refinedRule)
case _ => List(parentRule)
}
case _ => List(parentRule)
}
}
//val t1 = System.nanoTime()
//println(s"expandRules time: ${(t1-t0)/1000000000.0}")
(Theory(out), expanded)
}
def processExampleNoEC(topTheory: Theory, e: Example, inps: RunningOptions, logger: org.slf4j.Logger) = {
var scoringTime = 0.0
var newRuleTestTime = 0.0
var compressRulesTime = 0.0
var expandRulesTime = 0.0
var newRuleGenerationTime = 0.0
var newTopTheory = topTheory
val startNew = if (e.annotation.isEmpty) false else newTopTheory.growNewRuleTestNoEC(e, inps.globals)
if (startNew) {
val newRules_ = Utils.time{
if (inps.tryMoreRules) {
// Don't use the current theory here to force the system to generate new rules
generateNewRulesNoEC(Theory(), e, inps.globals)
} else {
generateNewRulesNoEC(topTheory, e, inps.globals)
}
}
val newRules__ = newRules_._1
if (inps.showStats) logger.info(s"New rules generation time: ${newRules_._2}")
newRuleGenerationTime += newRules_._2
val newRules = newRules__
if (newRules.nonEmpty) logger.info(s"Generated ${newRules.length} new rules.")
val o1 = System.nanoTime()
if (inps.compressNewRules) {
newTopTheory = topTheory.clauses ++ filterTriedRules(topTheory, newRules, logger)
} else {
newTopTheory = topTheory.clauses ++ newRules
}
val o2 = System.nanoTime()
if (inps.showStats) logger.info(s"compressing rules time: ${(o2 - o1) / 1000000000.0}")
compressRulesTime += (o2 - o1) / 1000000000.0
}
if (newTopTheory.clauses.nonEmpty) {
val t = Utils.time { newTopTheory.scoreRulesNoEC(e, inps.globals) }
if (inps.showStats) logger.info(s"Scoring rules time: ${t._2}")
scoringTime += t._2
val expanded = Utils.time { expandRules(newTopTheory, inps, logger) }
if (inps.showStats) logger.info(s"Expanding rules time: ${expanded._2}")
expandRulesTime += expanded._2
if (inps.onlinePruning) {
val pruned = pruneRules(expanded._1._1, inps, logger)
(pruned, scoringTime, newRuleTestTime, compressRulesTime, expandRulesTime, newRuleGenerationTime)
} else {
(expanded._1._1, scoringTime, newRuleTestTime, compressRulesTime, expandRulesTime, newRuleGenerationTime)
}
} else {
(newTopTheory, scoringTime, newRuleTestTime, compressRulesTime, expandRulesTime, newRuleGenerationTime)
}
}
def getTrainingData(params: RunningOptions, data: TrainingSet, targetClass: String): Iterator[Example] = {
val mc = MongoClient()
val collection = mc(params.train)("examples")
def getData = utils.CaviarUtils.getDataAsChunks(collection, params.chunkSize, targetClass)
data match {
case x: DataAsIntervals =>
if (data.isEmpty) {
getData
} else {
/* Optionally shuffle the training data */
if (params.shuffleData) {
val shuffled = List(data.asInstanceOf[DataAsIntervals].trainingSet.head) ++ Random.shuffle(data.asInstanceOf[DataAsIntervals].trainingSet.tail)
CaviarUtils.getDataFromIntervals(collection, params.targetHLE, shuffled, params.chunkSize)
} else {
// No shuffling:
CaviarUtils.getDataFromIntervals(collection, params.targetHLE, data.asInstanceOf[DataAsIntervals].trainingSet, params.chunkSize)
}
}
case x: DataAsExamples => data.asInstanceOf[DataAsExamples].trainingSet.toIterator
case x: DataFunction =>
data.asInstanceOf[DataFunction].function(params.train, params.targetHLE, params.chunkSize, DataAsIntervals())
case _ => throw new RuntimeException(s"${data.getClass}: Don't know what to do with this data container!")
}
}
def reScore(params: RunningOptions, data: Iterator[Example], theory: Theory, targetClass: String, logger: org.slf4j.Logger) = {
theory.clauses foreach (p => p.clearStatistics) // zero all counters before re-scoring
for (x <- data) {
if (Globals.glvalues("with-ec").toBoolean) {
theory.scoreRules(x, params.globals, postPruningMode = true)
} else {
theory.scoreRulesNoEC(x, params.globals, postPruningMode = true)
}
}
logger.debug(theory.clauses map { p => s"score: ${p.score}, tps: ${p.tps}, fps: ${p.fps}, fns: ${p.fns}\n${p.tostring}" } mkString "\n")
}
def generateNewRules(topTheory: Theory, e: Example, initorterm: String, globals: Globals) = {
val bcs = generateNewBottomClauses(topTheory, e, initorterm, globals)
bcs map { x =>
val c = Clause(head = x.head, body = List())
c.addToSupport(x)
c
}
}
def generateNewRulesNoEC(topTheory: Theory, e: Example, globals: Globals) = {
val bcs = generateNewBottomClausesNoEC(topTheory, e, globals)
bcs map { x =>
val c = Clause(head = x.head, body = List())
c.addToSupport(x)
c
}
}
def reScoreAndPrune(inps: RunningOptions, data: Iterator[Example], finalTheory: Theory, targetClass: String, logger: org.slf4j.Logger) = {
logger.info(s"Starting post-pruning for $targetClass")
logger.info(s"Rescoring $targetClass theory")
if (finalTheory != Theory()) reScore(inps, data, finalTheory, targetClass, logger)
logger.info(s"\nLearned hypothesis (before pruning):\n${finalTheory.showWithStats}")
val pruned = finalTheory.clauses.filter(x => x.score > inps.pruneThreshold && x.seenExmplsNum > inps.minEvalOn)
logger.debug(s"\nPruned hypothesis:\n${pruned.showWithStats}")
Theory(pruned)
}
/* Used by the monolithic OLED when learning with inertia.*/
def check_SAT_withInertia(theory: Theory, example: Example, globals: Globals): Boolean = {
val e = (example.annotationASP ++ example.narrativeASP).mkString("\n")
val exConstr = getCoverageDirectives(withCWA = Globals.glvalues("cwa"), globals = globals).mkString("\n")
val t = theory.map(x => x.withTypePreds(globals).tostring).mkString("\n")
val f = Utils.getTempFile("sat", ".lp")
Utils.writeToFile(f, "append")(
p => List(e, exConstr, t, s"\n#include " + "\"" + globals.ABDUCE_WITH_INERTIA + "\".\n") foreach p.println
)
val inFile = f.getCanonicalPath
val out = ASP.solve(Globals.CHECKSAT, Map(), new java.io.File(inFile), example.toMapASP)
if (out != Nil && out.head == AnswerSet.UNSAT) {
false
} else {
true
}
}
/* Used by the monolithic OLED when learning with inertia.
* After processing each example, form a joint current theory by
* putting together the best specialization of each existing rule so far.
* If you just use each top rule, chances are that over-general rules will
* screw things up.*/
def updateGlobalTheoryStore(theory: Theory, target: String, gl: Globals) = {
def getBestClause(c: Clause) = {
val allSorted = (List(c) ++ c.refinements).sortBy { x => (-x.score, x.body.length + 1) }
allSorted.head
}
def getBestClauses(T: Theory) = {
T.clauses.map(x => getBestClause(x))
}
if (target == "initiatedAt") {
Globals.CURRENT_THEORY_INITIATED = getBestClauses(theory).toVector
} else if (target == "terminatedAt") {
Globals.CURRENT_THEORY_TERMINATED = getBestClauses(theory).toVector
} else {
throw new RuntimeException(s"Unknown target predicate: $target")
}
}
/* Used by the monolithic OLED when learning with inertia.*/
// The problem with deciding when to learn a new clause with
// isSat here is that the decision will almost always be yes.
// That's because, even if we form the current theory in isSat
// by selecting the best specialization of each existing clause,
// chances are that there will be imperfect rules which are not
// specialized quickly enough, so we end up with an unsatisfiable program.
// We'd need something more in the style of ILED for this: reacting fast
// to every mistake and specializing immediately, so that in the absence of
// noise we quickly learn the correct definitions. (Note that in any case, if
// there is noise in the strongly-initiated setting, there is no chance to
// learn anything.) See also the related comment in Globals.scala.
def isSat(example: Example, globals: Globals) = {
val jointTheory = Theory((Globals.CURRENT_THEORY_INITIATED ++ Globals.CURRENT_THEORY_TERMINATED).toList)
check_SAT_withInertia(jointTheory, example, globals)
}
}
// File: OLED-master/src/main/scala/oled/functions/WeightLearningFunctions.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.functions
import app.runutils.{Globals, RunningOptions}
import logic.Examples.Example
import logic._
import lomrf.logic.AtomSignature
import lomrf.mln.model.mrf.MRF
import lomrf.mln.model.{AtomIdentityFunctionOps, EvidenceDB, KB, MLN}
import utils.{ASP, Utils}
object WeightLearningFunctions {
class MLNInfoContainer(val mln: MLN, val labelAtoms: Set[String],
val fluentsMap: Map[String, String], val clauseIds: List[Int],
val annotationDB: EvidenceDB, val exmplCount: Int)
/*
*
* types example: List("time", "person", "dist", "event", "fluent")
* annotationTemplate example: "InitiatedAt(fluent,time)"
*
* This method returns an MLN, the evidence DB and the example count from the current batch, to be used in the Hoeffding test.
*
* */
def getGroundTheory(clauses: Vector[Clause], e: Example,
inps: RunningOptions, targetClass: String, clauseIds: List[Int] = Nil) = {
/*
def proxyAtom(lit: PosLiteral) = {
if (lit.functor == "terminatedAt") Literal(functor = "terminatedAt1", terms = lit.terms)
else lit
}
def proxyFunctor(pred: String) = if (pred == "terminatedAt") "terminatedAt1" else pred
*/
val enum = if (clauseIds.isEmpty) clauses zip (1 to clauses.length) else clauses zip clauseIds
val p = enum map { x =>
val (clause, index) = (x._1, x._2)
val typeAtoms = clause.toLiteralList.flatMap(x => x.getTypePredicates(inps.globals)).distinct.map(x => Literal.parse(x))
val markedHead = Literal(predSymbol = clause.head.functor, terms = clause.head.terms :+ Constant(s"ruleId_$index"))
val markedClause = Clause(head = markedHead, body = clause.body ++ typeAtoms)
/*
// We don't need the body terms
val unsatClauseHead = Literal(functor = "false_clause", terms = List(Constant(s"ruleId_$index"), proxyAtom(clause.head)) )
// the body is the negation of the head
val unsatClauseBody = Literal(functor = proxyFunctor(clause.head.functor), terms = clause.head.terms :+ Constant(s"ruleId_$index"), isNAF = true)
val unsatClause = Clause(head = unsatClauseHead.asPosLiteral, body = List(unsatClauseBody)++ typeAtoms)
// We also need the satisfied clauses
val satClauseHead = Literal(functor = "true_clause", terms = List(Constant(s"ruleId_$index"), proxyAtom(clause.head)) )
// the body is the negation of the head
val satClauseBody = Literal(functor = proxyFunctor(clause.head.functor), terms = clause.head.terms :+ Constant(s"ruleId_$index"))
val satClause = Clause(head = satClauseHead.asPosLiteral, body = List(satClauseBody)++ typeAtoms)
*/
// We don't need the body terms
val unsatClauseHead = Literal(predSymbol = "false_clause", terms = List(Constant(s"ruleId_$index"), clause.head))
// the body is the negation of the head
val unsatClauseBody = Literal(predSymbol = clause.head.functor, terms = clause.head.terms :+ Constant(s"ruleId_$index"), isNAF = true)
val unsatClause = Clause(head = unsatClauseHead.asPosLiteral, body = List(unsatClauseBody) ++ typeAtoms)
// We also need the satisfied clauses
val satClauseHead = Literal(predSymbol = "true_clause", terms = List(Constant(s"ruleId_$index"), clause.head))
// the body is the negation of the head
val satClauseBody = Literal(predSymbol = clause.head.functor, terms = clause.head.terms :+ Constant(s"ruleId_$index"))
val satClause = Clause(head = satClauseHead.asPosLiteral, body = List(satClauseBody) ++ typeAtoms)
(markedClause, unsatClause, satClause)
}
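// Illustrative output (assumed clause "initiatedAt(F,T) :- happensAt(E,T)" with index 1):
// marked: initiatedAt(F,T,ruleId_1) :- happensAt(E,T), <type atoms>.
// unsat:  false_clause(ruleId_1,initiatedAt(F,T)) :- not initiatedAt(F,T,ruleId_1), <type atoms>.
// sat:    true_clause(ruleId_1,initiatedAt(F,T)) :- initiatedAt(F,T,ruleId_1), <type atoms>.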
val atoms_timed = Utils.time(ASP.solveMLNGrounding(inps, e, p, targetClass))
val atoms = atoms_timed._1
/* post-process */
//val (trueGroundingsAtoms, trueFalseGrndClauseAtoms) = atoms.partition(x => x.startsWith("true_groundings"))
val (trueGroundingsAtoms, totalExmplCount_, trueFalseGrndClauseAtoms, annotation_, fnsTerminated_, tpsTerminated_) =
atoms.foldLeft(Vector[String](), Vector[String](), Vector[String](),
Vector[String](), Vector[String](), Vector[String]()) { (x, atom) =>
if (atom.startsWith("true_groundings")) (x._1 :+ atom, x._2, x._3, x._4, x._5, x._6)
else if (atom.startsWith("exmplCount")) (x._1, x._2 :+ atom, x._3, x._4, x._5, x._6)
else if (atom.startsWith("false_clause") || atom.startsWith("true_clause")) (x._1, x._2, x._3 :+ atom, x._4, x._5, x._6)
else if (atom.startsWith("annotation")) (x._1, x._2, x._3, x._4 :+ atom, x._5, x._6)
else if (atom.startsWith("fns_terminated")) (x._1, x._2, x._3, x._4, x._5 :+ atom, x._6)
else if (atom.startsWith("tps_terminated")) (x._1, x._2, x._3, x._4, x._5, x._6 :+ atom)
else throw new RuntimeException(s"Unexpected atom $atom")
}
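// Example routing (illustrative atoms): "true_groundings(ruleId_2,5)" goes to the first
// vector, "exmplCount(42)" to the second, "true_clause(ruleId_2,...)"/"false_clause(...)"
// to the third, "annotation(...)" to the fourth, and fns_/tps_terminated to the last two.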
if (totalExmplCount_.length != 1)
throw new RuntimeException(s"Problem with the total example count (mun of target predicate groundings): ${totalExmplCount_.mkString(" ")}")
val totalExmplCount = totalExmplCount_.head.split("\\(")(1).split("\\)")(0).toInt
val trueGroundingsPerClause = {
val pairs = trueGroundingsAtoms map { atom =>
val s = atom.split(",")
val id = s(0).split("\\(")(1).split("_")(1).toInt
val count = s(1).split("\\)")(0).toInt
(id, count)
}
pairs.toList.groupBy(x => x._1).map{ case (k, v) => (k, v.map(_._2)) }
}
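// Worked example (illustrative): "true_groundings(ruleId_3,17)" parses to (3, 17);
// grouping then maps each rule id to all of its counts, e.g.
// List((3,17), (3,0)) becomes Map(3 -> List(17, 0)).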
// Parse a flat atom into an MLN literal annotated with the id of the clause it was derived from.
def toFlatMLNLiteral(x: String) = {
val a = Literal.parseWPB2(x).terms.head.asInstanceOf[Literal]
val derivedFromClause = a.terms.last.name.split("_")(1).toInt
val b = Literal(predSymbol = a.predSymbol, terms = a.terms, derivedFrom = derivedFromClause)
Literal.toMLNFlat(b)
}
val annotation = annotation_.map(toFlatMLNLiteral)
val incorrectlyTerminated = fnsTerminated_.map(toFlatMLNLiteral)
val correctlyNotTerminated = tpsTerminated_.map(toFlatMLNLiteral)
// There are two rules that produce the true groundings counts, see the ground-initiated.lp bk file.
// However, the constraint below is problematic. There are cases where both rules that produce
// groundings for a rule yield the same number of groundings (e.g. they both return zero
// groundings, so we get something like true_groundings(ruleId_5, 0), true_groundings(ruleId_5, 0),
// which collapse to the same atom). For instance, consider a batch with no annotation (so
// actually_initiated is never true) and a rule that fires almost always (e.g. a rule with
// "not happensAt(disappear)" in the body). Then no true groundings of this rule are produced.
/*
if (trueGroundingsPerClause.values.exists(_.length != 2))
throw new RuntimeException("There is a potential problem with the true counts per clause returned by Clingo!")
*/
val groundNetwork = trueFalseGrndClauseAtoms map { x =>
//val p = Literal.parse(x)
val p = Literal.parseWPB2(x) // much faster than parser combinators.
if (p.predSymbol != "false_clause" && p.predSymbol != "true_clause") throw new RuntimeException(s"Don't know what to do with this atom: $x")
val clauseId = p.terms.head.name.split("_")(1).toInt
val clauseHead = p.terms(1).asInstanceOf[Literal]
if (p.predSymbol == "true_clause") {
val l = Literal(predSymbol = clauseHead.predSymbol, terms = clauseHead.terms :+ p.terms.head, derivedFrom = clauseId)
Literal.toMLNFlat(l)
} else {
val l = Literal(predSymbol = clauseHead.predSymbol, terms = clauseHead.terms :+ p.terms.head, isNAF = true, derivedFrom = clauseId)
Literal.toMLNFlat(l)
}
}
(groundNetwork, trueGroundingsPerClause, totalExmplCount, annotation, incorrectlyTerminated, correctlyNotTerminated)
}
def getMRFAsStringSet(mrf: MRF): scala.collection.mutable.Set[String] = {
import lomrf.mln.model.AtomIdentityFunction._
val constraintIterator = mrf.constraints.iterator()
val groundClausesSet = scala.collection.mutable.Set.empty[String]
while (constraintIterator.hasNext) {
constraintIterator.advance()
val currentConstraint = constraintIterator.value()
groundClausesSet += currentConstraint.literals.map { lit =>
decodeLiteral(lit)(mrf.mln).getOrElse(throw new RuntimeException(s"Failed to decode literal $lit"))
}.mkString(" v ")
}
groundClausesSet
}
}
// File: OLED-master/src/main/scala/oled/mwua/ClassicSleepingExpertsHedge.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua
import app.runutils.RunningOptions
import logic.{Clause, Theory}
import logic.Examples.Example
import oled.functions.SingleCoreOLEDFunctions
import oled.mwua.AuxFuncs.{increaseWeights, reduceWeights, updateRulesScore}
import oled.mwua.ExpertAdviceFunctions._
import oled.mwua.HelperClasses.AtomTobePredicted
/**
* Created by nkatz at 20/3/2019
*/
/* This is the classical implementation of Hedge with sleeping experts from
 * "Using and Combining Predictors which Specialize". Predictions are made
 * either with weighted majority, as in "Context-Sensitive Learning Methods for Text Categorization",
 * or with randomization, although the latter does not make much sense in the classification context.
 *
 * */
object ClassicSleepingExpertsHedge {
def splitAwakeAsleep(rulesToSplit: List[Clause], awakeIds: Set[String]) = {
val rulesToSplitIds = rulesToSplit.map(_.##).toSet
val (topLevelAwakeRules, topLevelAsleepRules) = rulesToSplit.foldLeft(Vector.empty[Clause], Vector.empty[Clause]) { (x, rule) =>
val isAwake = awakeIds.contains(rule.##.toString)
val isTopLevel = rulesToSplitIds.contains(rule.##)
if (isAwake) if (isTopLevel) (x._1 :+ rule, x._2) else (x._1, x._2) // then it's a refinement rule
else if (isTopLevel) (x._1, x._2 :+ rule) else (x._1, x._2) // then it's a refinement rule
}
(topLevelAwakeRules, topLevelAsleepRules)
}
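// Illustrative example (assumed values): given top-level rules r1 and r2, with awakeIds
// containing only r1.##.toString, the call returns (Vector(r1), Vector(r2)).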
def updateStructure_NEW_HEDGE(
atom: AtomTobePredicted,
previousTime: Int,
markedMap: Map[String, Clause],
predictedLabel: String,
feedback: String,
batch: Example,
currentAtom: String,
inps: RunningOptions,
logger: org.slf4j.Logger,
stateHandler: StateHandler,
percentOfMistakesBeforeSpecialize: Int,
randomizedPrediction: Boolean,
selected: String,
specializeAllAwakeOnMistake: Boolean,
conservativeRuleGeneration: Boolean,
generateNewRuleFlag: Boolean) = {
def getAwakeBottomRules(what: String) = {
if (what == "initiatedAt") atom.initiatedBy.filter(x => markedMap(x).isBottomRule)
else atom.terminatedBy.filter(x => markedMap(x).isBottomRule)
}
var updatedStructure = false
if (is_FP_mistake(predictedLabel, feedback)) {
val awakeBottomRules = getAwakeBottomRules("terminatedAt")
if (generateNewRuleFlag) { //&& awakeBottomRules.isEmpty //atom.terminatedBy.isEmpty
// If we leave the if (stateHandler.inertiaExpert.knowsAbout(atom.fluent)) clause here
// we get many more mistakes. On the other hand, it seems more reasonable to generate
// termination rules only when the fluent holds by inertia... (don't know what to do)
//if (stateHandler.inertiaExpert.knowsAbout(atom.fluent)) {
// Use the rest of the awake termination rules to compare the new
// ones to and make sure that no redundant rules are generated:
val awakeTerminationRules = atom.terminatedBy.map(x => markedMap(x))
// This generates bottom clause heads with abduction
/*
updatedStructure =
generateNewRule(batch, currentAtom, inps, "FP", logger,
stateHandler, "terminatedAt", 1.0, otherAwakeExperts = awakeTerminationRules)
*/
// This generates a bottom rule head by simply generating an initiation/termination atom
// for a fluent from the previous time point from which a mistake is made (no abduction).
// TO USE THIS THE streaming VARIABLE AT THE BEGINNING OF THE PROCESS METHOD NEEDS TO BE SET TO true
///*
updatedStructure =
generateNewExpert_NEW(batch, atom, previousTime, inps, "FP", logger,
stateHandler, "terminatedAt", 1.0, otherAwakeExperts = awakeTerminationRules)
//*/
//}
}
// Also, in the case of an FP mistake we try to specialize awake initiation rules.
if (atom.initiatedBy.nonEmpty) {
// We are doing this after each batch
///*
val (topLevelAwakeRules, topLevelAsleepRules) = splitAwakeAsleep(stateHandler.ensemble.initiationRules, atom.initiatedBy.toSet)
val expandedInit = SingleCoreOLEDFunctions.
expandRules(Theory(topLevelAwakeRules.toList.filter(x => x.refinements.nonEmpty)), inps, logger)
if (expandedInit._2) {
stateHandler.ensemble.initiationRules = expandedInit._1.clauses ++ topLevelAsleepRules
updatedStructure = true
}
//*/
}
}
if (is_FN_mistake(predictedLabel, feedback)) {
val awakeBottomRules = getAwakeBottomRules("initiatedAt")
if (generateNewRuleFlag) { // atom.initiatedBy.isEmpty
// We don't have firing initiation rules. Generate one.
if (awakeBottomRules.isEmpty) {
// Use the rest of the awake termination rules to compare the new
// ones to and make sure that no redundant rules are generated:
val awakeInitiationRules = atom.initiatedBy.map(x => markedMap(x))
// This generates bottom clause heads with abduction
/*
updatedStructure =
generateNewRule(batch, currentAtom, inps, "FN", logger,
stateHandler, "initiatedAt", 1.0, otherAwakeExperts = awakeInitiationRules)
*/
// This generates a bottom rule head by simply generating an initiation/termination atom
// for a fluent from the previous time point from which a mistake is made (no abduction).
// TO USE THIS THE streaming VARIABLE AT THE BEGINNING OF THE PROCESS METHOD NEEDS TO BE SET TO true
///*
updatedStructure =
generateNewExpert_NEW(batch, atom, previousTime, inps, "FN", logger,
stateHandler, "initiatedAt", 1.0, otherAwakeExperts = awakeInitiationRules)
//*/
}
} else {
if (!conservativeRuleGeneration) {
// If we are not in conservative mode we try to generate new initiation rules even if awake initiation
// rules already exist. We only do so if the current example has not already been compressed into an existing
// bottom rule.
if (awakeBottomRules.isEmpty) {
updatedStructure = generateNewRule_1(batch, currentAtom, inps, logger, stateHandler, "initiatedAt", 1.0)
}
}
}
// Also, in the case of an FN mistake we try to specialize awake termination rules.
if (atom.terminatedBy.nonEmpty) {
// We are doing this after each batch
///*
val (topLevelAwakeRules, topLevelAsleepRules) = splitAwakeAsleep(stateHandler.ensemble.terminationRules, atom.terminatedBy.toSet)
val expandedInit = SingleCoreOLEDFunctions.
expandRules(Theory(topLevelAwakeRules.toList.filter(x => x.refinements.nonEmpty)), inps, logger)
if (expandedInit._2) {
stateHandler.ensemble.terminationRules = expandedInit._1.clauses ++ topLevelAsleepRules
updatedStructure = true
}
//*/
}
}
updatedStructure
}
def predictHedge_NO_INERTIA(a: AtomTobePredicted, stateHanlder: StateHandler, markedMap: Map[String, Clause]) = {
// Here we assume that initiation rules predict '1' and termination rules predict '0'.
// The prediction is a number in [0,1] resulting from the weighted average of the experts'
// predictions (in this variant the inertia expert is excluded):
// prediction = Sum_{weights of awake init rules} / (Sum_{weights of awake init rules} + Sum_{weights of awake term rules}).
// If prediction > threshold (e.g. 0.5) we predict holds, else not holds.
val (_, _, awakeInit, awakeTerm, currentFluent) = (a.atom, a.time, a.initiatedBy, a.terminatedBy, a.fluent)
val initWeightSum = if (awakeInit.nonEmpty) awakeInit.map(x => markedMap(x).w_pos).sum else 0.0
val termWeightSum = if (awakeTerm.nonEmpty) awakeTerm.map(x => markedMap(x).w_pos).sum else 0.0
val _prediction = initWeightSum / (initWeightSum + termWeightSum)
val prediction = if (_prediction.isNaN) 0.0 else _prediction
(prediction, 0.0, initWeightSum, termWeightSum)
}
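// Worked example (assumed weights): with awake initiation rule weights 0.6 and 0.3 and a
// single awake termination rule of weight 0.4, the prediction is 0.9 / (0.9 + 0.4) ≈ 0.69,
// which exceeds a 0.5 threshold, so the fluent is predicted to hold.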
/*
def updateWeights_NO_INERTIA(atom: AtomTobePredicted, prediction: Double, inertiaExpertPrediction: Double,
initWeightSum: Double, termWeightSum: Double, predictedLabel: String,
markedMap: Map[String, Clause], feedback: String, stateHandler: StateHandler,
learningRate: Double, weightUpdateStrategy: String) = {
val currentFluent = atom.fluent
val hedge = weightUpdateStrategy == "hedge"
val totalWeightBeforeUpdate = initWeightSum + termWeightSum
def getSleeping(what: String) = {
val awake = if (what == "initiated") atom.initiatedBy.toSet else atom.terminatedBy.toSet
markedMap.filter(x => x._2.head.functor.contains(what) && !awake.contains(x._1))
}
val nonFiringInitRules = getSleeping("initiated")
val nonFiringTermRules = getSleeping("terminated")
def updateScore(what: String) = {
updateRulesScore(what, atom.initiatedBy.map(x => markedMap(x)), nonFiringInitRules.values.toVector,
atom.terminatedBy.map(x => markedMap(x)), nonFiringTermRules.values.toVector)
}
if (is_TP(predictedLabel, feedback)) {
updateScore("TP")
if (hedge) reduceWeights(atom.terminatedBy, markedMap, learningRate, "hedge")
}
if (is_FP_mistake(predictedLabel, feedback)) {
if (!hedge) {
reduceWeights(atom.initiatedBy, markedMap, learningRate)
increaseWeights(atom.terminatedBy, markedMap, learningRate)
} else {
reduceWeights(atom.initiatedBy, markedMap, learningRate, "hedge")
}
updateScore("FP")
}
if (is_FN_mistake(predictedLabel, feedback)) {
if (!hedge) {
increaseWeights(atom.initiatedBy, markedMap, learningRate)
reduceWeights(atom.terminatedBy, markedMap, learningRate)
} else {
reduceWeights(atom.terminatedBy, markedMap, learningRate, "hedge")
}
updateScore("FN")
}
if (is_TN(predictedLabel, feedback)) {
updateScore("TN")
if (hedge) {
reduceWeights(atom.initiatedBy, markedMap, learningRate, "hedge")
}
}
if (hedge) {
/*
println(s"Awake init: ${awakeInitRules.size}, Awake term: ${awakeTermRules.size}, " +
s"total awake: ${awakeInitRules.size+awakeTermRules.size} total rules: ${markedMap.size}")
*/
// Re-normalize weights of awake experts
//val inertCurrentWeight = stateHandler.inertiaExpert.getWeight(currentFluent)
val getTotalWeight = (x: Vector[Clause]) => x.map(x => x.w_pos).sum
val updateWeight = (x: Clause, y: Double) => {
if (x.tostring.replaceAll("\\s", "") == "initiatedAt(meeting(X0,X1),X2):-happensAt(active(X0),X2),happensAt(active(X1),X2).") {
println(s"${x.tostring}\nw: ${x.w_pos} w_new: ${y} predicted: $predictedLabel actual: $feedback")
}
if (y == 0.0) {
val stop = "stop"
}
x.w_pos = y
}
val awakeInitRules = atom.initiatedBy.map(x => markedMap(x))
val awakeTermRules = atom.terminatedBy.map(x => markedMap(x))
val totalInitWeightAfter = getTotalWeight(awakeInitRules)
val totalTermWeightAfter = getTotalWeight(awakeTermRules)
val totalWeightAfter = totalInitWeightAfter + totalTermWeightAfter
val mult = totalWeightBeforeUpdate/totalWeightAfter
if (!mult.isNaN) {
awakeInitRules.foreach(x => updateWeight(x, mult * x.w_pos ) )
awakeTermRules.foreach(x => updateWeight(x, mult * x.w_pos ) )
}
}
}
*/
}
// File: OLED-master/src/main/scala/oled/mwua/ExpertAdviceFunctions.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua
import java.io.{File, PrintWriter}
import scala.util.{Failure, Random, Success}
import app.runutils.{Globals, RunningOptions}
import com.typesafe.scalalogging.{LazyLogging, Logger}
import logic.{Clause, Constant, Literal, Theory}
import logic.Examples.Example
import lomrf.logic.{AtomSignature, EvidenceAtom, TRUE}
import lomrf.mln.model.Evidence
import oled.functions.SingleCoreOLEDFunctions
import oled.mwua
import oled.mwua.AuxFuncs._
import oled.mwua.HelperClasses.AtomTobePredicted
import utils.Utils
import xhail.Xhail
import scala.util.control.Breaks._
object ExpertAdviceFunctions extends LazyLogging {
// The "batch" argument contains only the evidence atoms here. The annotation atoms found in the batch
// are passed in via the "trueAtoms" argument.
def process(
batch: Example,
trueAtoms: Set[String],
inps: RunningOptions,
stateHandler: StateHandler,
trueLabels: Set[String],
learningRate: Double,
epsilon: Double, // used for the randomized version
randomizedPrediction: Boolean,
batchCounter: Int,
percentOfMistakesBeforeSpecialize: Int,
specializeAllAwakeOnMistake: Boolean,
receiveFeedbackBias: Double,
conservativeRuleGeneration: Boolean = true,
weightUpdateStrategy: String = "winnow", // this is either 'hedge' or 'winnow',
withInertia: Boolean = true,
feedBackGap: Int = 0,
splice: Option[Map[String, Double] => (Set[EvidenceAtom], Evidence)] = None,
mapper: Option[Set[EvidenceAtom] => Vector[String]] = None,
incompleteTrueAtoms: Option[Set[String]] = None,
inputTheory: Option[List[Clause]] = None) = {
//========================================
//stateHandler.ensemble.removeZeroWeights
//========================================
stateHandler.inertiaExpert.clear() // THIS IS NECESSARY (removing it breaks results)
// quick & dirty...
stateHandler.ensemble.initiationRules.foreach(x => x.isTopRule = true)
stateHandler.ensemble.terminationRules.foreach(x => x.isTopRule = true)
val streaming = true
var batchError = 0
var batchFPs = 0
var batchFNs = 0
var batchAtoms = 0
var atomCounter = 0 //used for feedback gap
val hedgePredictionThreshold = 0.5 //hedgePredictionThreshold = Globals.hedgePredictionThreshold
var withInputTheory = inputTheory.isDefined
/* TEST PHASE ONLY (NO WEIGHT/STRUCTURE UPDATE) */
/*
val isTestPhase = receiveFeedbackBias == 0.0
if (isTestPhase) {
setFinalTestingRules(stateHandler, streaming)
withInputTheory = true
}
*/
var spliceInput = Map.empty[String, Double]
stateHandler.batchCounter = batchCounter
var alreadyProcessedAtoms = Set.empty[String]
var finishedBatch = false
val predictAndUpdateTimed = Utils.time {
while (!finishedBatch) {
val (markedProgram, markedMap, groundingsMap, times, sortedAtomsToBePredicted, orderedTimes) =
ground(batch, inps, stateHandler, withInputTheory, streaming)
breakable {
sortedAtomsToBePredicted foreach { atom =>
val currentAtom = atom.atom
if (!alreadyProcessedAtoms.contains(currentAtom)) {
if (feedBackGap != 0) atomCounter += 1 // used for experiments with the feedback gap
// this should be placed at the end of the iteration. In this way, if
// a break actually occurs due to structure update we'll retrain on the
// current example to update weights of the new/revised rules and handle the
// inertia buffer (e.g. an FN turns into a TP after the addition of an
// initiation rule, the inertia expert now remembers the atom).
// UPDATE: Not a very good idea! Why insist on correcting a particular mistake?
// After all, the provided feedback/label may be wrong!
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
batchAtoms += 1
stateHandler.totalNumberOfRounds += 1
//-------------------
// MAKE A PREDICTION
//-------------------
var prediction = 0.0
var inertiaExpertPrediction = 0.0
var initWeightSum = 0.0
var termWeightSum = 0.0
var totalWeight = 0.0
var selected = ""
// The randomized prediction case has not been carried over to the PrequentialInference class.
// You need to modify the logic there to add it.
if (!randomizedPrediction) {
val (_prediction, _inertiaExpertPrediction, _initWeightSum, _termWeightSum) =
if (weightUpdateStrategy == "winnow") predict(atom, stateHandler, markedMap)
else predictHedge(atom, stateHandler, markedMap, withInertia, topRulesOnly = false)
//else ClassicSleepingExpertsHedge.predictHedge_NO_INERTIA(atom, stateHandler, markedMap)
prediction = _prediction
inertiaExpertPrediction = _inertiaExpertPrediction
initWeightSum = _initWeightSum
termWeightSum = _termWeightSum
} else {
val (_prediction_, _totalWeight, _selected) = predictRandomized(atom, stateHandler, markedMap)
prediction = _prediction_
totalWeight = _totalWeight
selected = _selected
}
// This has not been carried over to the PrequentialInference class.
// You need to modify the logic there to add it.
if (splice.isDefined) {
val time = atom.atomParsed.terms.tail.head.name
val eventAtom = atom.atomParsed.terms.head.asInstanceOf[Literal]
val eventPred = eventAtom.predSymbol.capitalize
val eventArgs = eventAtom.terms.map(x => x.name.capitalize).mkString(",")
val out = s"$eventPred($eventArgs,$time)"
if (weightUpdateStrategy == "winnow") {
// Rescale the prediction to [0,1]. If I is the interval (range) of the prediction, then
// rescaledPrediction = (prediction - min(I))/(max(I) - min(I)),
// where min(I) = -termWeightSum
// max(I) = initWeightSum + inertiaExpertPrediction
val _rescaledPrediction = (prediction - (-termWeightSum)) / ((initWeightSum + inertiaExpertPrediction) - (-termWeightSum))
val rescaledPrediction = {
if (_rescaledPrediction.isNaN) 0.0
else if (prediction <= 0) (-1) * _rescaledPrediction else _rescaledPrediction
}
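// Worked example (assumed numbers): with initWeightSum = 2, inertiaExpertPrediction = 1 and
// termWeightSum = 1, the prediction range is [-1, 3]; a prediction of 0.5 rescales to
// (0.5 - (-1)) / (3 - (-1)) = 0.375, kept as-is since the prediction is positive.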
//println(out, rescaledPrediction)
spliceInput += (out -> rescaledPrediction)
} else {
// with Hedge predictions are already in [0,1]
spliceInput += (out -> prediction)
}
}
// This has not been carried over to the PrequentialInference class.
// You need to modify the logic there to add it.
val feedback =
if (incompleteTrueAtoms.isDefined) {
getFeedback(atom, spliceInput, splice, mapper, incompleteTrueAtoms.get)
} else {
getFeedback(atom, spliceInput, splice, mapper, trueAtoms)
}
// The Winnow case has not been carried over to the PrequentialInference class.
// You need to modify the logic there to add it.
val predictedLabel =
if (weightUpdateStrategy == "winnow") {
if (prediction > 0) "true" else "false"
} else {
//if (prediction > 0) "true" else "false"
if (prediction >= hedgePredictionThreshold) "true" else "false"
}
// this is required in the randomized prediction case because the inertiaExpertPrediction variable
// is passed to weight update function and the inertia prediction is necessary for calculating the mistake probability.
if (selected == "inertia") inertiaExpertPrediction = prediction
if (incompleteTrueAtoms.isDefined) { // incomplete, splice etc
if (trueAtoms.contains(atom.atom) && prediction > 0) { // TP
stateHandler.totalTPs += 1
} else if (!trueAtoms.contains(atom.atom) && prediction > 0) { // FP
stateHandler.totalFPs += 1
batchError += 1
batchFPs += 1
} else if (!trueAtoms.contains(atom.atom) && prediction <= 0) { // TN
stateHandler.totalTNs += 1
} else { //FN
stateHandler.totalFNs += 1
batchError += 1
batchFNs += 1
}
} else { // full annotation
if (predictedLabel != feedback) {
batchError += 1
if (is_FP_mistake(predictedLabel, feedback)) {
batchFPs += 1
stateHandler.totalFPs += 1
if (selected == "inertia") {
logger.info(s"Inertia FP prediction for fluent ${atom.fluent}. " +
s"Inertia weight: ${stateHandler.inertiaExpert.getWeight(atom.fluent)}")
}
//logger.info(s"\nFP mistake for atom $currentAtom. " +
// s"Init w. sum: $initWeightSum, term w. sum: $termWeightSum, inert: $inertiaExpertPrediction")
}
if (is_FN_mistake(predictedLabel, feedback)) {
batchFNs += 1
stateHandler.totalFNs += 1
//logger.info(s"\nFN mistake for atom $currentAtom. " +
// s"Init w. sum: $initWeightSum, term w. sum: $termWeightSum, inert: $inertiaExpertPrediction")
}
} else {
if (predictedLabel == "true") stateHandler.totalTPs += 1 else stateHandler.totalTNs += 1
}
}
// Handles whether we receive feedback or not.
val update = {
///*
if (receiveFeedbackBias == 1.0) {
true
} else {
val p = Math.random()
if (p <= receiveFeedbackBias) true else false
}
//*/
/*
if (atomCounter < feedBackGap) {
false
} else if (atomCounter == feedBackGap) {
atomCounter = 0
true
} else throw new RuntimeException("Problem with atom counter for feedback gap...")
*/
}
var generateNewRuleFlag = false
if (update) {
stateHandler.receivedFeedback += 1
if (!randomizedPrediction) {
// in Winnow mode this updates weights only after a mistake (in the TP case it just updates the inertia expert)
///*
generateNewRuleFlag = updateWeights(atom, prediction, inertiaExpertPrediction, initWeightSum,
termWeightSum, predictedLabel, markedMap, feedback, stateHandler, learningRate, weightUpdateStrategy, withInertia)
// */
/*
ClassicSleepingExpertsHedge.updateWeights_NO_INERTIA(atom, prediction, inertiaExpertPrediction, initWeightSum,
termWeightSum, predictedLabel, markedMap, feedback, stateHandler, learningRate, weightUpdateStrategy)
*/
// Do not normalize when splice is used, to allow for larger confidence values
/*
if (randomizedPrediction) { //weightUpdateStrategy == "winnow" //randomizedPrediction //splice.isEmpty
stateHandler.normalizeWeights( (atom.initiatedBy ++ atom.terminatedBy).map(x => markedMap(x)), atom.fluent )
}
*/
} else {
updateWeightsRandomized(atom, prediction, inertiaExpertPrediction, predictedLabel, feedback, stateHandler, epsilon, markedMap, totalWeight)
}
/*if (predictedLabel != feedback) {
if (!withInputTheory) {
// Shouldn't we update the weights on newly generated rules here?
// Of course it's just 1 example, no big deal, but still...
val previousTime = if (streaming) orderedTimes( orderedTimes.indexOf(atom.time) -1 ) else 0
val structureUpdate_? =
ClassicSleepingExpertsHedge.updateStructure_NEW_HEDGE(atom, previousTime, markedMap, predictedLabel,
feedback, batch, currentAtom, inps, Logger(this.getClass).underlying, stateHandler,
percentOfMistakesBeforeSpecialize, randomizedPrediction, selected, specializeAllAwakeOnMistake,
conservativeRuleGeneration, generateNewRuleFlag)
if (structureUpdate_?) break
}
}*/
} else {
val delayedUpdate = new DelayedUpdate(atom, prediction, inertiaExpertPrediction,
initWeightSum, termWeightSum, predictedLabel, markedMap, feedback, stateHandler,
learningRate, weightUpdateStrategy, withInertia, orderedTimes)
stateHandler.delayedUpdates = stateHandler.delayedUpdates :+ delayedUpdate
}
}
}
finishedBatch = true
stateHandler.perBatchError = stateHandler.perBatchError :+ batchError
stateHandler.updateRunningF1Score
/*if (!withInputTheory) { // Update structure only when we are learning from scratch
val expandedInit =
SingleCoreOLEDFunctions.expandRules(Theory(stateHandler.ensemble.initiationRules.filter(x => x.refinements.nonEmpty)),
inps, Logger(this.getClass).underlying)
val expandedTerm =
SingleCoreOLEDFunctions.expandRules(Theory(stateHandler.ensemble.terminationRules.filter(x => x.refinements.nonEmpty)),
inps, Logger(this.getClass).underlying)
stateHandler.ensemble.initiationRules = expandedInit._1.clauses
stateHandler.ensemble.terminationRules = expandedTerm._1.clauses
}*/
}
}
}
if (batchError > 0) logger.info(s"*** Batch #$batchCounter Total mistakes: ${batchFPs + batchFNs} (FPs: $batchFPs | FNs: $batchFNs). Total batch atoms: $batchAtoms ***")
batchError
}
def setFinalTestingRules(stateHandler: StateHandler, streaming: Boolean) = {
val weightThreshold = 1.0 //0.00001 //1.1 //0.0
val (goodInit, goodTerm) = getFinalRulesDefault(stateHandler, weightThreshold)
stateHandler.ensemble.initiationRules = goodInit
stateHandler.ensemble.terminationRules = goodTerm
if (!streaming) {
logger.info(s"Testing with Initiation:\n${Theory(stateHandler.ensemble.initiationRules.sortBy(x => -x.w_pos)).showWithStats}")
logger.info(s"Testing with Termination:\n${Theory(stateHandler.ensemble.terminationRules.sortBy(x => -x.w_pos)).showWithStats}")
}
}
def getFinalRulesDefault(s: StateHandler, weightThreshold: Double) = {
val isGood = (r: Clause) => r.w_pos > weightThreshold //r.w_pos >= weightThreshold
def getGoodRules(x: List[Clause]) = x.filter(isGood)
val init = getGoodRules(s.ensemble.initiationRules)
val term = getGoodRules(s.ensemble.terminationRules)
//val init = (s.ensemble.initiationRules ++ s.ensemble.initiationRules.flatMap(x => x.refinements :+ x.supportSet.clauses.head)).filter(x => x.body.nonEmpty)
//val term = (s.ensemble.terminationRules ++ s.ensemble.terminationRules.flatMap(x => x.refinements :+ x.supportSet.clauses.head)).filter(x => x.body.nonEmpty)
(init, term)
}
def updateWeightsRandomized(atom: AtomTobePredicted, prediction: Double,
inertiaExpertPrediction: Double, predictedLabel: String,
feedback: String, stateHandler: StateHandler, epsilon: Double,
markedMap: Map[String, Clause], totalWeight: Double) = {
def weightNoInfinity(prev: Double, _new: Double) = {
if (_new.isPosInfinity) prev else _new
}
def getMistakeProbability(incorrectExperts: Vector[Clause], isInertiaCorrect: Boolean) = {
// The algorithm's probability of making a mistake is the sum, over all awake
// experts, of each expert's h_i selection probability (h_i / totalAwakeWeight)
// times 1 (if the expert is incorrect) or 0 (if the expert is correct). Since
// correct experts do not contribute to the sum, we only take the incorrect ones into account.
if (isInertiaCorrect) {
incorrectExperts.map(i => i.w_pos / totalWeight.toDouble).sum
} else {
incorrectExperts.map(i => i.w_pos / totalWeight.toDouble).sum + inertiaExpertPrediction / totalWeight.toDouble
}
}
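// Worked example (assumed numbers): with totalWeight = 2.0 and incorrect awake experts of
// weights 0.5 and 0.3, the mistake probability is (0.5 + 0.3) / 2.0 = 0.4; if inertia
// (prediction weight 0.2) is also incorrect, it becomes (0.5 + 0.3 + 0.2) / 2.0 = 0.5.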
def updateRulesWeights(correctExperts: Vector[String], incorrectExperts: Vector[String], isInertiaCorrect: Boolean) = {
val inc = incorrectExperts.map(x => markedMap(x))
val mistakeProbability = getMistakeProbability(inc, isInertiaCorrect)
val correctExponent = mistakeProbability / (1 + epsilon)
//val inCorrectExponent = mistakeProbability/((1+epsilon) - 1)
val inCorrectExponent = mistakeProbability / (1 + epsilon) - 1
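// With mistake probability M in [0,1], correct experts are multiplied by
// (1+epsilon)^(M/(1+epsilon)) >= 1 (promotion), while incorrect ones get the exponent
// M/(1+epsilon) - 1 < 0, i.e. a factor strictly below 1 (demotion).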
correctExperts foreach { x =>
val rule = markedMap(x)
rule.w_pos = weightNoInfinity(rule.w_pos, rule.w_pos * Math.pow(1 + epsilon, correctExponent))
}
incorrectExperts foreach { x =>
val rule = markedMap(x)
rule.w_pos = weightNoInfinity(rule.w_pos, rule.w_pos * Math.pow(1 + epsilon, inCorrectExponent))
}
if (inertiaExpertPrediction > 0) {
if (isInertiaCorrect) {
stateHandler.inertiaExpert.updateWeight(atom.fluent,
weightNoInfinity(inertiaExpertPrediction, inertiaExpertPrediction * Math.pow(1 + epsilon, correctExponent)))
} else {
stateHandler.inertiaExpert.updateWeight(atom.fluent,
weightNoInfinity(inertiaExpertPrediction, inertiaExpertPrediction * Math.pow(1 + epsilon, inCorrectExponent)))
}
}
}
val nonFiringInitRules =
markedMap.filter(x =>
x._2.head.functor.contains("initiated") && !atom.initiatedBy.toSet.contains(x._1))
val nonFiringTermRules =
markedMap.filter(x =>
x._2.head.functor.contains("terminated") && !atom.terminatedBy.toSet.contains(x._1))
if (is_TP(predictedLabel, feedback)) {
if (!stateHandler.inertiaExpert.knowsAbout(atom.fluent)) {
//logger.info(s"ADDING ${atom.fluent} TO THE INERTIA EXPERT ")
stateHandler.inertiaExpert.updateWeight(atom.fluent, prediction)
// Prediction should be positive here (we predicted holds). Check this, just to be on the safe side...
if (prediction <= 0.0) {
throw new RuntimeException(s"TP atom with prediction =< 0. " +
s"At batch: ${stateHandler.batchCounter}, while predicting for atom ${atom.atom}")
}
}
// Awake initiation rules are correct, awake termination rules are incorrect and inertia is correct
updateRulesWeights(atom.initiatedBy, atom.terminatedBy, true)
updateRulesScore("TP", atom.initiatedBy.map(x => markedMap(x)), nonFiringInitRules.values.toVector,
atom.terminatedBy.map(x => markedMap(x)), nonFiringTermRules.values.toVector)
}
if (is_FP_mistake(predictedLabel, feedback)) {
// Awake initiation rules are incorrect, awake termination rules are correct and inertia is incorrect
updateRulesWeights(atom.terminatedBy, atom.initiatedBy, false)
updateRulesScore("FP", atom.initiatedBy.map(x => markedMap(x)), nonFiringInitRules.values.toVector,
atom.terminatedBy.map(x => markedMap(x)), nonFiringTermRules.values.toVector)
// This is clearly wrong... But the randomized version does not work anyway.
if (stateHandler.inertiaExpert.knowsAbout(atom.fluent)) stateHandler.inertiaExpert.forget(atom.fluent)
}
if (is_FN_mistake(predictedLabel, feedback)) {
// Awake initiation rules are correct, awake termination rules are incorrect and inertia is incorrect
updateRulesWeights(atom.initiatedBy, atom.terminatedBy, true)
updateRulesScore("FN", atom.initiatedBy.map(x => markedMap(x)), nonFiringInitRules.values.toVector,
atom.terminatedBy.map(x => markedMap(x)), nonFiringTermRules.values.toVector)
}
if (is_TN(predictedLabel, feedback)) { // TN
// Awake initiation rules are incorrect, awake termination rules are correct and inertia is incorrect
updateRulesWeights(atom.terminatedBy, atom.initiatedBy, false)
updateRulesScore("TN", atom.initiatedBy.map(x => markedMap(x)), nonFiringInitRules.values.toVector,
atom.terminatedBy.map(x => markedMap(x)), nonFiringTermRules.values.toVector)
if (inertiaExpertPrediction > 0.0) stateHandler.inertiaExpert.forget(atom.fluent)
}
}
def updateWeights(atom: AtomTobePredicted, prediction: Double, inertiaExpertPrediction: Double,
initWeightSum: Double, termWeightSum: Double, predictedLabel: String,
markedMap: Map[String, Clause], feedback: String, stateHandler: StateHandler,
learningRate: Double, weightUpdateStrategy: String, withInertia: Boolean = true) = {
var generateNewRule = false
val currentFluent = atom.fluent
val hedge = weightUpdateStrategy == "hedge"
val getTotalWeight = (x: Vector[Clause]) => x.map(x => x.w_pos).sum
// These include empty-bodied rules; used only for score (default, foilgain) updates, i.e. for the Hoeffding tests.
val _awakeInitRules = atom.initiatedBy.map(x => markedMap(x))
val _awakeTermRules = atom.terminatedBy.map(x => markedMap(x))
val awakeInitRules = _awakeInitRules.filter(x => x.body.nonEmpty) // exclude empty-bodied rules from prediction and weight normalization
val awakeTermRules = _awakeTermRules.filter(x => x.body.nonEmpty) // exclude empty-bodied rules from prediction and weight normalization
// Create a map with the rules and their current weight. We'll use it
// to show the weight updates in the case of Hedge (this is for debugging).
var initRulesMap = scala.collection.mutable.Map.empty[Int, (String, Double)]
var termRulesMap = scala.collection.mutable.Map.empty[Int, (String, Double)]
awakeInitRules.foreach(x => initRulesMap += (x.## -> (x.tostring, x.w_pos)))
awakeTermRules.foreach(x => termRulesMap += (x.## -> (x.tostring, x.w_pos)))
val totalWeightBeforeUpdate =
if (withInertia) {
inertiaExpertPrediction + initWeightSum + termWeightSum
} else {
initWeightSum + termWeightSum
}
def getSleeping(what: String) = {
val awake = if (what == "initiated") atom.initiatedBy.toSet else atom.terminatedBy.toSet
markedMap.filter(x => x._2.head.functor.contains(what) && !awake.contains(x._1))
}
val sleepingInitRules = getSleeping("initiated")
val sleepingTermRules = getSleeping("terminated")
def updateScore(what: String) = {
updateRulesScore(what, _awakeInitRules, sleepingInitRules.values.toVector, _awakeTermRules, sleepingTermRules.values.toVector)
}
var outcome = ""
if (is_TP(predictedLabel, feedback)) {
outcome = "TP"
if (hedge) {
// Reduce the weights of termination rules.
reduceWeights(atom.terminatedBy, markedMap, learningRate, "hedge")
// Inertia is correct here, so if the inertia expert knows about the
// current fluent we leave the weight unchanged. If not (i.e. the fluent is
// initiated for the first time) we deal with that after all weights of other
// experts have been updated. We then set the inertia weight of this fluent
// to the weighted sum of the experts for this fluent (the actual prediction value).
        // If we updated the weight of the inertia expert before normalization,
        // the total weight of the ensemble after the update (W_{t+1}) might be larger
        // than the total weight before the update (W_t). This breaks the invariant that
        // W_{t+1} is always smaller than W_t (erroneous rules are penalized, while the
        // weight of correct ones is left unchanged). The invariant has the effect of
        // actually increasing the weight of correct rules after the normalization:
        // W = W_{t}/W_{t+1} > 1, since W_{t} > W_{t+1} => w_{i,t+1} = W * w_{i,t} > w_{i,t},
        // where w_{i,t} is the weight of the i-th expert (which is unchanged if the expert is correct).
        // If this is messed up, we'll end up reducing the weight of correct experts after each round.
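        // A hypothetical numeric illustration: let W_t = 1.0 with two awake experts weighing
        // 0.5 each. If the erroneous one is penalized down to 0.25, then W_{t+1} = 0.75 and
        // W = 1.0/0.75 ≈ 1.33, so after normalization the correct expert's weight becomes
        // ≈ 0.67 and the penalized one's ≈ 0.33.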
} else {
// Winnow
val holdsWeight = inertiaExpertPrediction + initWeightSum
stateHandler.inertiaExpert.updateWeight(currentFluent, holdsWeight)
}
} else if (is_FP_mistake(predictedLabel, feedback)) {
outcome = "FP"
if (!hedge) {
reduceWeights(atom.initiatedBy, markedMap, learningRate)
increaseWeights(atom.terminatedBy, markedMap, learningRate)
} else {
reduceWeights(atom.initiatedBy, markedMap, learningRate, "hedge")
}
if (inertiaExpertPrediction > 0.0) {
val newWeight =
if (!hedge) inertiaExpertPrediction * Math.pow(Math.E, (-1.0) * learningRate)
else inertiaExpertPrediction * learningRate
stateHandler.inertiaExpert.updateWeight(currentFluent, newWeight)
      } else {
        // An earlier idea was to remember the fluent here, since it was recognized: we are doing
        // sequential prediction, so if we made an FP mistake at the next round we would reduce
        // the inertia weight and add termination rules. That doesn't make much sense though,
        // so we do nothing here.
        //stateHandler.inertiaExpert.updateWeight(currentFluent, prediction)
      }
} else if (is_FN_mistake(predictedLabel, feedback)) {
outcome = "FN"
if (!hedge) {
increaseWeights(atom.initiatedBy, markedMap, learningRate)
reduceWeights(atom.terminatedBy, markedMap, learningRate)
// In Winnow, we also promote the inertia expert if it knows something for the current fluent
if (inertiaExpertPrediction > 0.0) {
val newWeight = inertiaExpertPrediction * Math.pow(Math.E, 1.0 * learningRate)
//newWeight = if (newWeight.isPosInfinity) stateHandler.inertiaExpert.getWeight(currentFluent) else newWeight
stateHandler.inertiaExpert.updateWeight(currentFluent, newWeight)
}
} else {
reduceWeights(atom.terminatedBy, markedMap, learningRate, "hedge")
        // In Hedge, even if inertia makes a prediction here, that prediction is correct (the fluent does hold), so we leave its weight unchanged.
}
} else { // TN
outcome = "TN"
stateHandler.inertiaExpert.forget(currentFluent)
if (hedge) reduceWeights(atom.initiatedBy, markedMap, learningRate, "hedge")
}
updateScore(outcome)
if (hedge) {
// Re-normalize weights of awake experts
val totalInitWeightPrevious = initRulesMap.map(x => x._2._2).sum
val totalTermWeightPrevious = termRulesMap.map(x => x._2._2).sum
val initWeightsAfterUpdatesMap = awakeInitRules.map(x => (x.##, x.w_pos))
val termWeightsAfterUpdatesMap = awakeTermRules.map(x => (x.##, x.w_pos))
val allRulesWeightsAfterUpdatesMap = (initWeightsAfterUpdatesMap ++ termWeightsAfterUpdatesMap).toMap
val totalInitWeightAfterWeightsUpdate = getTotalWeight(awakeInitRules) // the updates have already taken place
val totalTermWeightAfterWeightsUpdate = getTotalWeight(awakeTermRules) // the updates have already taken place
val inertAfterWeightUpdate = stateHandler.inertiaExpert.getWeight(currentFluent)
val totalWeightAfterUpdate =
if (withInertia) {
inertAfterWeightUpdate + totalInitWeightAfterWeightsUpdate + totalTermWeightAfterWeightsUpdate
} else {
totalInitWeightAfterWeightsUpdate + totalTermWeightAfterWeightsUpdate
}
val mult = totalWeightBeforeUpdate / totalWeightAfterUpdate
val updateWeight = (rule: Clause, y: Double) => rule.w_pos = y
if (!mult.isNaN) {
awakeInitRules.foreach(x => updateWeight(x, mult * x.w_pos))
awakeTermRules.foreach(x => updateWeight(x, mult * x.w_pos))
if (stateHandler.inertiaExpert.knowsAbout(currentFluent)) {
stateHandler.inertiaExpert.updateWeight(currentFluent, mult * inertAfterWeightUpdate)
}
}
// after normalization
val totalInitWeightAfterNormalization = getTotalWeight(awakeInitRules)
val totalTermWeightAfterNormalization = getTotalWeight(awakeTermRules)
val inertAfterNormalization = stateHandler.inertiaExpert.getWeight(currentFluent)
val totalEnsembleWeightBefore = totalWeightBeforeUpdate
val totalEnsembleWeightAfterUpdates =
if (withInertia) {
totalInitWeightAfterWeightsUpdate + totalTermWeightAfterWeightsUpdate + inertAfterWeightUpdate
} else {
totalInitWeightAfterWeightsUpdate + totalTermWeightAfterWeightsUpdate
}
val totalEnsembleWeightAfterNormalization =
if (withInertia) {
totalInitWeightAfterNormalization + totalTermWeightAfterNormalization + inertAfterNormalization
} else {
          totalInitWeightAfterNormalization + totalTermWeightAfterNormalization
}
      // The commented-out check below is wrong: the total AWAKE weight of the ensemble is supposed
      // to remain the same, and any differences are due to the numeric precision of Doubles. It's
      // the total initiation or termination weight that is the key here. If the total initiation
      // weight does not drop after an FP, generate new termination rules. Similarly, if the total
      // termination weight does not drop after an FN, generate new initiation rules.
//if (totalEnsembleWeightAfter - totalEnsembleWeightBefore <= Math.pow(10,-4)) generateNewRule = true
if (outcome == "FP" && totalInitWeightAfterNormalization >= totalInitWeightPrevious) generateNewRule = true
if (outcome == "FN" && totalTermWeightAfterNormalization >= totalTermWeightPrevious) generateNewRule = true
/* DEBUGGING INFO */
def debuggingInfo(x: Vector[Clause], what: String) = {
x foreach { rule =>
val entry = if (what == "initiated") initRulesMap(rule.##) else termRulesMap(rule.##)
println(s"weight prev/after update/after normalization: ${entry._2}/${allRulesWeightsAfterUpdatesMap(rule.##)}/${rule.w_pos} (tps,fps,fns): (${rule.tps},${rule.fps},${rule.fns})\n${entry._1}")
}
}
/*if (outcome == "FP") { //|| outcome == "FN"
println("======================================================================")
println(s"prediction: $prediction, actual: $outcome, fluent: $currentFluent")
if (withInertia) {
println(s"Inertia before|after weights update|after normalization: $inertiaExpertPrediction|$inertAfterWeightUpdate|$inertAfterNormalization")
}
println(s"Total init before|after weights update & normalization: $totalInitWeightPrevious|$totalInitWeightAfterWeightsUpdate|$totalInitWeightAfterNormalization")
println(s"Total term before|after weights update & normalization: $totalTermWeightPrevious|$totalTermWeightAfterWeightsUpdate|$totalTermWeightAfterNormalization")
println(s"total weight before/after updates/normalization: $totalEnsembleWeightBefore/$totalEnsembleWeightAfterUpdates/$totalEnsembleWeightAfterNormalization equal: ${totalEnsembleWeightBefore == totalEnsembleWeightAfterNormalization}")
println("======================================================================")
}*/
if (outcome == "TP") { // || outcome == "FP" // should we do this for FP as well (remember the fluent)? NO! MESSES THINGS UP. Generates wrong termination rules
// If we recognized the fluent successfully during at this round, remember it
if (!stateHandler.inertiaExpert.knowsAbout(currentFluent)) {
stateHandler.inertiaExpert.updateWeight(currentFluent, prediction)
}
}
}
generateNewRule
}
def updateStructure_NEW(
atom: AtomTobePredicted,
markedMap: Map[String, Clause],
predictedLabel: String,
feedback: String,
batch: Example,
currentAtom: String,
inps: RunningOptions,
logger: org.slf4j.Logger,
stateHandler: StateHandler,
percentOfMistakesBeforeSpecialize: Int,
randomizedPrediction: Boolean,
selected: String,
specializeAllAwakeOnMistake: Boolean,
conservativeRuleGeneration: Boolean) = {
def getAwakeBottomRules(what: String) = {
if (what == "initiatedAt") atom.initiatedBy.filter(x => markedMap(x).isBottomRule)
else atom.terminatedBy.filter(x => markedMap(x).isBottomRule)
}
def splitAwakeAsleep(rulesToSplit: List[Clause], awakeIds: Set[String]) = {
      val rulesToSplitIds = rulesToSplit.map(_.##).toSet
val (topLevelAwakeRules, topLevelAsleepRules) = rulesToSplit.foldLeft(Vector.empty[Clause], Vector.empty[Clause]) { (x, rule) =>
val isAwake = awakeIds.contains(rule.##.toString)
val isTopLevel = rulesToSplitIds.contains(rule.##)
if (isAwake) if (isTopLevel) (x._1 :+ rule, x._2) else (x._1, x._2) // then it's a refinement rule
else if (isTopLevel) (x._1, x._2 :+ rule) else (x._1, x._2) // then it's a refinement rule
}
(topLevelAwakeRules, topLevelAsleepRules)
}
var updatedStructure = false
if (is_FP_mistake(predictedLabel, feedback)) {
val awakeBottomRules = getAwakeBottomRules("terminatedAt")
      // We don't have firing termination rules, so we'll try to generate one.
      // In conservative mode, we generate new rules only if no awake rule currently exists.
      // Also, we are always conservative with termination rules: we generate new ones only if the FP
      // holds by inertia; otherwise it doesn't make much sense.
if (atom.terminatedBy.isEmpty) {
        // Keeping the if (stateHandler.inertiaExpert.knowsAbout(atom.fluent)) guard here
        // yields many more mistakes. On the other hand, it seems more reasonable to generate
        // termination rules only when the fluent holds by inertia... (don't know what to do)
if (stateHandler.inertiaExpert.knowsAbout(atom.fluent)) {
updatedStructure = generateNewRule(batch, currentAtom, inps, "FP", logger, stateHandler, "terminatedAt", 1.0)
}
} else {
if (!conservativeRuleGeneration) {
// If we are not in conservative mode we try to generate new termination rules even if awake termination
// rules already exist. We only do so if the current example has not already been compressed into an existing
// bottom rule.
if (awakeBottomRules.isEmpty && stateHandler.inertiaExpert.knowsAbout(atom.fluent)) {
updatedStructure = generateNewRule_1(batch, currentAtom, inps, logger, stateHandler, "terminatedAt", 1.0)
}
}
}
// Also, in the case of an FP mistake we try to specialize awake initiation rules.
if (atom.initiatedBy.nonEmpty) {
// We are doing this after each batch
/*
val (topLevelAwakeRules, topLevelAsleepRules) = splitAwakeAsleep(stateHandler.ensemble.initiationRules, atom.initiatedBy.toSet)
val expandedInit = SingleCoreOLEDFunctions.
expandRules(Theory(topLevelAwakeRules.toList.filter(x => x.refinements.nonEmpty)), inps, logger)
if (expandedInit._2) {
stateHandler.ensemble.initiationRules = expandedInit._1.clauses ++ topLevelAsleepRules
updatedStructure = true
}
*/
}
}
if (is_FN_mistake(predictedLabel, feedback)) {
val awakeBottomRules = getAwakeBottomRules("initiatedAt")
if (atom.initiatedBy.isEmpty) {
// We don't have firing initiation rules. Generate one.
updatedStructure = generateNewRule(batch, currentAtom, inps, "FN", logger, stateHandler, "initiatedAt", 1.0)
} else {
if (!conservativeRuleGeneration) {
// If we are not in conservative mode we try to generate new initiation rules even if awake initiation
// rules already exist. We only do so if the current example has not already been compressed into an existing
// bottom rule.
if (awakeBottomRules.isEmpty) {
updatedStructure = generateNewRule_1(batch, currentAtom, inps, logger, stateHandler, "initiatedAt", 1.0)
}
}
}
      // Also, in the case of an FN mistake we try to specialize awake termination rules.
if (atom.terminatedBy.nonEmpty) {
// We are doing this after each batch
/*
val (topLevelAwakeRules, topLevelAsleepRules) = splitAwakeAsleep(stateHandler.ensemble.terminationRules, atom.terminatedBy.toSet)
val expandedInit = SingleCoreOLEDFunctions.
expandRules(Theory(topLevelAwakeRules.toList.filter(x => x.refinements.nonEmpty)), inps, logger)
if (expandedInit._2) {
stateHandler.ensemble.terminationRules = expandedInit._1.clauses ++ topLevelAsleepRules
updatedStructure = true
}
*/
}
}
updatedStructure
}
def updateStructure(atom: AtomTobePredicted, markedMap: Map[String, Clause],
predictedLabel: String, feedback: String, batch: Example,
currentAtom: String, inps: RunningOptions,
logger: org.slf4j.Logger, stateHandler: StateHandler,
percentOfMistakesBeforeSpecialize: Int, randomizedPrediction: Boolean,
selected: String, specializeAllAwakeOnMistake: Boolean) = {
if (is_FP_mistake(predictedLabel, feedback)) {
if (atom.terminatedBy.isEmpty) {
// We don't have firing termination rules. Generate one.
generateNewRule(batch, currentAtom, inps, "FP", logger, stateHandler, "terminatedAt", 1.0)
} else {
// We do have firing termination rules.
// Specialize initiation rules.
if (atom.initiatedBy.nonEmpty) {
// This is for performing Hoeffding tests for awake initiation rules on FP mistakes, considering as
// specialization candidates only the sleeping refinements (which can fix the current mistake).
// It doesn't make much sense. Why specialize only on mistake?
/*
val (topLevelAwakeRules, topLevelAsleepRules) = stateHandler.ensemble.initiationRules.foldLeft(Vector.empty[Clause], Vector.empty[Clause]) { (x, rule) =>
val isAwake = atom.initiatedBy.toSet.contains(rule.##.toString)
val isTopLevel = stateHandler.ensemble.initiationRules.map(_##).toSet.contains(rule.##)
if (isAwake) {
if (isTopLevel) {
(x._1 :+ rule, x._2)
} else {
(x._1, x._2) // then it's a refinement rule
}
} else {
if (isTopLevel) {
(x._1, x._2 :+ rule)
} else {
(x._1, x._2) // then it's a refinement rule
}
}
}
val specializationCandidates = topLevelAwakeRules
val expanded = SingleCoreOLEDFunctions.expandRules(Theory(specializationCandidates.toList), inps, logger)
val break_? = expanded._2
if (break_?) {
stateHandler.ensemble.initiationRules = topLevelAsleepRules.toList ++ expanded._1.clauses
break
}
*/
// This is for expanding to a sleeping refinement immediately after an FP mistake. No sense in doing that.
///*
val break_? = specialize(atom, stateHandler, markedMap, "initiated", inps,
logger, percentOfMistakesBeforeSpecialize, "FP", randomizedPrediction, selected, specializeAllAwakeOnMistake)
if (break_?) break
//*/
}
}
}
if (is_FN_mistake(predictedLabel, feedback)) {
if (atom.initiatedBy.isEmpty) {
// We don't have firing initiation rules. Generate one.
generateNewRule(batch, currentAtom, inps, "FN", logger, stateHandler, "initiatedAt", 1.0)
} else {
// We do have firing initiation rules.
// Specialize termination rules.
if (atom.terminatedBy.nonEmpty) {
// This is for performing Hoeffding tests for awake termination rules on FN mistakes, considering as
// specialization candidates only the sleeping refinements (which can fix the current mistake).
// It doesn't make much sense. Why specialize only on mistake?
/*
val (topLevelAwakeRules, topLevelAsleepRules) = stateHandler.ensemble.terminationRules.foldLeft(Vector.empty[Clause], Vector.empty[Clause]) { (x, rule) =>
val isAwake = atom.terminatedBy.toSet.contains(rule.##.toString)
val isTopLevel = stateHandler.ensemble.terminationRules.map(_##).toSet.contains(rule.##)
if (isAwake) {
if (isTopLevel) {
(x._1 :+ rule, x._2)
} else {
(x._1, x._2) // then it's a refinement rule
}
} else {
if (isTopLevel) {
(x._1, x._2 :+ rule)
} else {
(x._1, x._2) // then it's a refinement rule
}
}
}
val specializationCandidates = topLevelAwakeRules
val expanded = SingleCoreOLEDFunctions.expandRules(Theory(specializationCandidates.toList), inps, logger)
val break_? = expanded._2
if (break_?) {
stateHandler.ensemble.terminationRules = topLevelAsleepRules.toList ++ expanded._1.clauses
break
}
*/
          // This is for expanding to a sleeping refinement immediately after an FN mistake. No sense in doing that.
///*
val break_? = specialize(atom, stateHandler, markedMap, "terminated", inps,
logger, percentOfMistakesBeforeSpecialize, "FN", randomizedPrediction, selected, specializeAllAwakeOnMistake)
if (break_?) break
//*/
}
}
}
}
def updateStructure_STRONGLY_INIT(atom: AtomTobePredicted, markedMap: Map[String, Clause],
predictedLabel: String, feedback: String, batch: Example,
currentAtom: String, inps: RunningOptions,
logger: org.slf4j.Logger, stateHandler: StateHandler,
percentOfMistakesBeforeSpecialize: Int, randomizedPrediction: Boolean,
selected: String, specializeAllAwakeOnMistake: Boolean) = {
if (is_FP_mistake(predictedLabel, feedback)) {
// For an FP mistake we have the following cases:
// 1. holdsAt wins because there are no awake termination rules (so we have awake initiation rules and/or inertia). Then:
// 1.1 If the fluent already holds by inertia, generate new termination expert tree
// (it's hopeless to expect to fix the mistake by down-weighting initiation rules,
// since we need to terminate the fluent for it to be "forgotten" by the inertia expert).
// 1.2. If the fluent does not already hold by inertia, then specialize an existing initiation rule (or all???). With that we remove
// an awake initiation expert, thus hoping to prevent this scenario from happening again in the future.
// NOTE: For this to work it is necessary for the inertia expert to remember a fluent only in the case of a TP
// (NOT an FP, otherwise we get back the problem of 1.1)
// 2. holdsAt wins because the awake termination rules are out-scored. In that case let the weight demotion
// (for the awake init. rules and the inertia expert) and the weight promotion (for the awake term. rules) do the job.
if (atom.terminatedBy.isEmpty) {
if (stateHandler.inertiaExpert.knowsAbout(atom.fluent)) { // this is 1.1 from above
generateNewRule(batch, currentAtom, inps, "FP", logger, stateHandler, "terminatedAt", 1.0)
} else { // this is 1.2 from above.
val break_? = specialize(atom, stateHandler, markedMap, "initiated", inps,
logger, percentOfMistakesBeforeSpecialize, "FP", randomizedPrediction, selected, specializeAllAwakeOnMistake)
if (break_?) break
}
} else { // this is 2 from above
// We do have firing termination rules.
// Specialize initiation rules.
/*
if (atom.initiatedBy.nonEmpty) {
val break_? = specialize(atom, stateHandler, markedMap, "initiated", inps,
logger, percentOfMistakesBeforeSpecialize, "FP", randomizedPrediction, selected, specializeAllAwakeOnMistake)
if (break_?) break
}
*/
}
}
if (is_FN_mistake(predictedLabel, feedback)) {
if (atom.initiatedBy.isEmpty) {
// We don't have firing initiation rules. Generate one, if it does not hold by inertia.
if (!stateHandler.inertiaExpert.knowsAbout(atom.fluent)) {
generateNewRule(batch, currentAtom, inps, "FN", logger, stateHandler, "initiatedAt", 1.0)
} else {
// it holds by inertia, let the weights fix the problem
val break_? = specialize(atom, stateHandler, markedMap, "terminated", inps,
logger, percentOfMistakesBeforeSpecialize, "FN", randomizedPrediction, selected, specializeAllAwakeOnMistake)
if (break_?) break
}
} else {
// We do have firing initiation rules.
// Specialize termination rules.
///*
if (atom.terminatedBy.nonEmpty) {
val break_? = specialize(atom, stateHandler, markedMap, "terminated", inps,
logger, percentOfMistakesBeforeSpecialize, "FN", randomizedPrediction, selected, specializeAllAwakeOnMistake)
if (break_?) break
}
//*/
}
}
}
def generateNewExpert_NEW(batch: Example, currentAtom: AtomTobePredicted, previousTimePoint: Int,
inps: RunningOptions, mistakeType: String, logger: org.slf4j.Logger,
stateHandler: StateHandler, what: String, totalWeight: Double,
removePastExperts: Boolean = false, otherAwakeExperts: Vector[Clause] = Vector.empty[Clause]) = {
def isRedundant(newRule: Clause) = {
val getAllBottomRules = (x: List[Clause]) => x.flatMap(y => y.supportSet.clauses)
val allBottomRules = {
if (newRule.head.functor.contains("initiated")) getAllBottomRules(stateHandler.ensemble.initiationRules)
else getAllBottomRules(stateHandler.ensemble.terminationRules)
}
allBottomRules.exists(c => newRule.thetaSubsumes(c))
}
var generatedRule = false
val newRule = {
val headAtom = s"$what(${currentAtom.fluent},$previousTimePoint)"
val xhailInput = Map("annotation" -> batch.annotationASP, "narrative" -> batch.narrativeASP)
val bkFile = if (what == "initiatedAt") inps.globals.BK_INITIATED_ONLY else inps.globals.BK_TERMINATED_ONLY
val aspFile: File = Utils.getTempFile("aspinput", ".lp")
val (_, bcs) = Xhail.generateKernel(List(headAtom), examples = xhailInput, aspInputFile = aspFile, bkFile = bkFile, globals = inps.globals)
aspFile.delete()
      val _newRule = {
        if (bcs.isEmpty) {
          Clause.empty
        } else {
          // Access bcs.head only after the emptiness check, to avoid an exception
          // on an empty list of bottom clauses.
          val _bottomClause = bcs.head
          // topRule is an empty-bodied rule along with the newly-generated bottom clause.
          val topRule = {
            val c = Clause(head = _bottomClause.head, body = List())
            c.addToSupport(_bottomClause)
            c
          }
          // Populate the topRule's refinements.
          topRule.generateCandidateRefs(inps.globals, otherAwakeExperts)
          topRule.w_pos = totalWeight
          topRule.refinements.foreach(x => x.w_pos = totalWeight)
          topRule
        }
      }
      _newRule
}
if (!newRule.equals(Clause.empty)) {
logger.info(s"Generated new $what rule in response to $mistakeType atom: ${currentAtom.atom}")
//=========================================
// Store the new rule in the state handler
stateHandler.addRule(newRule)
//=========================================
generatedRule = true
} else {
logger.info(s"At batch ${stateHandler.batchCounter}: Failed to generate bottom rule from $mistakeType mistake with atom: $currentAtom")
}
generatedRule
}
def generateNewRule(batch: Example, currentAtom: String, inps: RunningOptions, mistakeType: String,
logger: org.slf4j.Logger, stateHandler: StateHandler,
what: String, totalWeight: Double, removePastExperts: Boolean = false,
otherAwakeExperts: Vector[Clause] = Vector.empty[Clause]) = {
def isRedundant(newRule: Clause) = {
val getAllBottomRules = (x: List[Clause]) => x.flatMap(y => y.supportSet.clauses)
val allBottomRules = {
if (newRule.head.functor.contains("initiated")) getAllBottomRules(stateHandler.ensemble.initiationRules)
else getAllBottomRules(stateHandler.ensemble.terminationRules)
}
allBottomRules.exists(c => newRule.thetaSubsumes(c))
}
var generatedRule = false
val newRule = generateNewExpert(batch, currentAtom, inps.globals, what, totalWeight, otherAwakeExperts)
//if (!isRedundant(newRule)) {
if (!newRule.equals(Clause.empty)) {
logger.info(s"Generated new $what rule in response to $mistakeType atom: $currentAtom")
stateHandler.addRule(newRule)
generatedRule = true
} else {
logger.info(s"At batch ${stateHandler.batchCounter}: Failed to generate bottom rule from $mistakeType mistake with atom: $currentAtom")
}
//} else {
//logger.info(s"At batch ${stateHandler.batchCounter}: Dropped redundant bottom rule.")
//}
generatedRule
}
def generateNewRule_1(batch: Example, currentAtom: String, inps: RunningOptions,
logger: org.slf4j.Logger, stateHandler: StateHandler,
what: String, totalWeight: Double, removePastExperts: Boolean = false) = {
var generatedNewRule = false
val newRule = generateNewExpert(batch, currentAtom, inps.globals, what, totalWeight)
if (!newRule.equals(Clause.empty)) {
logger.info(s"Generated new $what rule from atom: $currentAtom")
stateHandler.addRule(newRule)
generatedNewRule = true
} else {
logger.info(s"At batch ${stateHandler.batchCounter}: Failed to generate bottom rule from atom: $currentAtom")
}
generatedNewRule
}
def specialize(atom: AtomTobePredicted, stateHandler: StateHandler,
markedMap: Map[String, Clause], what: String, inps: RunningOptions,
logger: org.slf4j.Logger, percentOfMistakesBeforeSpecialize: Int,
mistakeType: String, randomizedPrediction: Boolean, selected: String, specializeAllAwakeOnMistake: Boolean) = {
val topLevelRules = if (what == "initiated") stateHandler.ensemble.initiationRules else stateHandler.ensemble.terminationRules
val awakeExperts = if (what == "initiated") atom.initiatedBy else atom.terminatedBy // This contains the refinements
val nonFiringRules =
markedMap.filter(x =>
x._2.head.functor.contains(what) && !awakeExperts.toSet.contains(x._1))
def getSleepingChildren(ruleToSpecialize: Clause) = {
ruleToSpecialize.refinements.filter(r => nonFiringRules.keySet.contains(r.##.toString)).
//filter(s => s.score > ruleToSpecialize.score).
//filter(r => !allRules.exists(r1 => r1.thetaSubsumes(r) && r.thetaSubsumes(r1))).
sortBy { x => (-x.w_pos, -x.score, x.body.length + 1) }
}
def performSpecialization(ruleToSpecialize: (Clause, List[Clause])) = {
val suitableRefs = ruleToSpecialize._2
val bestRefinement = suitableRefs.head
/*
if (bestRefinement.w > ruleToSpecialize._1.w) {
if (bestRefinement.refinements.isEmpty) bestRefinement.generateCandidateRefs(inps.globals)
showInfo(ruleToSpecialize._1, bestRefinement, atom.atom, mistakeType)
stateHandler.removeRule(ruleToSpecialize._1)
stateHandler.addRule(bestRefinement)
}
*/
if (bestRefinement.refinements.isEmpty) bestRefinement.generateCandidateRefs(inps.globals)
showInfo(ruleToSpecialize._1, bestRefinement, atom.atom, mistakeType)
stateHandler.removeRule(ruleToSpecialize._1)
stateHandler.addRule(bestRefinement)
}
val allIncorrectAwakeTopLevelRules = {
topLevelRules.filter{ x =>
val totalFPs = stateHandler.totalFPs
val specialize = x.fps >= totalFPs * (percentOfMistakesBeforeSpecialize.toDouble / 100)
awakeExperts.toSet.contains(x.##.toString) && specialize
}
}
val goAheads = allIncorrectAwakeTopLevelRules.map(x => (x, getSleepingChildren(x))).filter(x => x._2.nonEmpty)
var performedSpecialization = false
if (goAheads.nonEmpty) {
if (specializeAllAwakeOnMistake) {
goAheads foreach { ruleToSpecialize =>
performedSpecialization = true
performSpecialization(ruleToSpecialize)
}
} else {
performedSpecialization = true
// Pick only one at random
val shuffled = scala.util.Random.shuffle(goAheads)
if (!randomizedPrediction) {
performSpecialization(shuffled.head)
} else {
performedSpecialization = true
// Try to specialize the rule we predicted with, if not possible, specialize one at random
val rule = goAheads.find(p => p._1.##.toString == selected) match {
case Some(x) => x
case _ => shuffled.head
}
performSpecialization(rule)
}
}
} else {
//logger.info("Could not perform specialization (no sleeping children found).")
}
performedSpecialization
}
def is_FP_mistake(predictedLabel: String, feedback: String) = {
predictedLabel == "true" && feedback == "false"
}
def is_FN_mistake(predictedLabel: String, feedback: String) = {
predictedLabel == "false" && feedback == "true"
}
def is_TP(predictedLabel: String, feedback: String) = {
predictedLabel == "true" && feedback == "true"
}
def is_TN(predictedLabel: String, feedback: String) = {
predictedLabel == "false" && feedback == "false"
}
def getFeedback(
a: AtomTobePredicted,
predictions: Map[String, Double],
splice: Option[Map[String, Double] => (Set[EvidenceAtom], Evidence)] = None,
mapper: Option[Set[EvidenceAtom] => Vector[String]] = None,
labels: Set[String] = Set[String]()) = {
      // The prediction is sent to Splice here to receive the feedback.
      // If the atom is labelled, Splice will return its true label; otherwise it will send
      // a Splice-predicted label for this atom.
      // To work in a stand-alone fashion (without Splice), and also to test it, we use
      // the true labels here as well.
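      // For instance (illustration only): with splice undefined and
      // labels = Set("holdsAt(meeting(id1,id2),10)"), the feedback for that atom is "true",
      // and "false" for any atom not in the set.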
if (splice.isDefined && mapper.isDefined) {
val result = splice.get(predictions)
////
val querySig = AtomSignature("Meet", 3)
val atomDB = result._2.db(querySig)
val atomIDF = atomDB.identity
val seq = atomIDF.indices.flatMap { id =>
atomIDF.decode(id) match {
case Success(terms) if atomDB(id) == TRUE =>
Some(EvidenceAtom.asTrue(s"${querySig.symbol}", terms.map(lomrf.logic.Constant).toVector))
case Failure(exception) => throw exception
case _ => None
}
}
/////
val spliceTrueLabels = mapper.get(seq.toSet)
val predictedAtomTruthValue = spliceTrueLabels.contains(a.atom).toString
predictedAtomTruthValue
} else {
if (labels.contains(a.atom)) "true" else "false"
}
}
private def showInfo(parent: Clause, child: Clause, currentAtom: String, mistakeType: String) = {
logger.info(s"\nSpecialization in response to $mistakeType atom $currentAtom:\nRule (id: ${parent.##} | " +
s"score: ${parent.score} | tps: ${parent.tps} fps: ${parent.fps} " +
s"fns: ${parent.fns} | ExpertWeight: ${parent.w_pos} " +
s"AvgExpertWeight: ${parent.avgWeight})\n${parent.tostring}\nwas refined to" +
s"(id: ${child.##} | score: ${child.score} | tps: ${child.tps} fps: ${child.fps} fns: ${child.fns} | " +
s"ExpertWeight: ${child.w_pos} AvgExpertWeight: ${child.avgWeight})\n${child.tostring}")
}
def ground(
batch: Example,
inps: RunningOptions,
stateHandler: StateHandler,
withInputTheory: Boolean = false,
streaming: Boolean = false) = {
val startTime = System.nanoTime()
val (markedProgram, markedMap, groundingsMap, times) = groundEnsemble(batch, inps, stateHandler, withInputTheory, streaming)
val sortedAtomsToBePredicted = sortGroundingsByTime(groundingsMap)
val orderedTimes = (sortedAtomsToBePredicted.map(x => x.time) ++ times.toVector).sorted
val endTime = System.nanoTime()
println(s"Grounding time: ${(endTime - startTime) / 1000000000.0}")
(markedProgram, markedMap, groundingsMap, times, sortedAtomsToBePredicted, orderedTimes)
}
/* Generate groundings of the rules currently in the ensemble. */
def groundEnsemble(
batch: Example,
inps: RunningOptions,
stateHandler: StateHandler,
withInputTheory: Boolean = false,
streaming: Boolean = false) = {
val ensemble = stateHandler.ensemble
val merged = ensemble.merged(inps, withInputTheory)
stateHandler.runningRulesNumber = stateHandler.runningRulesNumber :+ merged.size
//println(s"Number of rules: ${merged.size}")
//println(s"Predicting with:\n${merged.tostring}")
val _marked = marked(merged.clauses.toVector, inps.globals)
val markedProgram = _marked._1
val markedMap = _marked._2
//val e = (batch.annotationASP ++ batch.narrativeASP).mkString("\n")
//val trueAtoms = batch.annotation.toSet
//val trueAtoms = Set.empty[String]
val e = batch.narrativeASP.mkString("\n")
val groundingsMapTimed = Utils.time { computeRuleGroundings(inps, markedProgram, markedMap, e, streaming = streaming) }
val groundingsMap = groundingsMapTimed._1._1
val times = groundingsMapTimed._1._2
val groundingsTime = groundingsMapTimed._2
stateHandler.updateGrndsTimes(groundingsTime)
(markedProgram, markedMap, groundingsMap, times)
}
  // givenLabels are the real annotation given in the case of full supervision. sliceLabels are the
  // labels received by Splice in the case of partial supervision.
def sortGroundingsByTime(groundingsMap: scala.collection.mutable.Map[String, (scala.Vector[String], scala.Vector[String])] //,
//givenLabels: Set[String] = Set[String](),
//sliceLabels: Map[String, String] = Map[String, String]()
) = {
val objs = groundingsMap.foldLeft(Vector[AtomTobePredicted]()) { (accum, mapEntry) =>
val (atom, initBy, termBy) = (mapEntry._1, mapEntry._2._1, mapEntry._2._2)
val parsed = Literal.parse(atom)
val time = parsed.terms.tail.head.name.toInt
val fluent = parsed.terms.head.tostring
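      // e.g. for the atom "holdsAt(meeting(id1,id2),10)" this yields time = 10 and fluent = "meeting(id1,id2)".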
/*
val label =
if(sliceLabels.isEmpty) {
if (givenLabels.contains(atom)) "true" else "false"
} else {
sliceLabels(atom)
}
*/
val obj = new AtomTobePredicted(atom, fluent, parsed, time, initBy, termBy)
accum :+ obj
}
objs.sortBy(x => x.time)
}
/* Make a prediction on the current atom */
  def predict(a: AtomTobePredicted, stateHandler: StateHandler, markedMap: Map[String, Clause]) = {
    val (currentAtom, currentTime, awakeInit, awakeTerm, currentFluent) = (a.atom, a.time, a.initiatedBy, a.terminatedBy, a.fluent)
    val inertiaExpertPrediction = stateHandler.inertiaExpert.getWeight(currentFluent)
val initWeightSum = if (awakeInit.nonEmpty) awakeInit.map(x => markedMap(x).w_pos).sum else 0.0
val termWeightSum = if (awakeTerm.nonEmpty) awakeTerm.map(x => markedMap(x).w_pos).sum else 0.0
val prediction = inertiaExpertPrediction + initWeightSum - termWeightSum
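    // Hypothetical numeric example: inertia = 0.5, init sum = 0.3, term sum = 0.6
    // gives prediction = 0.5 + 0.3 - 0.6 = 0.2 > 0, i.e. we predict that the fluent holds.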
//val prediction = initWeightSum - termWeightSum
(prediction, inertiaExpertPrediction, initWeightSum, termWeightSum)
/*
val initWeightSum = if (awakeInit.nonEmpty) awakeInit.map(x => markedMap(x).w_pos).sum else 0.0
val termWeightSum = if (awakeTerm.nonEmpty) awakeTerm.map(x => markedMap(x).w_pos).sum else 0.0
// This is used when we have two sub-experts per rule, one predicting 'true' and one 'no'
/*
val prediction =
if (termWeightSum > 0) { // then the termination part predicts 'yes' (so, termination)
inertiaExpertPrediction + initWeightSum - termWeightSum
} else {// then the termination part predicts 'no' (so, no termination)
inertiaExpertPrediction + initWeightSum // just initiation should be taken into account here
}
*/
//val prediction = initWeightSum - termWeightSum
(prediction, inertiaExpertPrediction, initWeightSum, termWeightSum)
*/
}
  def predictHedge(a: AtomTobePredicted, stateHandler: StateHandler,
      markedMap: Map[String, Clause], withInertia: Boolean = true, topRulesOnly: Boolean = false) = {
// Here we assume that initiation rules predict '1' and termination rules predict '0'.
// The prediction is a number in [0,1] resulting from the weighted average of the experts predictions:
// prediction = (Sum_{weight of awake init rules} + inertia_weight) / (Sum_{weight of awake term rules} + Sum_{weight of awake init rules} + inertia_weight)
// if prediction > threshold (e.g. 0.5) then we predict holds, else false
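    // Hypothetical numeric example: inertia = 0.4, awake initiation weights sum to 0.3 and awake
    // termination weights sum to 0.3; then prediction = (0.4 + 0.3)/(0.4 + 0.3 + 0.3) = 0.7 > 0.5, so holds.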
val (_, _, awakeInit, awakeTerm, currentFluent) = (a.atom, a.time, a.initiatedBy, a.terminatedBy, a.fluent)
    val inertiaExpertPrediction = stateHandler.inertiaExpert.getWeight(currentFluent)
//val initWeightSum = if (awakeInit.nonEmpty) awakeInit.map(x => markedMap(x).w_pos).sum else 0.0
//val termWeightSum = if (awakeTerm.nonEmpty) awakeTerm.map(x => markedMap(x).w_pos).sum else 0.0
val initWeightSum = if (awakeInit.nonEmpty) awakeInit.map(x => markedMap(x)).filter(x => x.body.nonEmpty).map(x => x.w_pos).sum else 0.0
val termWeightSum = if (awakeTerm.nonEmpty) awakeTerm.map(x => markedMap(x)).filter(x => x.body.nonEmpty).map(x => x.w_pos).sum else 0.0
val _prediction =
if (!topRulesOnly) {
if (withInertia) {
(inertiaExpertPrediction + initWeightSum) / (inertiaExpertPrediction + initWeightSum + termWeightSum)
} else {
initWeightSum / (initWeightSum + termWeightSum)
}
} else {
// Quick and dirty hack to get predictions without the specializations (only the top rules)
val initWeightSumTop = if (awakeInit.nonEmpty) awakeInit.map(x => markedMap(x)).filter(x => x.body.nonEmpty && x.isTopRule).map(x => x.w_pos).sum else 0.0
val termWeightSumTop = if (awakeTerm.nonEmpty) awakeTerm.map(x => markedMap(x)).filter(x => x.body.nonEmpty && x.isTopRule).map(x => x.w_pos).sum else 0.0
if (withInertia) {
(inertiaExpertPrediction + initWeightSumTop) / (inertiaExpertPrediction + initWeightSumTop + termWeightSumTop)
} else {
          initWeightSumTop / (initWeightSumTop + termWeightSumTop) // top-rule sums in both numerator and denominator
}
}
//val _prediction = initWeightSum / (initWeightSum + termWeightSum)
val prediction = if (_prediction.isNaN) 0.0 else _prediction
(prediction, inertiaExpertPrediction, initWeightSum, termWeightSum)
}
  def predict_NEW(a: AtomTobePredicted, stateHandler: StateHandler, markedMap: Map[String, Clause]) = {
    val (awakeInit, awakeTerm, currentFluent) = (a.initiatedBy, a.terminatedBy, a.fluent)
    val inertiaExpertPrediction = stateHandler.inertiaExpert.getWeight(currentFluent)
if (awakeInit.isEmpty && awakeTerm.isEmpty && inertiaExpertPrediction == 0.0) {
(0.0, 0.0, "None")
} else {
val bestInit = awakeInit.map(x => markedMap(x)).map(x => (x.##.toString, x.w_pos)).sortBy(x => -x._2)
val bestTerm = awakeTerm.map(x => markedMap(x)).map(x => (x.##.toString, x.w_pos)).sortBy(x => -x._2)
      val nonEmptyBodied = (awakeInit ++ awakeTerm).map(x => markedMap(x)).filter(_.body.nonEmpty)
      val awakeRuleExpertsWithWeights = nonEmptyBodied.map(x => (x.##.toString, x.w_pos)).toMap
val awakeExpertsWithWeights =
if (inertiaExpertPrediction > 0) awakeRuleExpertsWithWeights + ("inertia" -> inertiaExpertPrediction)
else awakeRuleExpertsWithWeights
val totalWeight = awakeExpertsWithWeights.values.sum
var v = Vector.empty[(String, Double)]
if (bestInit.nonEmpty) v = v :+ bestInit.head
if (bestTerm.nonEmpty) v = v :+ bestTerm.head
val _sorted =
if (inertiaExpertPrediction > 0) v :+ ("inertia" -> inertiaExpertPrediction)
else v
val pick = _sorted.sortBy(x => -x._2).head
val selected = pick._1
if (selected == "inertia") {
// return
        stateHandler.predictedWithInertia += 1
(inertiaExpertPrediction, totalWeight, selected)
} else {
if (!markedMap.keySet.contains(selected)) {
throw new RuntimeException(s"atom: ${a.atom}, selected: $selected")
}
val expert = markedMap(selected)
// return
if (expert.head.functor.contains("initiated")) {
          stateHandler.predictedWithInitRule += 1
(expert.w_pos, totalWeight, selected)
} else {
          stateHandler.predictedWithTermRule += 1
(-expert.w_pos, totalWeight, selected)
}
}
}
}
  def predictRandomized(a: AtomTobePredicted, stateHandler: StateHandler, markedMap: Map[String, Clause]) = {
    val (awakeInit, awakeTerm, currentFluent) = (a.initiatedBy, a.terminatedBy, a.fluent)
    val inertiaExpertPrediction = stateHandler.inertiaExpert.getWeight(currentFluent)
if (awakeInit.isEmpty && awakeTerm.isEmpty && inertiaExpertPrediction == 0.0) {
(0.0, 0.0, "None")
} else {
      val nonEmptyBodied = (awakeInit ++ awakeTerm).map(x => markedMap(x)).filter(_.body.nonEmpty)
      val awakeRuleExpertsWithWeights = nonEmptyBodied.map(x => (x.##.toString, x.w_pos)).toMap
val awakeExpertsWithWeights =
if (inertiaExpertPrediction > 0) awakeRuleExpertsWithWeights + ("inertia" -> inertiaExpertPrediction)
else awakeRuleExpertsWithWeights
val totalWeight = awakeExpertsWithWeights.values.sum
      // We need to pick an element with probability w_i/totalAwakeWeight.
      // Note: the ordering of the elements does not matter for this sampling scheme, since the
      // cumulative probabilities sum to 1 regardless of the order in which the elements are visited.
val sorted = awakeExpertsWithWeights.toVector.map(x => (x._1, x._2 / totalWeight.toDouble)).sortBy(x => x._2)
//val sorted = awakeExpertsWithWeights.toVector.map(x => (x._1, x._2/totalWeight.toDouble))
// Pick an element according to its probability:
// 1) Generate a uniformly distributed random number.
// 2) Iterate through the list until the cumulative probability of the visited elements is greater than the random number.
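      // Hypothetical numeric example: normalized weights (0.2, 0.3, 0.5) give cumulative
      // probabilities (0.2, 0.5, 1.0); a random p = 0.42 then selects the second expert.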
val p = Math.random()
      var cumulativeProbability = 0.0
var selected = ""
breakable {
for (i <- sorted) {
          cumulativeProbability += i._2
          if (p <= cumulativeProbability) {
selected = i._1
break
}
}
}
if (selected == "inertia") {
// return
        stateHandler.predictedWithInertia += 1
(inertiaExpertPrediction, totalWeight, selected)
} else {
if (!markedMap.keySet.contains(selected)) {
throw new RuntimeException(s"atom: ${a.atom}, selected: $selected")
}
val expert = markedMap(selected)
// return
if (expert.head.functor.contains("initiated")) {
          stateHandler.predictedWithInitRule += 1
(expert.w_pos, totalWeight, selected)
} else {
          stateHandler.predictedWithTermRule += 1
(-expert.w_pos, totalWeight, selected)
}
}
}
}
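  // A minimal, self-contained sketch of the weighted random selection performed inline in
  // predictRandomized above: select a key with probability proportional to its weight, via
  // cumulative weights. The helper and its name are illustrative additions, not used elsewhere.
  private def sampleByWeight(weights: Vector[(String, Double)]): String = {
    require(weights.nonEmpty, "weights must be non-empty")
    val total = weights.map(_._2).sum
    val p = Math.random() * total // uniform sample in [0, total)
    var cumulative = 0.0
    var selected = weights.head._1
    breakable {
      for ((key, w) <- weights) {
        cumulative += w
        if (p <= cumulative) { selected = key; break }
      }
    }
    selected
  }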
}
| 69,990 | 45.41313 | 244 | scala |
OLED | OLED-master/src/main/scala/oled/mwua/HelperClasses.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua
import app.runutils.RunningOptions
import logic.{Clause, Literal, Theory}
/**
* Created by nkatz at 9/2/2019
*/
object HelperClasses {
// Label can be "true", "false" or "unknown".
class AtomTobePredicted(val atom: String, val fluent: String, val atomParsed: Literal, val time: Int,
val initiatedBy: Vector[String], val terminatedBy: Vector[String], val label: String = "unknown")
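  // Hypothetical usage example (the fluent and the rule id "1234" are illustrative):
  //   new AtomTobePredicted("holdsAt(meeting(id1,id2),10)", "meeting(id1,id2)",
  //     Literal.parse("holdsAt(meeting(id1,id2),10)"), 10,
  //     initiatedBy = Vector("1234"), terminatedBy = Vector.empty)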
}
| 1,114 | 33.84375 | 103 | scala |
OLED | OLED-master/src/main/scala/oled/mwua/InertiaExpert.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua
class InertiaExpert {
private var weightMemory: scala.collection.mutable.Map[String, Double] = scala.collection.mutable.Map[String, Double]()
def knowsAbout(fluent: String) = {
weightMemory.keySet.contains(fluent)
}
def forget(fluent: String) = {
weightMemory -= fluent
}
val decayingFactor = 1.0 //0.05
def getWeight(fluent: String) = {
if (weightMemory.keySet.contains(fluent)) weightMemory(fluent) * decayingFactor else 0.0
}
def updateWeight(fluent: String, newWeight: Double) = {
weightMemory += (fluent -> newWeight)
}
def clear() = {
weightMemory = scala.collection.mutable.Map[String, Double]()
}
def getMemory() = weightMemory
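  // Example usage (illustrative only):
  //   val ie = new InertiaExpert
  //   ie.updateWeight("meeting(id1,id2)", 0.8)
  //   ie.getWeight("meeting(id1,id2)")   // 0.8 (times decayingFactor, currently 1.0)
  //   ie.forget("meeting(id1,id2)")
  //   ie.getWeight("meeting(id1,id2)")   // 0.0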
}
| 1,408 | 27.755102 | 121 | scala |
OLED | OLED-master/src/main/scala/oled/mwua/Learner.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua
import java.io.File
import akka.actor.{Actor, ActorRef, PoisonPill, Props}
import app.runutils.IOHandling.InputSource
import app.runutils.{Globals, RunningOptions}
import logic.Examples.Example
import logic._
import oled.mwua.MessageTypes.{FinishedBatchMsg, ProcessBatchMsg}
import org.slf4j.LoggerFactory
import oled.functions.SingleCoreOLEDFunctions.{crossVal, eval}
import scala.collection.mutable.{ListBuffer, Map}
import AuxFuncs._
import utils.{ASP, Utils}
import utils.Implicits._
import scala.util.control.Breaks._
import scala.reflect.internal.Trees
import java.util.Random
import scala.util.matching.Regex
/**
* Created by nkatz at 26/10/2018
*/
/*
*
* I ran this on the normal CAVIAR ordering as follows:
* --inpath=/home/nkatz/dev/OLED-BK/BKExamples/BK-various-taks/DevTest/caviar-bk --delta=0.00001 --prune=0.8
* --train=caviar --repfor=4 --chunksize=50 --try-more-rules=true --scorefun=default --onlineprune=true
*
* */
class Learner[T <: InputSource](
val inps: RunningOptions,
val trainingDataOptions: T,
val testingDataOptions: T,
val trainingDataFunction: T => Iterator[Example],
val testingDataFunction: T => Iterator[Example],
val writeExprmtResultsTo: String = "") extends Actor {
startTime = System.nanoTime()
/*
private var totalTPs = 0
private var totalFPs = 0
private var totalFNs = 0
private var totalTNs = 0
*/
//--------------------------
val normalizeWeights = true
//--------------------------
private var totalTPs = Set[String]()
private var totalFPs = Set[String]()
private var totalFNs = Set[String]()
private var totalTNs = Set[String]()
private var totalBatchProcessingTime = 0.0
private var totalRuleScoringTime = 0.0
private var totalNewRuleTestTime = 0.0
private var totalCompressRulesTime = 0.0
private var totalExpandRulesTime = 0.0
private var totalNewRuleGenerationTime = 0.0
private var totalWeightsUpdateTime = 0.0
private var totalgroundingsTime = 0.0
private var totalPredictionTime = 0.0
private val logger = LoggerFactory.getLogger(self.path.name)
private val withec = Globals.glvalues("with-ec").toBoolean
private var bestTheoryFoundSoFar = Theory()
  // This map contains all fluents that were true previously,
// (i.e. at the time point prior to the one that is currently being processed)
// along with their weights. The weights are updated properly at each time point
// and new atoms are added if we predict that they start holding, and
// existing atoms are removed if we predict that they're terminated.
// The key values are string representations of fluents, not holdsAt/2 atoms.
// So, we're storing "meeting(id1,id2)", not "holdsAt(meeting(id1,id2), 10)".
private var inertiaExpert = scala.collection.mutable.Map[String, Double]()
def getInertiaExpertPrediction(fluent: String) = {
if (inertiaExpert.keySet.contains(fluent)) inertiaExpert(fluent) else 0.0
}
val learningRate = 1.0
//----------------------------------------------------------------------
// If true, the firing/non-firing initiation rules are not taken
// into account when making a prediction about a fluent that persists
// by inertia.
// Setting this to false is the default for learning/reasoning with
// weakly initiated fluents, but setting it to true is necessary for
// strongly initiated settings, in order to allow for previously
// recognized fluents to persist.
private val isStrongInertia = false
//----------------------------------------------------------------------
/* All these are for presenting analytics/results after a run. */
private val initWeightSums = new ListBuffer[Double]
private val nonInitWeightSums = new ListBuffer[Double]
private val TermWeightSums = new ListBuffer[Double]
private val monTermWeightSums = new ListBuffer[Double]
private val predictInitWeightSums = new ListBuffer[Double]
private val predictTermWeightSums = new ListBuffer[Double]
private val inertWeightSums = new ListBuffer[Double]
private val prodictHoldsWeightSums = new ListBuffer[Double]
  // For each query atom encountered during a run, 0.0 or 1.0 is stored in this buffer (0.0 for false, 1.0 for true).
private val trueLabels = new ListBuffer[Double]
// Keep weights only for this
val keepStatsForFluent = "meeting(id4,id5)"
// Control learning iterations over the data
private var repeatFor = inps.repeatFor
// Used to count examples for holdout evaluation
private var exampleCounter = 0
// Local data variable. Cleared at each iteration (in case repfor > 1).
private var data = Iterator[Example]()
// This is optional. A testing set (for holdout evaluation) may not be provided.
private var testingData = Iterator[Example]()
  // Counts the number of processed batches. Used to determine when to
// perform holdout evaluation on the test set. Incremented whenever a
// new batch is fetched (see the getNextBatch() method)
private var batchCounter = 0
// Stores the error from the prequential evaluation at each batch.
private var prequentialError = Vector[Double]()
// Current prequential error (for logging only, updated as a string message containing the actual error).
private var currentError = ""
// Stores the F1-scores from holdout evaluation
private var holdoutScores = Vector[Double]()
// Evolving theory. If we're learning with the Event Calculus the head of the
// list is the initiation part of the theory and the tail is the termination.
// If not, the list has a single element (the current version of the theory).
private var theory = if (withec) List(Theory(), Theory()) else List(Theory())
private var startTime = System.nanoTime()
private var endTime = System.nanoTime()
  // Get the training data from the current input source
private def getTrainData = trainingDataFunction(trainingDataOptions)
private def getTestingData = testingDataFunction(testingDataOptions)
private def getNextBatch(lleNoise: Boolean = false) = {
this.batchCounter += 1
if (data.isEmpty) {
Example()
} else {
if (!lleNoise) {
data.next()
} else {
val currentBatch = data.next()
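        // Inject noise in the low-level events by renaming the "active" predicate, e.g.
        // (illustrative CAVIAR atom) happensAt(active(id1),10) becomes happensAt(active_1(id1),10).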
val noisyNarrative = {
currentBatch.narrative map { x =>
x.replaceAll("active", "active_1")
}
}
Example(annot = currentBatch.annotation, nar = noisyNarrative, _time = currentBatch.time)
}
}
}
val workers: List[ActorRef] = {
// Two workers for initiated and terminated rules respectively.
if (withec) {
val worker1 = context.actorOf(Props(new Worker(inps)), name = "worker-1")
val worker2 = context.actorOf(Props(new Worker(inps)), name = "worker-2")
List(worker1, worker2)
} else {
val worker = context.actorOf(Props(new Worker(inps)), name = "worker")
List(worker)
}
}
// Use this variable to count the responses received from worker actors while processing a new batch.
private var responseCounter = workers.length
// Keep response messages from workers in here until all workers are done.
private val responses = Map[String, FinishedBatchMsg]()
def receive = {
case "start" => {
this.repeatFor -= 1
this.data = getTrainData
if (inps.test != "None") this.testingData = getTestingData
if (this.data.isEmpty) {
logger.error(s"Input source ${inps.train} is empty.")
System.exit(-1)
}
processNext()
}
case "eval" => {
// Prequential evaluation of a given theory
///*
logger.info(s"Performing prequential Evaluation of theory from ${inps.evalth}")
(1 to repeatFor) foreach { _ =>
this.data = getTrainData
while (data.hasNext) {
evaluate(data.next(), inps.evalth)
logger.info(currentError)
}
}
logger.info(s"Prequential error vector:\n${prequentialError.map(x => x.toDouble)}")
logger.info(s"Prequential error vector (Accumulated Error):\n${prequentialError.scanLeft(0.0)(_ + _).tail}")
//*/
// This is evaluation on a test set, just comment-out prequential, uncomment this.
/*
val testData = testingDataFunction(testingDataOptions)
val (tps,fps,fns,precision,recall,fscore) = crossVal(Theory(), data=testData, handCraftedTheoryFile = inps.evalth, globals = inps.globals, inps = inps)
logger.info(s"\ntps: $tps\nfps: $fps\nfns: " + s"$fns\nprecision: $precision\nrecall: $recall\nf-score: $fscore)")
*/
context.system.terminate()
}
// Use a hand-crafted theory for sequential prediction. This updates the rule weights after each round,
// but it does not mess with the structure of the rules.
case "predict" => {
def matches(p: Regex, str: String) = p.pattern.matcher(str).matches
val rules = scala.io.Source.fromFile(inps.evalth).getLines.toList.filter(line => !matches("""""".r, line) && !line.startsWith("%"))
val rulesParsed = rules.map(r => Clause.parse(r))
println(rulesParsed)
(1 to repeatFor) foreach { _ =>
this.data = getTrainData
while (data.hasNext) {
val batch = getNextBatch(lleNoise = false)
logger.info(s"Prosessing $batchCounter")
evaluateTest_NEW(batch, "", false, true, Theory(rulesParsed))
}
}
logger.info(s"Prequential error vector:\n${prequentialError.map(x => x.toDouble)}")
logger.info(s"\nPrequential error vector (Accumulated Error):\n${prequentialError.scanLeft(0.0)(_ + _).tail}")
/*
logger.info(s"\nTrue labels:\n$trueLabels")
logger.info(s"\nInitiation Weight Sums:\n$initWeightSums")
logger.info(s"\nNo Initiation Weight Sums:\n$nonInitWeightSums")
logger.info(s"\nTermination Weight Sums:\n$TermWeightSums")
logger.info(s"\nNon Termination Weight Sums:\n$monTermWeightSums")
logger.info(s"\nPredict Initiation Weight Sums:\n$predictInitWeightSums")
logger.info(s"\nPredict Termination Weight Sums:\n$predictTermWeightSums")
logger.info(s"\nInertia Weight Sums:\n$inertWeightSums")
logger.info(s"\nHolds Weight Sums:\n$prodictHoldsWeightSums")
*/
//logger.info(s"\nTrue labels:\n$trueLabels")
///*
utils.plotting.PlotTest2.plotResults("/home/nkatz/Desktop/", "results",
trueLabels.toVector, initWeightSums.toVector, nonInitWeightSums.toVector, TermWeightSums.toVector,
monTermWeightSums.toVector, predictInitWeightSums.toVector, predictTermWeightSums.toVector,
inertWeightSums.toVector, prodictHoldsWeightSums.toVector)
//*/
context.system.terminate()
}
case p: FinishedBatchMsg => {
responseCounter -= 1
if (p.targetClass == "") responses += ("theory-no-ec" -> p) else responses += (p.targetClass -> p)
if (responseCounter == 0) {
processedBatches += 1
// General case first (no event calculus)
if (responses.keySet.size == 1) {
val r = responses("theory-no-ec")
this.theory = List(r.theory)
this.totalBatchProcessingTime += r.BatchProcessingTime
this.totalCompressRulesTime += r.compressRulesTime
this.totalExpandRulesTime += r.expandRulesTime
this.totalNewRuleGenerationTime += r.newRuleGenerationTime
this.totalNewRuleTestTime += r.newRuleTestTime
this.totalRuleScoringTime += r.ruleScoringTime
} else {
val ir = responses("initiated")
val tr = responses("terminated")
val newInitTheory = ir.theory
val newTermTheory = tr.theory
this.theory = List(newInitTheory, newTermTheory)
this.totalBatchProcessingTime += math.max(ir.BatchProcessingTime, tr.BatchProcessingTime)
this.totalCompressRulesTime += math.max(ir.compressRulesTime, tr.compressRulesTime)
this.totalExpandRulesTime += math.max(ir.expandRulesTime, tr.expandRulesTime)
this.totalNewRuleGenerationTime += math.max(ir.newRuleGenerationTime, tr.newRuleGenerationTime)
this.totalNewRuleTestTime += math.max(ir.newRuleTestTime, tr.newRuleTestTime)
this.totalRuleScoringTime += math.max(ir.ruleScoringTime, tr.ruleScoringTime)
}
//logger.info(currentError)
// reset these before processing a new batch
responseCounter = workers.length
responses.clear()
processNext()
}
}
}
var processedBatches = 0
/*
* Performs online evaluation and sends the next batch to the worker(s) for processing.
*
* */
private def processNext() = {
val nextBatch = getNextBatch(lleNoise = false)
logger.info(s"Processing batch $batchCounter")
exampleCounter += inps.chunkSize
if (nextBatch.isEmpty) {
logger.info(s"Finished the data.")
if (this.repeatFor > 0) {
logger.info(s"Starting new iteration.")
self ! "start"
} else if (this.repeatFor == 0) {
endTime = System.nanoTime()
logger.info("Done.")
workers foreach (w => w ! PoisonPill)
wrapUp()
context.system.terminate()
} else {
throw new RuntimeException("This should never have happened (repeatfor is now negative?)")
}
} else {
evaluate(nextBatch)
//evaluateTest(nextBatch)
//evaluateTest_NEW(nextBatch)
//evaluateTest_NEW_EXPAND_WHEN_NEEDED(nextBatch)
if (this.workers.length > 1) { // we're learning with the Event Calculus.
val msg1 = new ProcessBatchMsg(theory.head, nextBatch, "initiated")
val msg2 = new ProcessBatchMsg(theory.tail.head, nextBatch, "terminated")
workers.head ! msg1
workers.tail.head ! msg2
} else { // We're learning without the Event Calculus.
workers.head ! new ProcessBatchMsg(theory.head, nextBatch)
}
}
}
/* Finished. Just show results and shut down */
def wrapUp(): Unit = {
val merged = {
if (theory.length == 1) {
theory.head
} else {
Theory(theory.head.clauses ++ theory.tail.head.clauses)
}
}
val theorySize = merged.clauses.foldLeft(0)((x, y) => x + y.body.length + 1)
val totalRunningTime = (endTime - startTime) / 1000000000.0
val totalTrainingTime = totalBatchProcessingTime
logger.info(s"\nAll rules found (non-pruned, non-compressed):\n ${merged.showWithStats}")
val pruned = Theory(merged.clauses.filter(_.score >= inps.pruneThreshold))
/* THIS MAY TAKE TOO LONG FOR LARGE AND COMPLEX THEORIES!! */
logger.info("Compressing theory...")
val pruned_ = Theory(LogicUtils.compressTheory(pruned.clauses))
logger.info(s"\nFinal Pruned theory found:\n ${pruned_.showWithStats}")
logger.info(s"Theory size: $theorySize")
logger.info(s"Total running time: $totalTrainingTime")
logger.info(s"Total batch processing time: $totalRunningTime")
logger.info(s"Total rule scoring time: $totalRuleScoringTime")
logger.info(s"Total rule expansion time: $totalExpandRulesTime")
logger.info(s"Total rule compression time: $totalCompressRulesTime")
logger.info(s"Total testing for new rule generation time: $totalNewRuleTestTime")
logger.info(s"Total new rule generation time: $totalNewRuleGenerationTime")
logger.info(s"Total prediction & weights update time: $totalWeightsUpdateTime")
logger.info(s"Total groundings computation time: $totalgroundingsTime")
logger.info(s"Prequential error vector:\n${prequentialError.map(x => x.toDouble)}")
logger.info(s"Prequential error vector (Accumulated Error):\n${prequentialError.scanLeft(0.0)(_ + _).tail}")
logger.info(s"Prequential F1-score:\n$runningF1Score")
logger.info(s"Total TPs: $TPs, total FPs: $FPs, total FNs: $FNs")
if (this.writeExprmtResultsTo != "") {
// Just for quick and dirty experiments
val x = prequentialError.scanLeft(0.0)(_ + _).tail.toString()
Utils.writeToFile(new File(this.writeExprmtResultsTo), "append") { p => List(x).foreach(p.println) }
}
//logger.info(s"Total TPs: ${totalTPs.size}, Total FPs: ${totalFPs.size}, Total FNs: ${totalFNs.size}")
if (trainingDataOptions != testingDataOptions) {
//logger.info("Evaluating on the test set")
val testData = testingDataFunction(testingDataOptions)
// Prequential eval on the test set (without weights update at each step).
logger.info("Evaluating on the test set with the theory found so far (no weights update at each step, no structure updates).")
prequentialError = Vector[Double]()
totalTPs = Set[String]()
totalFPs = Set[String]()
totalFNs = Set[String]()
// This includes the refinements in the final theory
// Comment it out to test with the final theory
///*
val predictWith = getFinalTheory(theory, useAvgWeights = true, logger)
val newInit = predictWith._1
val newTerm = predictWith._2
theory = List(Theory(newInit), Theory(newTerm))
//*/
testData foreach { batch =>
evaluateTest_NEW(batch, testOnly = true)
}
logger.info(s"Prequential error on test set:\n${prequentialError.mkString(",")}")
logger.info(s"Prequential error vector on test set (Accumulated Error):\n${prequentialError.scanLeft(0.0)(_ + _).tail}")
logger.info(s"Evaluation on the test set\ntps: ${totalTPs.size}\nfps: ${totalFPs.size}\nfns: ${totalFNs.size}")
// just for quick and dirty experiments
if (this.writeExprmtResultsTo != "") {
val x = s"tps: ${totalTPs.size}\nfps: ${totalFPs.size}\nfns: ${totalFNs.size}\n\n"
Utils.writeToFile(new File(this.writeExprmtResultsTo), "append") { p => List(x).foreach(p.println) }
}
logger.info(s"Total prediction & weights update time: $totalWeightsUpdateTime")
logger.info(s"Total groundings computation time: $totalgroundingsTime")
logger.info(s"Total per-rule prediction time (combining rule's sub-experts' predictions): $totalPredictionTime")
}
//val (tps,fps,fns,precision,recall,fscore) = crossVal(pruned_, data=testData, globals = inps.globals, inps = inps)
//logger.info(s"\ntps: $tps\nfps: $fps\nfns: " + s"$fns\nprecision: $precision\nrecall: $recall\nf-score: $fscore)")
}
var TPs = 0
var FPs = 0
var FNs = 0
var runningF1Score = Vector.empty[Double]
def evaluate(batch: Example, inputTheoryFile: String = ""): Unit = {
if (inps.prequential) {
if (withec) {
val (init, term) = (theory.head, theory.tail.head)
//val merged = Theory( (init.clauses ++ term.clauses).filter(p => p.body.length >= 1 && p.seenExmplsNum > 5000 && p.score > 0.7) )
//val merged = Theory( (init.clauses ++ term.clauses).filter(p => p.body.length >= 1 && p.score > 0.9) )
val merged = Theory(init.clauses.filter(p => p.precision >= inps.pruneThreshold) ++ term.clauses.filter(p => p.recall >= inps.pruneThreshold))
val (tps, fps, fns, precision, recall, fscore) = eval(merged, batch, inps)
// I think this is wrong, the correct error is the number of mistakes (fps+fns)
//currentError = s"TPs: $tps, FPs: $fps, FNs: $fns, error (|true state| - |inferred state|): ${math.abs(batch.annotation.toSet.size - (tps+fps))}"
val error = (fps + fns).toDouble
TPs += tps
FPs += fps
FNs += fns
val currentPrecision = TPs.toDouble / (TPs + FPs)
val currentRecall = TPs.toDouble / (TPs + FNs)
val _currentF1Score = 2 * currentPrecision * currentRecall / (currentPrecision + currentRecall)
val currentF1Score = if (_currentF1Score.isNaN) 0.0 else _currentF1Score
runningF1Score = runningF1Score :+ currentF1Score
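        // Worked example (hypothetical counts): with TPs = 8, FPs = 2, FNs = 2 so far,
        // precision = 8/10 = 0.8, recall = 8/10 = 0.8, so F1 = 2*0.8*0.8/(0.8+0.8) = 0.8.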
currentError = s"Number of mistakes (FPs+FNs) "
this.prequentialError = this.prequentialError :+ error
println(s"time, scoring theory size, error: ${batch.time}, ${merged.size}, $error")
println(this.prequentialError)
}
}
    // TODO: Implement holdout evaluation.
if (inps.holdout != 0) {
}
}
private def getMergedTheory(testOnly: Boolean) = {
if (withec) {
val (init, term) = (theory.head, theory.tail.head)
val _merged = Theory(init.clauses ++ term.clauses)
if (testOnly) {
_merged
} else {
_merged.clauses foreach (rule => if (rule.refinements.isEmpty) rule.generateCandidateRefs(inps.globals))
// Do we want to also filter(p => p.score > inps.pruneThreshold) here?
// Do we want to compress here? Theory(LogicUtils.compressTheory(_merged.clauses))
val mergedWithRefs = Theory(_merged.clauses ++ _merged.clauses.flatMap(_.refinements))
//val merged = _merged
val merged = mergedWithRefs
merged
}
} else {
Theory() /* TODO */
}
}
/* This is called whenever a new rule is added due to a mistake. */
private def addRuleAndUpdate(r: Clause, testOnly: Boolean = false) = {
// Update the current theory
if (withec) {
if (r.head.functor.contains("initiated")) {
theory = List(Theory(theory.head.clauses :+ r), theory.tail.head)
} else if (r.head.functor.contains("terminated")) {
theory = List(theory.head, Theory(theory.tail.head.clauses :+ r))
} else {
throw new RuntimeException("Error while updating current theory.")
}
} else {
/* TODO */
}
// Update merged theory and marked-up stuff.
val mergedNew = getMergedTheory(testOnly)
val markedNew = marked(mergedNew.clauses.toVector, inps.globals)
val markedProgramNew = markedNew._1
val markedMapNew = markedNew._2
(mergedNew, markedProgramNew, markedMapNew)
}
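  // The returned triple is (mergedTheory, markedProgram, markedMap); callers rebind their
  // local copies from it after each structure update.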
/* This is called whenever we're specializing a rule due to a mistake */
private def specializeRuleAndUpdate(
topRule: Clause,
refinement: Clause, testOnly: Boolean = false) = {
    // Remove the parent rule from a rule list (idiomatic equivalent of the original foldLeft).
    val filter = (p: List[Clause]) => p.filterNot(y => topRule.equals(y))
// Update the current theory
val oldInit = theory.head.clauses
val oldTerm = theory.tail.head.clauses
if (withec) {
if (topRule.head.functor.contains("initiated")) {
val newInit = filter(oldInit) :+ refinement
theory = List(Theory(newInit), Theory(oldTerm))
showInfo(topRule, refinement)
} else if (topRule.head.functor.contains("terminated")) {
val newTerm = filter(oldTerm) :+ refinement
theory = List(Theory(oldInit), Theory(newTerm))
showInfo(topRule, refinement)
} else {
throw new RuntimeException("Error while updating current theory.")
}
} else {
/* TODO */
}
// Update merged theory and marked-up stuff.
val mergedNew = getMergedTheory(testOnly)
val markedNew = marked(mergedNew.clauses.toVector, inps.globals)
val markedProgramNew = markedNew._1
val markedMapNew = markedNew._2
(mergedNew, markedProgramNew, markedMapNew)
}
private def showInfo(parent: Clause, child: Clause) = {
logger.info(s"\nRule (id: ${parent.##} | score: ${parent.score} | tps: ${parent.tps} fps: ${parent.fps} " +
s"fns: ${parent.fns} | ExpertWeight: ${parent.w_pos} " +
s"AvgExpertWeight: ${parent.avgWeight})\n${parent.tostring}\nwas refined to" +
s"(id: ${child.##} | score: ${child.score} | tps: ${child.tps} fps: ${child.fps} fns: ${child.fns} | " +
s"ExpertWeight: ${child.w_pos} AvgExpertWeight: ${child.avgWeight})\n${child.tostring}")
}
def evaluateTest_NEW(batch: Example, inputTheoryFile: String = "",
testOnly: Boolean = false, weightsOnly: Boolean = false, inputTheory: Theory = Theory()) = {
if (withec) {
var merged = if (inputTheory == Theory()) getMergedTheory(testOnly) else inputTheory
//logger.info(s"Predicting with ${merged.tostring}")
// just for debugging
val weightsBefore = merged.clauses.map(x => x.w_pos)
// just for debugging
val inertiaBefore = inertiaExpert.map(x => x)
val _marked = marked(merged.clauses.toVector, inps.globals)
var markedProgram = _marked._1
var markedMap = _marked._2
val e = (batch.annotationASP ++ batch.narrativeASP).mkString("\n")
val trueAtoms = batch.annotation.toSet
var inferredAtoms = (Set[String](), Set[String](), Set[String]())
// this is to be set to the time the previous iteration stopped at.
// It was supposed to be used for removing already seen stuff from the batch
// whenever we make a mistake and start computing groundings all over again, but
// I haven't done that yet.
var processedUntil = 0
var finishedBatch = false
var alreadyProcessedAtoms = Set.empty[String]
while (!finishedBatch) {
val groundingsMapTimed = Utils.time{
computeRuleGroundings(inps, markedProgram, markedMap, e, trueAtoms)
}
val groundingsMap = groundingsMapTimed._1._1
val times = groundingsMapTimed._1._2
val groundingsTime = groundingsMapTimed._2
totalgroundingsTime += groundingsTime
// We sort the groundings map by the time-stamp of each inferred holdsAt atom in ascending order.
// For each holdsAt atom we calculate if it should actually be inferred, based on the weights
// of the rules that initiate or terminate it. In this process, the weights of the rules are
// updated based on whether the atom is mistakenly/correctly predicted and whether each individual
// rule mistakenly/correctly predicts it. Sorting the inferred atoms and iterating over them is necessary
// so as to promote/penalize the rule weights correctly after each mistake.
val sorted = groundingsMap.map { entry =>
val parsed = Literal.parse(entry._1)
val time = parsed.terms.tail.head.name.toInt
((entry._1, time), entry._2)
}.toVector.sortBy(x => x._1._2) // sort by time
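      // For illustration (hypothetical values), an entry of `sorted` looks like
      // (("holdsAt(meeting(id1,id2),680)", 680), (Vector("1284392"), Vector("5531200"))),
      // i.e. the inferred atom paired with its time-stamp, plus the ids of the rules
      // that initiate and terminate the fluent, respectively.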
val predictAndUpdateTimed = Utils.time {
breakable {
sorted foreach { y =>
val (currentAtom, currentTime) = (y._1._1, y._1._2)
if (!alreadyProcessedAtoms.contains(currentAtom)) {
val parsed = Literal.parse(currentAtom)
val currentFluent = parsed.terms.head.tostring
val (initiatedBy, terminatedBy) = (y._2._1, y._2._2)
val initWeightSum = if (initiatedBy.nonEmpty) initiatedBy.map(x => markedMap(x).w_pos).sum else 0.0
val termWeightSum = if (terminatedBy.nonEmpty) terminatedBy.map(x => markedMap(x).w_pos).sum else 0.0
// only updates weights when we're not running in test mode.
val prediction =
predictAndUpdate(currentAtom, currentFluent,
initiatedBy, terminatedBy, markedMap, testOnly, trueAtoms, batch)
//val prediction = _prediction._1
prediction match {
case "TP" => inferredAtoms = (inferredAtoms._1 + currentAtom, inferredAtoms._2, inferredAtoms._3)
case "FP" => inferredAtoms = (inferredAtoms._1, inferredAtoms._2 + currentAtom, inferredAtoms._3)
case "FN" => inferredAtoms = (inferredAtoms._1, inferredAtoms._2, inferredAtoms._3 + currentAtom)
case "TN" => // do nothing
case _ => throw new RuntimeException("Unexpected response from predictAndUpdate")
}
if (!testOnly && !weightsOnly) {
if (prediction == "FP" && terminatedBy.isEmpty) {
// Let's try adding a new termination expert only when there is no other termination expert that fires.
// Else, let it fix the mistakes in future rounds by increasing the weights of firing terminating experts.
// Generate a new termination rule from the point where we currently err.
// This rule will be used for fixing the mistake in the next round.
// This most probably results in over-training. It increases weights too much and the new rule dominates.
//val totalWeight = inertiaExpert(currentFluent) + initWeightSum
val totalWeight = 1.0
val newTerminationRule = generateNewExpert(batch, currentAtom, inps.globals, "terminatedAt", totalWeight)
if (!newTerminationRule.equals(Clause.empty)) {
logger.info(s"Generated new termination rule in response to FP atom: $currentAtom")
                // Since neither the new (empty-bodied) termination rule nor its refinements fire,
                // they do not contribute to the FP; increase their weights further.
increaseWeights(newTerminationRule.refinements :+ newTerminationRule, learningRate)
// Finally, add the new termination rule to the current theory.
val update = addRuleAndUpdate(newTerminationRule)
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
} else {
logger.info(s"At batch $batchCounter: Failed to generate bottom rule from FP mistake with atom: $currentAtom")
}
}
//if (prediction == "FN" && initiatedBy.isEmpty && getInertiaExpertPrediction(currentFluent) == 0.0) {
if (prediction == "FN" && initiatedBy.isEmpty) {
//val newInitiationRule = generateNewExpert(batch, currentAtom, inps.globals, "initiatedAt", termWeightSum)
              // Don't give the new rule the total weight of the termination part. It's dangerous
              // (e.g. if the termination part has total weight 0.0 the new rule also gets 0.0, and the FN is never fixed!)
              // and it's also wrong: you just over-train to get rid of a few mistakes.
val newInitiationRule = generateNewExpert(batch, currentAtom, inps.globals, "initiatedAt", 1.0)
if (!newInitiationRule.equals(Clause.empty)) {
logger.info(s"Generated new initiation rule in response to FN atom: $currentAtom")
val update = addRuleAndUpdate(newInitiationRule)
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
} else {
logger.info(s"At batch $batchCounter: Failed to generate bottom rule from FN mistake with atom: $currentAtom")
}
}
}
}
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
}
finishedBatch = true
}
}
totalWeightsUpdateTime += predictAndUpdateTimed._2
}
val (tps, fps, fns) = (inferredAtoms._1, inferredAtoms._2, inferredAtoms._3)
val (fpsNumber, currentFNsNumber) = (fps.size, fns.size)
// All atoms in tps are certainly true positives.
// But we need to account for the real true atoms which are not in there.
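      // e.g. (hypothetical) trueAtoms = {a,b,c}, tps = {a}, fns = {b} gives restFNs = {c}:
      // a true atom that was never even considered during grounding.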
val restFNs = trueAtoms.diff(tps).filter(!fns.contains(_))
      if (restFNs.nonEmpty) throw new RuntimeException(s"Unaccounted-for FNs detected: $restFNs")
val restFNsNumber = restFNs.size
var trueFNsNumber = currentFNsNumber + restFNsNumber
// Ugly (AND DANGEROUS) hack to avoid counting as mistakes the holdsAt/2 atoms at the first time point of an interval
if (trueFNsNumber == 2) trueFNsNumber = 0
// We have gathered the FNs that have not been inferred, but we need to add the rest of them in the global counter
totalFNs = totalFNs ++ restFNs
// Just for debugging.
val weightsAfter = merged.clauses.map(x => x.w_pos)
//just for debugging
val inertiaAfter = inertiaExpert.map(x => x)
prequentialError = prequentialError :+ (fpsNumber + trueFNsNumber).toDouble
if (fpsNumber + trueFNsNumber > 0) {
logger.info(s"\nMade mistakes: FPs: $fpsNumber, " +
s"FNs: $trueFNsNumber.\nWeights before: $weightsBefore\nWeights after: $weightsAfter\nInertia Before: " +
s"$inertiaBefore\nInertia after: $inertiaAfter") //\nPredicted with:\n${merged.showWithStats}")
}
} else { // No Event Calculus. We'll see what we'll do with that.
}
}
def evaluateTest_NEW_EXPAND_WHEN_NEEDED(batch: Example, inputTheoryFile: String = "",
testOnly: Boolean = false, weightsOnly: Boolean = false, inputTheory: Theory = Theory()) = {
if (withec) {
var merged = if (inputTheory == Theory()) getMergedTheory(testOnly) else inputTheory
// just for debugging
val weightsBefore = merged.clauses.map(x => x.w_pos)
// just for debugging
val inertiaBefore = inertiaExpert.map(x => x)
val _marked = marked(merged.clauses.toVector, inps.globals)
var markedProgram = _marked._1
var markedMap = _marked._2
val e = (batch.annotationASP ++ batch.narrativeASP).mkString("\n")
val trueAtoms = batch.annotation.toSet
var inferredAtoms = (Set[String](), Set[String](), Set[String]())
// this is to be set to the time the previous iteration stopped at.
// It was supposed to be used for removing already seen stuff from the batch
// whenever we make a mistake and start computing groundings all over again, but
// I haven't done that yet.
var processedUntil = 0
var finishedBatch = false
var alreadyProcessedAtoms = Set.empty[String]
while (!finishedBatch) {
val groundingsMapTimed = Utils.time{
computeRuleGroundings(inps, markedProgram, markedMap, e, trueAtoms)
}
val groundingsMap = groundingsMapTimed._1._1
val times = groundingsMapTimed._1._2
val groundingsTime = groundingsMapTimed._2
totalgroundingsTime += groundingsTime
// We sort the groundings map by the time-stamp of each inferred holdsAt atom in ascending order.
// For each holdsAt atom we calculate if it should actually be inferred, based on the weights
// of the rules that initiate or terminate it. In this process, the weights of the rules are
// updated based on whether the atom is mistakenly/correctly predicted and whether each individual
// rule mistakenly/correctly predicts it. Sorting the inferred atoms and iterating over them is necessary
// so as to promote/penalize the rule weights correctly after each mistake.
val sorted = groundingsMap.map { entry =>
val parsed = Literal.parse(entry._1)
val time = parsed.terms.tail.head.name.toInt
((entry._1, time), entry._2)
}.toVector.sortBy(x => x._1._2) // sort by time
val predictAndUpdateTimed = Utils.time {
breakable {
sorted foreach { y =>
val (currentAtom, currentTime) = (y._1._1, y._1._2)
if (!alreadyProcessedAtoms.contains(currentAtom)) {
val parsed = Literal.parse(currentAtom)
val currentFluent = parsed.terms.head.tostring
val (initiatedBy, terminatedBy) = (y._2._1, y._2._2)
// This is also calculated at predictAndUpdate, we need to factor it out.
// Calculate it here (because it is needed here) and pass it to predictAndUpdate
// to avoid doing it twice.
///*
val nonFiringInitRules =
markedMap.filter(x =>
x._2.head.functor.contains("initiated") && !initiatedBy.contains(x._1))
//*/
// This is also calculated at predictAndUpdate, we need to factor it out.
// Calculate it here (because it is needed here) and pass it to predictAndUpdate
// to avoid doing it twice.
///*
val nonFiringTermRules =
markedMap.filter(x =>
x._2.head.functor.contains("terminated") && !terminatedBy.toSet.contains(x._1))
//*/
val initWeightSum = if (initiatedBy.nonEmpty) initiatedBy.map(x => markedMap(x).w_pos).sum else 0.0
val termWeightSum = if (terminatedBy.nonEmpty) terminatedBy.map(x => markedMap(x).w_pos).sum else 0.0
// only updates weights when we're not running in test mode.
val prediction =
predictAndUpdate(currentAtom, currentFluent,
initiatedBy, terminatedBy, markedMap, testOnly, trueAtoms, batch)
prediction match {
case "TP" => inferredAtoms = (inferredAtoms._1 + currentAtom, inferredAtoms._2, inferredAtoms._3)
case "FP" => inferredAtoms = (inferredAtoms._1, inferredAtoms._2 + currentAtom, inferredAtoms._3)
case "FN" => inferredAtoms = (inferredAtoms._1, inferredAtoms._2, inferredAtoms._3 + currentAtom)
case "TN" => // do nothing
case _ => throw new RuntimeException("Unexpected response from predictAndUpdate")
}
if (!testOnly && !weightsOnly) {
if (prediction == "FP") {
if (terminatedBy.isEmpty) {
// Let's try adding a new termination expert only when there is no other termination expert that fires.
// Else, let it fix the mistakes in future rounds by increasing the weights of firing terminating experts.
// Generate a new termination rule from the point where we currently err.
// This rule will be used for fixing the mistake in the next round.
// This most probably results in over-training. It increases weights too much and the new rule dominates.
//val totalWeight = inertiaExpert(currentFluent) + initWeightSum
val totalWeight = 1.0
val newTerminationRule = generateNewExpert(batch, currentAtom, inps.globals, "terminatedAt", totalWeight)
if (!newTerminationRule.equals(Clause.empty)) {
logger.info(s"Generated new termination rule in response to FP atom: $currentAtom")
                      // Since neither the new (empty-bodied) termination rule nor its refinements fire,
                      // they do not contribute to the FP, so one might increase their weights further.
                      // NO, WE DO NOT INCREASE WEIGHTS OF NON-FIRING RULES!!!
//increaseWeights(newTerminationRule.refinements :+ newTerminationRule, learningRate)
// Finally, add the new termination rule to the current theory.
val update = addRuleAndUpdate(newTerminationRule)
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
} else {
logger.info(s"At batch $batchCounter: Failed to generate bottom rule from FP mistake with atom: $currentAtom")
}
} else { // We do have firing termination rules
                  // Specialize a firing initiation rule. If no firing initiation rule exists,
                  // the FP is due to inertia; just let the inertia weight degrade until
                  // the termination rules take over the majority (note that we do have firing termination rules here,
                  // so there is reason to believe that we'll have such rules in the upcoming rounds).
if (initiatedBy.nonEmpty) {
// Note that we'll most certainly have a top-rule that fires: for every
// refinement that fires, its parent rule must fire as well. Therefore, if
                    // initiatedBy is non-empty, at least some of the rules in there must be top rules.
val rulesToSpecialize =
                      // This is the first minor difference from the piece of code
                      // for specializing termination rules (below): here we select the
                      // rules from the initiation part of the theory, below from the termination part.
theory.head.clauses.
filter(x => initiatedBy.toSet.contains(x.##.toString))
var performedSpecialization = false
rulesToSpecialize foreach { ruleToSpecialize =>
                    // Find suitable refinements, i.e. refinements that DO NOT fire
                    // w.r.t. the current FP atom.
val suitableRefs =
                      // Here is the second difference: we use nonFiringInitRules here.
                      // This near-duplicate of the termination-rule code below should be
                      // factored out into a single helper.
ruleToSpecialize.refinements.
filter(r => nonFiringInitRules.keySet.contains(r.##.toString)).
filter(s => s.score > ruleToSpecialize.score).
filter(r => !theory.head.clauses.exists(r1 => r1.thetaSubsumes(r) && r.thetaSubsumes(r1))).
sortBy { x => (-x.w_pos, -x.score, x.body.length + 1) }
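                      // i.e. prefer refinements with higher weight, then higher score, then fewer literals.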
if (suitableRefs.nonEmpty) {
performedSpecialization = true
val bestRefinement = suitableRefs.head
if (bestRefinement.refinements.isEmpty) bestRefinement.generateCandidateRefs(inps.globals)
val update = specializeRuleAndUpdate(ruleToSpecialize, bestRefinement)
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
}
}
if (performedSpecialization) break
}
}
}
if (prediction == "FN") {
//if (initiatedBy.isEmpty || (nonFiringInitRules.values.map(_.w).sum > initWeightSum) ) {
if (initiatedBy.isEmpty) {
//val newInitiationRule = generateNewExpert(batch, currentAtom, inps.globals, "initiatedAt", termWeightSum)
                    // Don't give the new rule the total weight of the termination part. It's dangerous
                    // (e.g. if the termination part has total weight 0.0 the new rule also gets 0.0, and the FN is never fixed!)
                    // and it's also wrong: you just over-train to get rid of a few mistakes.
val newInitiationRule = generateNewExpert(batch, currentAtom, inps.globals, "initiatedAt", 1.0)
if (!newInitiationRule.equals(Clause.empty)) {
logger.info(s"Generated new initiation rule in response to FN atom: $currentAtom")
val update = addRuleAndUpdate(newInitiationRule)
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
} else {
logger.info(s"At batch $batchCounter: Failed to generate bottom rule from FN mistake with atom: $currentAtom")
}
/* THE CODE BELOW IS THE SAME AS ABOVE. FACTOR IT OUT TO A FUNCTION. */
} else {
                // Then the FN is due to over-weighted firing termination rules. Specialize one.
if (terminatedBy.nonEmpty) {
val termRulesToSpecialize =
theory.tail.head.clauses.
filter(x => terminatedBy.toSet.contains(x.##.toString))
var performedSpecialization = false
termRulesToSpecialize foreach { ruleToSpecialize =>
                      // Find suitable refinements, i.e. refinements that DO NOT fire
                      // w.r.t. the current FN atom.
val suitableRefs =
ruleToSpecialize.refinements.
filter(r => nonFiringTermRules.keySet.contains(r.##.toString)).
filter(s => s.score > ruleToSpecialize.score).
filter(r => !theory.tail.head.clauses.exists(r1 => r1.thetaSubsumes(r) && r.thetaSubsumes(r1))).
sortBy { x => (-x.w_pos, -x.score, x.body.length + 1) }
if (suitableRefs.nonEmpty) {
performedSpecialization = true
val bestRefinement = suitableRefs.head
if (bestRefinement.refinements.isEmpty) bestRefinement.generateCandidateRefs(inps.globals)
val update = specializeRuleAndUpdate(ruleToSpecialize, bestRefinement)
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
}
}
if (performedSpecialization) break
} else {
                  // This would be a problem, certainly something worth looking into:
                  // we have an FN with firing initiation rules but no firing termination ones.
                  // UPDATE: It's ok, it can happen because the non-firing weight is greater than the firing weight.
                  /*
                  throw new RuntimeException(s"We have an FN atom, which is " +
                    s"initiated by some rules and terminated by NO rules. It's worth finding out how this happens!\nBatch" +
                    s" counter: $batchCounter, atom: $currentAtom")
*/
}
}
}
}
}
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
}
finishedBatch = true
}
}
totalWeightsUpdateTime += predictAndUpdateTimed._2
}
val (tps, fps, fns) = (inferredAtoms._1, inferredAtoms._2, inferredAtoms._3)
val (fpsNumber, currentFNsNumber) = (fps.size, fns.size)
// All atoms in tps are certainly true positives.
// But we need to account for the real true atoms which are not in there.
val restFNs = trueAtoms.diff(tps).filter(!fns.contains(_))
      if (restFNs.nonEmpty) throw new RuntimeException(s"Unaccounted-for FNs detected: $restFNs")
val restFNsNumber = restFNs.size
var trueFNsNumber = currentFNsNumber + restFNsNumber
// Ugly (AND DANGEROUS) hack to avoid counting as mistakes the holdsAt/2 atoms at the first time point of an interval
//if (trueFNsNumber == 2) trueFNsNumber = 0
// We have gathered the FNs that have not been inferred, but we need to add the rest of them in the global counter
totalFNs = totalFNs ++ restFNs
// Just for debugging.
val weightsAfter = merged.clauses.map(x => x.w_pos)
//just for debugging
val inertiaAfter = inertiaExpert.map(x => x)
prequentialError = prequentialError :+ (fpsNumber + trueFNsNumber).toDouble
if (fpsNumber + trueFNsNumber > 0) {
logger.info(s"\nMade mistakes: FPs: $fpsNumber, " +
s"FNs: $trueFNsNumber.\nWeights before: $weightsBefore\nWeights after: $weightsAfter\nInertia Before: " +
s"$inertiaBefore\nInertia after: $inertiaAfter") //\nPredicted with:\n${merged.showWithStats}")
}
} else { // No Event Calculus. We'll see what we'll do with that.
}
}
def updateAnalyticsBuffers(atom: String, initWghtSum: Double, termWghtSum: Double,
nonInitWghtSum: Double, nonTermWghtSum: Double,
predictInitWghtSum: Double, predictTermWghtSum: Double,
inertWghtSum: Double, holdsWght: Double) = {
if (atom.contains(keepStatsForFluent)) {
initWeightSums += initWghtSum
TermWeightSums += termWghtSum
nonInitWeightSums += nonInitWghtSum
monTermWeightSums += nonTermWghtSum
predictInitWeightSums += predictInitWghtSum
predictTermWeightSums += predictTermWghtSum
inertWeightSums += inertWghtSum
prodictHoldsWeightSums += holdsWght
}
}
def updateTrueLabels(atom: String, value: Double) = {
if (atom.contains(keepStatsForFluent)) {
trueLabels += value
}
}
def predictAndUpdate(currentAtom: String, currentFluent: String, init: Vector[String],
term: Vector[String], markedMap: scala.collection.immutable.Map[String, Clause],
testOnly: Boolean, trueAtoms: Set[String], batch: Example) = {
val (initiatedBy, terminatedBy) = (init, term)
val initWeightSum = if (initiatedBy.nonEmpty) initiatedBy.map(x => markedMap(x).w_pos).sum else 0.0
val termWeightSum = if (terminatedBy.nonEmpty) terminatedBy.map(x => markedMap(x).w_pos).sum else 0.0
val inertiaExpertPrediction = getInertiaExpertPrediction(currentFluent)
val firingInitRulesIds = initiatedBy
val nonFiringInitRules =
markedMap.filter(x =>
x._2.head.functor.contains("initiated") && !firingInitRulesIds.contains(x._1))
// Use this to have all rules and their refs vote independently:
    // This was the default but does not seem reasonable.
val predictInitiated = initWeightSum // - nonFiringInitRules.values.map(_.w).sum
    // Use this to have one prediction per top rule, resulting from combining the
    // opinions of the rule's sub-expert committee (its specializations)
/*
val predictInitiatedTimed = Utils.time{
val individualPredictions =
theory.head.clauses.map( rule => getRulePrediction(rule, firingInitRulesIds, nonFiringInitRules.keys.toVector) )
individualPredictions.sum
}
val predictInitiated = predictInitiatedTimed._1
totalPredictionTime += predictInitiatedTimed._2
*/
// Use this to have one prediction per top rule.
// The best (based on current weight) between the top-rule's own
// prediction and the prediction of the rule's best sub-expert:
/*
val predictInitiatedTimed = Utils.time{
val individualPredictions =
theory.head.clauses.map( rule => getRulePrediction1(rule, firingInitRulesIds, nonFiringInitRules.keys.toVector) )
individualPredictions.sum
}
val predictInitiated = predictInitiatedTimed._1
totalPredictionTime += predictInitiatedTimed._2
*/
val firingTermRulesIds = terminatedBy
val nonFiringTermRules =
markedMap.filter(x =>
x._2.head.functor.contains("terminated") && !firingTermRulesIds.toSet.contains(x._1))
// Use this to have all rules and their refs vote independently:
    // This was the default but does not seem reasonable.
val predictTerminated = termWeightSum // - nonFiringTermRules.values.map(_.w).sum
    // Use this to have one prediction per top rule, resulting from combining the
    // opinions of the rule's sub-expert committee (its specializations):
/*
val predictTerminatedTimed = Utils.time {
val individualPredictions =
theory.tail.head.clauses.map( rule => getRulePrediction(rule, firingTermRulesIds, nonFiringTermRules.keys.toVector) )
individualPredictions.sum
}
val predictTerminated = predictTerminatedTimed._1
totalPredictionTime += predictTerminatedTimed._2
*/
// Use this to have one prediction per top rule.
// The best (based on current weight) between the top-rule's own
// prediction and the prediction of the rule's best sub-expert:
/*
val predictTerminatedTimed = Utils.time {
val individualPredictions =
theory.tail.head.clauses.map( rule => getRulePrediction1(rule, firingTermRulesIds, nonFiringTermRules.keys.toVector) )
individualPredictions.sum
}
val predictTerminated = predictTerminatedTimed._1
totalPredictionTime += predictTerminatedTimed._2
*/
// WITH INERTIA
///*
val _predictAtomHolds = predict(inertiaExpertPrediction, predictInitiated, predictTerminated, isStrongInertia)
val (predictAtomHolds, holdsWeight) = (_predictAtomHolds._1, _predictAtomHolds._2)
//*/
// NO INERTIA
//val _predictAtomHolds = predictInitiated - predictTerminated
//val (predictAtomHolds, holdsWeight) = (if (_predictAtomHolds > 0) true else false, _predictAtomHolds)
updateAnalyticsBuffers(currentAtom, initWeightSum, termWeightSum,
nonFiringInitRules.values.map(_.w_pos).sum, nonFiringTermRules.values.map(_.w_pos).sum,
predictInitiated, predictTerminated, inertiaExpertPrediction, holdsWeight)
/*
* THIS PREDICTION RULE IS WRONG:
*
* val holdsPredictionWeight = inertiaExpertPrediction + predictInitiated - predictTerminated
* val predictAtomHolds = holdsPredictionWeight > 0.0
*
* Look what might happen:
*
* Made FP mistake for atom: holdsAt(meeting(id3,id1),2600).
* Inertia weight: 0.0
* Firing initiation rules: 0, sum of weight: 0.0
* Non firing initiation rules: 17, sum of weight: 25.23524944624197
* Firing termination rules: 3, sum of weight: 101.70330033914848
* Non firing termination rules: 4, sum of weight: 135.60440045219798
*
* Semantically, there is no reason to predict HOLDS: The fluent does not hold by inertia, nor is it
   * initiated by any rule. But we have that predictInitiated = -25.23524944624197 and
   * predictTerminated = -33.901100113049495, because in both cases the sum of weights of the non-firing
   * rules is greater than that of the firing ones. Therefore (and since predictTerminated < predictInitiated) we have
*
* holdsPredictionWeight = 0.0 + (-25.23524944624197) - (-33.901100113049495) > 0
*
* and we get a wrong prediction, while there is no reason for that.
*
* */
//val holdsPredictionWeight = inertiaExpertPrediction + predictInitiated - predictTerminated
//val predictAtomHolds = holdsPredictionWeight > 0.0
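    // With predictInitiated/predictTerminated as defined above (firing experts only,
    // non-firing ones abstain), the naive combination would be sound. A sketch of the
    // intuition (the actual logic lives in predict/4 above and may differ in how it
    // handles inertia):
    //   val votes = inertiaExpertPrediction + predictInitiated - predictTerminated
    //   val holds = votes > 0.0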
if (predictAtomHolds) {
// If the fluent we predicted that it holds is not in the inertia expert map, add it,
// with the weight it was predicted.
if (!inertiaExpert.keySet.contains(currentFluent)) {
// this is guaranteed to be positive from the prediction rule
val holdsWeight = inertiaExpertPrediction + predictInitiated
inertiaExpert += (currentFluent -> holdsWeight)
//inertiaExpert += (currentFluent -> 1.0)
}
if (trueAtoms.contains(currentAtom)) {
// Then it's a TP. Simply return it without updating any weights, after properly scoring the rules.
// Update the analytics buffer for this atom
updateTrueLabels(currentAtom, 1.0)
updateRulesScore("TP", initiatedBy.map(x => markedMap(x)), nonFiringInitRules.values.toVector,
terminatedBy.map(x => markedMap(x)), nonFiringTermRules.values.toVector)
totalTPs = totalTPs + currentAtom
"TP"
} else {
// Then it's an FP.
// Update the analytics buffer for this atom
updateTrueLabels(currentAtom, 0.0)
// That's for debugging
/*
reportMistake("FP", currentAtom, inertiaExpertPrediction, initiatedBy.size,
nonFiringInitRules.size, terminatedBy.size, nonFiringTermRules.size, initWeightSum,
termWeightSum, nonFiringInitRules.values.map(_.w).sum, nonFiringTermRules.values.map(_.w).sum, this.logger)
*/
totalFPs = totalFPs + currentAtom
if (!testOnly) {
// Decrease the weights of all rules that contribute to the FP: Rules that incorrectly initiate it.
reduceWeights(initiatedBy, markedMap, learningRate)
// Non-firing rules should not be touched, they should be treated as abstaining experts.
// Increasing the weights of non-firing rules results in over-training of garbage, which dominate.
// reduceWeights(nonFiringTermRules.keys.toVector, markedMap, learningRate)
// Reduce the weight of the inertia expert for the particular atom, if the inertia expert predicted that it holds.
if (inertiaExpert.keySet.contains(currentFluent)) {
val newWeight = inertiaExpert(currentFluent) * Math.pow(Math.E, (-1.0) * learningRate)
inertiaExpert += (currentFluent -> newWeight)
}
// Increase the weights of rules that can fix the mistake:
// Rules that terminate the fluent and initiation rules that do not fire (NO!).
increaseWeights(terminatedBy, markedMap, learningRate)
// Non-firing rules should not be touched, they should be treated as abstaining experts.
// Increasing the weights of non-firing rules results in over-training of garbage, which dominate.
//increaseWeights(nonFiringInitRules.keys.toVector, markedMap, learningRate)
}
updateRulesScore("FP", initiatedBy.map(x => markedMap(x)), nonFiringInitRules.values.toVector,
terminatedBy.map(x => markedMap(x)), nonFiringTermRules.values.toVector)
"FP" // result returned to the calling method.
}
} else {
// We predicted that the atom does not hold...
if (trueAtoms.contains(currentAtom)) {
// ...while it actually does, so we have an FN.
// Update the analytics buffer for this atom
updateTrueLabels(currentAtom, 1.0)
/*
reportMistake("FN", currentAtom, inertiaExpertPrediction, initiatedBy.size,
nonFiringInitRules.size, terminatedBy.size, nonFiringTermRules.size, initWeightSum,
termWeightSum, nonFiringInitRules.values.map(_.w).sum, nonFiringTermRules.values.map(_.w).sum, this.logger)
*/
totalFNs = totalFNs + currentAtom
if (!testOnly) {
// Increase the weights of all rules that initiate it
increaseWeights(initiatedBy, markedMap, learningRate)
// and all rules that do not terminate it (NO!!)
// Non-firing rules should not be touched, they should be treated as abstaining experts.
// Increasing the weights of non-firing rules results in over-training of garbage, which dominate.
//increaseWeights(nonFiringTermRules.keys.toVector, markedMap, learningRate)
// Increase the weight of the inertia expert for that particular atom,
// if the inertia expert predicted that it holds.
if (inertiaExpert.keySet.contains(currentFluent)) {
var newWeight = inertiaExpert(currentFluent) * Math.pow(Math.E, 1.0 * learningRate)
newWeight = if (newWeight.isPosInfinity) inertiaExpert(currentFluent) else newWeight
inertiaExpert += (currentFluent -> newWeight)
}
// Also, reduce the weights of all initiation rules that do not fire (NO!) and all termination rules that fire.
//reduceWeights(nonFiringInitRules.keys.toVector, markedMap, learningRate) // No, maybe that's wrong, there's no point in penalizing a rule that does not fire.
reduceWeights(terminatedBy, markedMap, learningRate)
}
updateRulesScore("FN", initiatedBy.map(x => markedMap(x)), nonFiringInitRules.values.toVector,
terminatedBy.map(x => markedMap(x)), nonFiringTermRules.values.toVector)
"FN" // result returned to the calling method.
} else {
        // Then we have an atom which was erroneously inferred by the (un-weighted) rules (with ordinary logical
        // inference), but which was eventually not inferred, thanks to the expert-based weighted framework. That is,
        // either the total weight of the non-firing "initiatedAt" fragment of the theory was greater than the weight
        // of the firing part, or the total weight of the firing "terminatedAt" fragment of the theory was greater
        // than the weight of the "initiatedAt" fragment. In either case, the atom is eventually a TN. We don't do
        // anything with it, but we need to instruct the inertia expert to "forget" the atom.
// Update the analytics buffer for this atom
updateTrueLabels(currentAtom, 0.0)
if (inertiaExpert.keySet.contains(currentFluent)) {
inertiaExpert -= currentFluent
}
updateRulesScore("TN", initiatedBy.map(x => markedMap(x)), nonFiringInitRules.values.toVector,
terminatedBy.map(x => markedMap(x)), nonFiringTermRules.values.toVector)
"TN"
}
}
}
}
// File: OLED-master/src/main/scala/oled/mwua/Learner_NEW.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua
import java.io.{BufferedWriter, File, FileWriter, PrintWriter}
import akka.actor.Actor
import app.runutils.IOHandling.InputSource
import app.runutils.RunningOptions
import com.typesafe.scalalogging.Logger
import logic.Examples.Example
import logic.{Clause, Theory}
import oled.functions.SingleCoreOLEDFunctions
import org.slf4j.LoggerFactory
import scala.util.Random
import scala.util.matching.Regex
class Learner_NEW[T <: InputSource](
val inps: RunningOptions,
val trainingDataOptions: T,
val testingDataOptions: T,
val trainingDataFunction: T => Iterator[Example],
val testingDataFunction: T => Iterator[Example],
val writeExprmtResultsTo: String = "") extends Actor {
val learningRate = 0.2 //1.0 //0.05 //0.2 // 1.0 usually works for winnow
val epsilon = 0.9 //0.9 // used in the randomized version
val randomizedPrediction = false
val feedbackGap = 100
// If this is false, some non-determinism is introduced (number of mistakes may vary slightly from round to round)
val specializeAllAwakeRulesOnFPMistake = false
val withInertia = true //false
// This is either 'winnow' or 'hedge'
val weightUpdateStrategy = "hedge" //"winnow" // "hedge"
// Set this to 1.0 to simulate the case of constant feedback at each round.
// For values < 1.0 we only update weights and structure if a biased coin
// with receiveFeedbackBias for heads returns heads.
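  // A sketch of the intended mechanism (the check itself is presumably performed inside
  // ExpertAdviceFunctions.process, to which this value is passed):
  //   val receiveFeedback = scala.util.Random.nextDouble() < receiveFeedbackBias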
val receiveFeedbackBias = 1.0 //0.09 //0.2 //0.5
val conservativeRuleGeneration = true
  // A rule must account for at least this percentage of the total FPs before it is specialized
val percentOfMistakesBeforeSpecialize = 0
// have this set to "" for a regular run without an input theory
//val inputTheoryFile = "/home/nkatz/dev/BKExamples/BK-various-taks/WeightLearning/Caviar/fragment/meeting/ASP/asp-rules-test"
val inputTheoryFile = ""
val inputTheory: List[Clause] = {
def matches(p: Regex, str: String) = p.pattern.matcher(str).matches
if (inputTheoryFile == "") {
Nil
} else {
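      // Keep only non-empty, non-comment lines: the empty regex """""".r matches
      // only the empty string, so !matches(...) drops blank lines.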
val rules = scala.io.Source.fromFile(inputTheoryFile).getLines.toList.filter(line => !matches("""""".r, line) && !line.startsWith("%"))
val rulesParsed = rules.map(r => Clause.parse(r))
rulesParsed
}
}
val stateHandler: StateHandler = {
val stateHandler = new StateHandler
if (inputTheory.isEmpty) {
stateHandler
} else {
val (inputInitRules, inputTermRules) = inputTheory.foldLeft(List.empty[Clause], List.empty[Clause]){ (x, y) =>
if (y.head.functor.contains("initiated")) (x._1 :+ y, x._2) else (x._1, x._2 :+ y)
}
stateHandler.ensemble.initiationRules = inputInitRules
stateHandler.ensemble.terminationRules = inputTermRules
stateHandler
}
}
// Just print-out all the data
/*
val test = {
val pw = new PrintWriter(new File("/home/nkatz/Desktop/caviar-whole" ))
data = getTrainData
while (data.hasNext) {
val x = data.next()
val a = (x.annotation ++ x.narrative).mkString("\n")+"\n\n% New Batch\n\n"
pw.write(a)
}
pw.close()
}
*/
private val logger = LoggerFactory.getLogger(self.path.name)
private val withec = true
// Control learning iterations over the data
private var repeatFor = inps.repeatFor
// Used to count examples for holdout evaluation
private var exampleCounter = 0
// Local data variable. Cleared at each iteration (in case repfor > 1).
private var data = Iterator[Example]()
// This is optional. A testing set (for holdout evaluation) may not be provided.
private var testingData = Iterator[Example]()
// Counts the number of processed batches. Used to determine when to
// perform holdout evaluation on the test set. Incremented whenever a
// new batch is fetched (see the getNextBatch() method)
private var batchCounter = 0
private var startTime = System.nanoTime()
private var endTime = System.nanoTime()
  // Get the training data from the current input source
private def getTrainData = trainingDataFunction(trainingDataOptions)
private def getTestingData = testingDataFunction(testingDataOptions)
private def getNextBatch(lleNoise: Boolean = false) = {
this.batchCounter += 1
if (data.isEmpty) {
Example()
} else {
if (!lleNoise) {
data.next()
} else {
val currentBatch = data.next()
val noisyNarrative = {
currentBatch.narrative map { x =>
x.replaceAll("active", "active_1")
}
}
Example(annot = currentBatch.annotation, nar = noisyNarrative, _time = currentBatch.time)
}
}
}
def logEmptyDataError() = {
if (this.data.isEmpty) {
logger.error(s"Input source ${inps.train} is empty.")
System.exit(-1)
}
}
def wrapupAndShutDown() = {}
def processData() = {
data = getTrainData
logEmptyDataError()
var done = false
var perBatchError: Vector[Int] = Vector.empty[Int]
while (!done) {
val batch = getNextBatch(lleNoise = false)
logger.info(s"Processing batch $batchCounter")
if (batch.isEmpty) {
logger.info(s"Finished the data.")
endTime = System.nanoTime()
logger.info("Done.")
//workers foreach(w => w ! PoisonPill)
wrapUp()
done = true
//context.system.terminate()
} else {
val trueLabels = batch.annotation.toSet
if (inputTheory.isEmpty) {
//stateHandler.perBatchError = stateHandler.perBatchError :+ batchError
/* ======================= Dirty hack ===============================*/
stateHandler.clearDelayedUpdates
var bias = 0.0
val error = ExpertAdviceFunctions.process(batch, batch.annotation.toSet, inps,
stateHandler, trueLabels, learningRate, epsilon, randomizedPrediction,
batchCounter, percentOfMistakesBeforeSpecialize, specializeAllAwakeRulesOnFPMistake,
bias, conservativeRuleGeneration, weightUpdateStrategy, withInertia, feedbackGap)
perBatchError = perBatchError :+ error
generateNewRules(error, batch)
bias = 1.0
ExpertAdviceFunctions.process(batch, batch.annotation.toSet, inps,
stateHandler, trueLabels, learningRate, epsilon, randomizedPrediction,
batchCounter, percentOfMistakesBeforeSpecialize, specializeAllAwakeRulesOnFPMistake,
bias, conservativeRuleGeneration, weightUpdateStrategy, withInertia, feedbackGap)
expandRules()
woled.Utils.dumpToFile(avgLoss(perBatchError)._3.mkString(", "), "/home/nkatz/Desktop/kernel", "overwrite")
//stateHandler.pruneRules("weight", 0.0005, Logger(this.getClass).underlying)
//stateHandler.pruneRules("score", inps.pruneThreshold)
/*
stateHandler.clearDelayedUpdates
var bias = 0.0
val error = ExpertAdviceFunctions.process(batch, batch.annotation.toSet, inps,
stateHandler, trueLabels, learningRate, epsilon, randomizedPrediction,
batchCounter, percentOfMistakesBeforeSpecialize, specializeAllAwakeRulesOnFPMistake,
bias, conservativeRuleGeneration, weightUpdateStrategy, withInertia, feedbackGap)
perBatchError = perBatchError :+ error
println(s"Per batch error:\n$perBatchError")
println(s"Accumulated Per batch error:\n${perBatchError.scanLeft(0.0)(_ + _).tail}")
//val debugDelayedUpdates = stateHandler.delayedUpdates.map(x => x.atom.atom).mkString("\n")
stateHandler.delayedUpdates foreach { u =>
val newRuleFlag = ExpertAdviceFunctions.updateWeights(u.atom, u.prediction, u.inertiaExpertPrediction, u.initWeightSum,
u.termWeightSum, u.predictedLabel, u.markedMap, u.feedback, stateHandler, u.learningRate, u.weightUpdateStrategy,
u.withInertia)
u.generateNewRuleFlag = newRuleFlag
}
var newRulesFrom = stateHandler.delayedUpdates.filter(_.generateNewRuleFlag)
/*newRulesFrom foreach { u =>
val previousTime = u.orderedTimes( u.orderedTimes.indexOf(u.atom.time) -1 )
ClassicSleepingExpertsHedge.updateStructure_NEW_HEDGE(u.atom, previousTime, u.markedMap, u.predictedLabel,
u.feedback, batch, u.atom.atom, inps, Logger(this.getClass).underlying, stateHandler,
percentOfMistakesBeforeSpecialize, randomizedPrediction, "", false,
conservativeRuleGeneration, u.generateNewRuleFlag)
}*/
// Generating rules from each mistake breaks things down. Until I find a generic strategy for new rule
// generation (applicable to WOLED also), just generate say, 3 rules, randomly.
if (newRulesFrom.nonEmpty) {
val random = new Random
for (_ <- 1 to 5) {
val u = newRulesFrom(random.nextInt(newRulesFrom.length))
newRulesFrom = newRulesFrom.filter(x => x != u)
val previousTime = u.orderedTimes( u.orderedTimes.indexOf(u.atom.time) -1 )
ClassicSleepingExpertsHedge.updateStructure_NEW_HEDGE(u.atom, previousTime, u.markedMap, u.predictedLabel,
u.feedback, batch, u.atom.atom, inps, Logger(this.getClass).underlying, stateHandler,
percentOfMistakesBeforeSpecialize, randomizedPrediction, "", false,
conservativeRuleGeneration, u.generateNewRuleFlag)
}
}
expandRules()
val allRules = (stateHandler.ensemble.initiationRules ++ stateHandler.ensemble.terminationRules).flatMap(x => List(x) ++ x.refinements)
println(s"\n\n====================== Theory Size: ${allRules.size}=======================")
woled.Utils.dumpToFile(avgLoss(perBatchError)._3.mkString(", "), "/home/nkatz/Desktop/kernel", "overwrite")
stateHandler.pruneRules(inps.pruneThreshold)
*/
// ACTUAL EXECUTION FLOW
/*ExpertAdviceFunctions.process(batch, batch.annotation.toSet, inps,
stateHandler, trueLabels, learningRate, epsilon, randomizedPrediction,
batchCounter, percentOfMistakesBeforeSpecialize, specializeAllAwakeRulesOnFPMistake,
receiveFeedbackBias, conservativeRuleGeneration, weightUpdateStrategy, withInertia, feedbackGap)*/
} else {
/*==============================*/
stateHandler.clearDelayedUpdates
/*==============================*/
var bias = 1.0
val error = ExpertAdviceFunctions.process(batch, batch.annotation.toSet, inps,
stateHandler, trueLabels, learningRate, epsilon, randomizedPrediction,
batchCounter, percentOfMistakesBeforeSpecialize, specializeAllAwakeRulesOnFPMistake,
bias, conservativeRuleGeneration, weightUpdateStrategy, withInertia, feedbackGap, inputTheory = Some(inputTheory))
perBatchError = perBatchError :+ error
/*val debugDelayedUpdates = stateHandler.delayedUpdates.map(x => x.atom.atom).mkString("\n")
stateHandler.delayedUpdates foreach { u =>
val newRuleFlag = ExpertAdviceFunctions.updateWeights(u.atom, u.prediction, u.inertiaExpertPrediction, u.initWeightSum,
u.termWeightSum, u.predictedLabel, u.markedMap, u.feedback, stateHandler, u.learningRate, u.weightUpdateStrategy,
u.withInertia)
u.generateNewRuleFlag = newRuleFlag
}*/
/*bias = 1.0
ExpertAdviceFunctions.process(batch, batch.annotation.toSet, inps,
stateHandler, trueLabels, learningRate, epsilon, randomizedPrediction,
batchCounter, percentOfMistakesBeforeSpecialize, specializeAllAwakeRulesOnFPMistake,
bias, conservativeRuleGeneration, weightUpdateStrategy, withInertia, feedbackGap, inputTheory = Some(inputTheory))*/
println(s"Per batch error:\n$perBatchError")
println(s"Accumulated Per batch error:\n${perBatchError.scanLeft(0.0)(_ + _).tail}")
/*ExpertAdviceFunctions.process(batch, batch.annotation.toSet, inps,
stateHandler, trueLabels, learningRate, epsilon, randomizedPrediction,
batchCounter, percentOfMistakesBeforeSpecialize, specializeAllAwakeRulesOnFPMistake,
receiveFeedbackBias, conservativeRuleGeneration, weightUpdateStrategy, withInertia, feedbackGap, inputTheory = Some(inputTheory))*/
}
}
}
}
def expandRules() = {
val expandedInit =
SingleCoreOLEDFunctions.expandRules(Theory(stateHandler.ensemble.initiationRules.filter(x => x.refinements.nonEmpty)), inps, logger)
val expandedTerm =
SingleCoreOLEDFunctions.expandRules(Theory(stateHandler.ensemble.terminationRules.filter(x => x.refinements.nonEmpty)), inps, logger)
stateHandler.ensemble.initiationRules = expandedInit._1.clauses
stateHandler.ensemble.terminationRules = expandedTerm._1.clauses
}
def avgLoss(in: Vector[Int]) = {
in.foldLeft(0, 0, Vector.empty[Double]){ (x, y) =>
val (count, prevSum, avgVector) = (x._1, x._2, x._3)
val (newCount, newSum) = (count + 1, prevSum + y)
(newCount, newSum, avgVector :+ newSum.toDouble / newCount)
}
}
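  // Worked example: avgLoss(Vector(2, 0, 4)) == (3, 6, Vector(2.0, 1.0, 2.0)),
  // i.e. (number of batches, total error, running average error after each batch).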
def generateNewRules(error: Double, batch: Example) = {
if (error > 0) {
println("Generating new rules...")
val topInit = stateHandler.ensemble.initiationRules
val topTerm = stateHandler.ensemble.terminationRules
val growNewInit = Theory(topInit).growNewRuleTest(batch, "initiatedAt", inps.globals)
val growNewTerm = Theory(topTerm).growNewRuleTest(batch, "terminatedAt", inps.globals)
val newInit = if (growNewInit) oled.functions.SingleCoreOLEDFunctions.generateNewRules(Theory(topInit), batch, "initiatedAt", inps.globals) else Nil
val newTerm = if (growNewTerm) oled.functions.SingleCoreOLEDFunctions.generateNewRules(Theory(topTerm), batch, "terminatedAt", inps.globals) else Nil
stateHandler.ensemble.updateRules(newInit ++ newTerm, "add", inps)
}
}
def receive = {
case "start" => {
for (i <- (1 to 1)) {
processData()
}
}
case "start-streaming" => {
data = getTrainData
val slidingData = data.sliding(20)
var done = false
      var accumMistakes = Vector.empty[Int]
//while (!done) {
while (slidingData.hasNext) {
val dataSlice = slidingData.next()
if (dataSlice.isEmpty) {
logger.info(s"Finished.")
endTime = System.nanoTime()
logger.info("Done.")
//workers foreach(w => w ! PoisonPill)
wrapUp()
done = true
context.system.terminate()
} else {
// Train on the first data point of the slice, test on the rest.
val train = dataSlice.head
val trueLabels = train.annotation.toSet
if (inputTheory.isEmpty) {
ExpertAdviceFunctions.process(train, train.annotation.toSet, inps,
stateHandler, trueLabels, learningRate, epsilon, randomizedPrediction,
batchCounter, percentOfMistakesBeforeSpecialize, specializeAllAwakeRulesOnFPMistake,
receiveFeedbackBias, conservativeRuleGeneration, weightUpdateStrategy, withInertia, feedbackGap)
} else {
ExpertAdviceFunctions.process(train, train.annotation.toSet, inps,
stateHandler, trueLabels, learningRate, epsilon, randomizedPrediction,
batchCounter, percentOfMistakesBeforeSpecialize, specializeAllAwakeRulesOnFPMistake,
receiveFeedbackBias, conservativeRuleGeneration, weightUpdateStrategy, withInertia,
feedbackGap, inputTheory = Some(inputTheory))
}
// test
val test = dataSlice.tail
//println(s"training on ${train.time} testing on ${test.map(x => x.time).mkString(" ")}")
//println(s"training on ${train.time}")
stateHandler.perBatchError = Vector.empty[Int]
/*
test foreach { batch =>
val trueLabels = batch.annotation.toSet
val _receiveFeedbackBias = 0.0
if (inputTheory.isEmpty) {
ExpertAdviceFunctions.process(batch, batch.annotation.toSet, inps,
stateHandler, trueLabels, learningRate, epsilon, randomizedPrediction,
batchCounter, percentOfMistakesBeforeSpecialize, specializeAllAwakeRulesOnFPMistake, _receiveFeedbackBias,
conservativeRuleGeneration, weightUpdateStrategy)
} else {
ExpertAdviceFunctions.process(batch, batch.annotation.toSet, inps,
stateHandler, trueLabels, learningRate, epsilon, randomizedPrediction,
batchCounter, percentOfMistakesBeforeSpecialize, specializeAllAwakeRulesOnFPMistake, _receiveFeedbackBias,
conservativeRuleGeneration, weightUpdateStrategy, inputTheory = Some(inputTheory))
}
}
*/
val currentError = stateHandler.perBatchError.sum
          accumMistakes = accumMistakes :+ currentError
println(s"trained on ${train.time}, current error: $currentError")
}
}
val file = new File(s"/home/nkatz/Desktop/${inps.targetHLE}-streaming")
val bw = new BufferedWriter(new FileWriter(file))
      bw.write(accumMistakes.mkString(","))
bw.close()
logger.info(s"Finished.")
endTime = System.nanoTime()
logger.info("Done.")
//workers foreach(w => w ! PoisonPill)
wrapUp()
done = true
context.system.terminate()
}
}
def wrapUp() = {
if (trainingDataOptions != testingDataOptions) {
// show the info so far:
wrapUp_NO_TEST()
// and do the test
logger.info("\n\nEvaluating on the test set\n\n")
val _stateHandler = new StateHandler
_stateHandler.ensemble = {
val ensemble = stateHandler.ensemble
val getRules = (allRules: List[Clause]) => {
val nonEmptyBodied = allRules.filter(x => x.body.nonEmpty) // Refs of empty-bodied rules would be too immature.
nonEmptyBodied.map(x => (x.refinements :+ x).minBy(-_.w_pos)) // use the ref with the best score so far
//nonEmptyBodied.flatMap( x => (x.refinements :+ x)) // use all
}
val init = getRules(ensemble.initiationRules)
val term = getRules(ensemble.terminationRules)
ensemble.initiationRules = init
ensemble.terminationRules = term
ensemble
}
val testData = testingDataFunction(testingDataOptions)
val _receiveFeedbackBias = 0.0 // Give no supervision for training, we're only testing
testData foreach { batch =>
_stateHandler.inertiaExpert.clear()
val trueLabels = batch.annotation.toSet
ExpertAdviceFunctions.process(batch, batch.annotation.toSet, inps,
_stateHandler, trueLabels, learningRate, epsilon, randomizedPrediction,
batchCounter, percentOfMistakesBeforeSpecialize, specializeAllAwakeRulesOnFPMistake, _receiveFeedbackBias,
conservativeRuleGeneration, weightUpdateStrategy)
}
logger.info(s"Prequential error vector:\n${_stateHandler.perBatchError.map(x => x.toDouble)}")
logger.info(s"Prequential error vector (Accumulated Error):\n${_stateHandler.perBatchError.scanLeft(0.0)(_ + _).tail}")
logger.info(s"Total TPs: ${_stateHandler.totalTPs}, Total FPs: ${_stateHandler.totalFPs}, Total FNs: ${_stateHandler.totalFNs}, Total TNs: ${_stateHandler.totalTNs}")
if (_receiveFeedbackBias != 1.0) {
logger.info(s"\nReceived feedback on ${_stateHandler.receivedFeedback} rounds")
}
val tps = _stateHandler.totalTPs
val fps = _stateHandler.totalFPs
val fns = _stateHandler.totalFNs
val microPrecision = tps.toDouble / (tps.toDouble + fps.toDouble)
val microRecall = tps.toDouble / (tps.toDouble + fns.toDouble)
val microFscore = (2 * microPrecision * microRecall) / (microPrecision + microRecall)
println(s"Micro F1-score on test set: $microFscore")
} else {
wrapUp_NO_TEST()
}
}
def wrapUp_NO_TEST() = {
def show(in: List[Clause]) = {
in.sortBy(x => -x.w_pos).
map(x => x.showWithStats + "\n" + x.refinements.sortBy(x => -x.w_pos).map(x => x.showWithStats).mkString("\n ")).mkString("\n")
}
//logger.info(show(stateHandler.ensemble.initiationRules))
//logger.info(show(stateHandler.ensemble.terminationRules))
logger.info(Theory(stateHandler.ensemble.initiationRules.sortBy(x => -x.w_pos)).showWithStats)
logger.info(Theory(stateHandler.ensemble.terminationRules.sortBy(x => -x.w_pos)).showWithStats)
logger.info(s"Prequential error vector:\n${stateHandler.perBatchError.map(x => x.toDouble)}")
logger.info(s"Prequential error vector (Accumulated Error):\n${stateHandler.perBatchError.scanLeft(0.0)(_ + _).tail}")
logger.info(s"Prequential (running) F1-score:\n${stateHandler.runningF1Score}")
logger.info(s"Running rules number:\n${stateHandler.runningRulesNumber}")
logger.info(s"Total TPs: ${stateHandler.totalTPs}, Total FPs: ${stateHandler.totalFPs}, Total FNs: ${stateHandler.totalFNs}, Total TNs: ${stateHandler.totalTNs}")
logger.info(s"Total time: ${(endTime - startTime) / 1000000000.0}")
if (randomizedPrediction) {
logger.info(s"\nPredicted with initiation rules: ${stateHandler.predictedWithInitRule} times")
logger.info(s"\nPredicted with terminated rules: ${stateHandler.predictedWithTermRule} times")
logger.info(s"\nPredicted with inertia: ${stateHandler.predictedWithInertia} times")
}
//logger.info(s"Predictions vector:\n${stateHandler.predictionsVector}")
logger.info(s"Total number of rounds: ${stateHandler.totalNumberOfRounds}")
/*
val tps = stateHandler.totalTPs
val fps = stateHandler.totalFPs
val fns = stateHandler.totalFNs
val microPrecision = tps.toDouble/(tps.toDouble + fps.toDouble)
val microRecall = tps.toDouble/(tps.toDouble + fns.toDouble)
val microFscore = (2*microPrecision*microRecall)/(microPrecision+microRecall)
println(s"Micro F1-score: $microFscore")
*/
if (receiveFeedbackBias != 1.0 || feedbackGap != 0) {
logger.info(s"\nReceived feedback on ${stateHandler.receivedFeedback} rounds")
}
}
}
| 23,733 | 40.932862 | 172 | scala |
// File: OLED-master/src/main/scala/oled/mwua/Learner_OLD_DEBUG.scala

/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua
import java.io.File
import akka.actor.Actor
import app.runutils.IOHandling.InputSource
import app.runutils.{Globals, RunningOptions}
import logic.Examples.Example
import logic._
import org.slf4j.LoggerFactory
import oled.functions.SingleCoreOLEDFunctions.eval
import scala.collection.mutable.ListBuffer
import AuxFuncs._
import oled.mwua.ExpertAdviceFunctions.sortGroundingsByTime
import utils.Utils
import scala.util.control.Breaks._
import scala.util.matching.Regex
/**
* Created by nkatz at 26/10/2018
*/
/*
*
* I ran this on the normal CAVIAR ordering as follows:
* --inpath=/home/nkatz/dev/OLED-BK/BKExamples/BK-various-taks/DevTest/caviar-bk --delta=0.00001 --prune=0.8
* --train=caviar --repfor=4 --chunksize=50 --try-more-rules=true --scorefun=default --onlineprune=true
*
* */
class Learner_OLD_DEBUG[T <: InputSource](
val inps: RunningOptions,
val trainingDataOptions: T,
val testingDataOptions: T,
val trainingDataFunction: T => Iterator[Example],
val testingDataFunction: T => Iterator[Example],
val writeExprmtResultsTo: String = "") extends Actor {
startTime = System.nanoTime()
private var totalTPs = 0
private var totalFPs = 0
private var totalFNs = 0
private var totalTNs = 0
//--------------------------
val normalizeWeights = true
//--------------------------
var processedBatches = 0
private var totalBatchProcessingTime = 0.0
private var totalRuleScoringTime = 0.0
private var totalNewRuleTestTime = 0.0
private var totalCompressRulesTime = 0.0
private var totalExpandRulesTime = 0.0
private var totalNewRuleGenerationTime = 0.0
private var totalWeightsUpdateTime = 0.0
private var totalgroundingsTime = 0.0
private var totalPredictionTime = 0.0
private val logger = LoggerFactory.getLogger(self.path.name)
private val withec = Globals.glvalues("with-ec").toBoolean
  // This map contains all fluents that were true previously,
// (i.e. at the time point prior to the one that is currently being processed)
// along with their weights. The weights are updated properly at each time point
// and new atoms are added if we predict that they start holding, and
// existing atoms are removed if we predict that they're terminated.
// The key values are string representations of fluents, not holdsAt/2 atoms.
// So, we're storing "meeting(id1,id2)", not "holdsAt(meeting(id1,id2), 10)".
private var inertiaExpert = scala.collection.mutable.Map[String, Double]()
def getInertiaExpertPrediction(fluent: String) = {
if (inertiaExpert.keySet.contains(fluent)) inertiaExpert(fluent) else 0.0
}
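  // A minimal usage sketch (hypothetical values): after predicting that
  // "meeting(id1,id2)" starts holding with weight 2.0 we'd have
  //   inertiaExpert += ("meeting(id1,id2)" -> 2.0)
  //   getInertiaExpertPrediction("meeting(id1,id2)")  // 2.0
  //   getInertiaExpertPrediction("moving(id1,id2)")   // 0.0, unknown fluent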
val learningRate = 1.0
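  // The learning rate drives multiplicative (Winnow-style) weight updates: an expert
  // that contributes to a mistake has its weight w scaled to w * e^(-learningRate),
  // while an expert that could have fixed the mistake gets w * e^(learningRate)
  // (see reduceWeights/increaseWeights from AuxFuncs and their uses below).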
//----------------------------------------------------------------------
// If true, the firing/non-firing initiation rules are not taken
// into account when making a prediction about a fluent that persists
// by inertia.
// Setting this to false is the default for learning/reasoning with
// weakly initiated fluents, but setting it to true is necessary for
// strongly initiated settings, in order to allow for previously
// recognized fluents to persist.
private val isStrongInertia = false
//----------------------------------------------------------------------
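  // A sketch of the intended difference (assuming predictInitiated/predictTerminated
  // are the firing initiation/termination weight sums used below):
  //   weak inertia  : holds iff inertia + predictInitiated - predictTerminated > 0
  //   strong inertia: while inertia > 0, initiation rules are ignored, i.e.
  //                   holds iff inertia - predictTerminated > 0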
/* All these are for presenting analytics/results after a run. */
private val initWeightSums = new ListBuffer[Double]
private val nonInitWeightSums = new ListBuffer[Double]
private val TermWeightSums = new ListBuffer[Double]
private val monTermWeightSums = new ListBuffer[Double]
private val predictInitWeightSums = new ListBuffer[Double]
private val predictTermWeightSums = new ListBuffer[Double]
private val inertWeightSums = new ListBuffer[Double]
private val prodictHoldsWeightSums = new ListBuffer[Double]
  // For each query atom encountered during a run, 0.0 or 1.0 is stored in this buffer (false/true respectively)
private val trueLabels = new ListBuffer[Double]
// Keep weights only for this
val keepStatsForFluent = "meeting(id4,id5)"
// Control learning iterations over the data
private var repeatFor = inps.repeatFor
// Used to count examples for holdout evaluation
private var exampleCounter = 0
// Local data variable. Cleared at each iteration (in case repfor > 1).
private var data = Iterator[Example]()
// This is optional. A testing set (for holdout evaluation) may not be provided.
private var testingData = Iterator[Example]()
  // Counts the number of processed batches. Used to determine when to
// perform holdout evaluation on the test set. Incremented whenever a
// new batch is fetched (see the getNextBatch() method)
private var batchCounter = 0
// Stores the error from the prequential evaluation at each batch.
private var prequentialError = Vector[Double]()
// Current prequential error (for logging only, updated as a string message containing the actual error).
private var currentError = ""
// Evolving theory. If we're learning with the Event Calculus the head of the
// list is the initiation part of the theory and the tail is the termination.
// If not, the list has a single element (the current version of the theory).
private var theory = if (withec) List(Theory(), Theory()) else List(Theory())
private var startTime = System.nanoTime()
private var endTime = System.nanoTime()
  // Get the training data from the current input source
private def getTrainData = trainingDataFunction(trainingDataOptions)
private def getTestingData = testingDataFunction(testingDataOptions)
private def getNextBatch(lleNoise: Boolean = false) = {
this.batchCounter += 1
if (data.isEmpty) {
Example()
} else {
if (!lleNoise) {
data.next()
} else {
val currentBatch = data.next()
val noisyNarrative = {
currentBatch.narrative map { x =>
x.replaceAll("active", "active_1")
}
}
Example(annot = currentBatch.annotation, nar = noisyNarrative, _time = currentBatch.time)
}
}
}
def receive = {
case "start" => {
this.repeatFor -= 1
this.data = getTrainData
if (inps.test != "None") this.testingData = getTestingData
if (this.data.isEmpty) {
logger.error(s"Input source ${inps.train} is empty.")
System.exit(-1)
}
processNext()
}
case "eval" => {
// Prequential evaluation of a given theory
logger.info(s"Performing prequential Evaluation of theory from ${inps.evalth}")
(1 to repeatFor) foreach { _ =>
this.data = getTrainData
while (data.hasNext) {
evaluate(data.next(), inps.evalth)
logger.info(currentError)
}
}
logger.info(s"Prequential error vector:\n${prequentialError.mkString(",")}")
logger.info(s"Prequential error vector (Accumulated Error):\n${prequentialError.scanLeft(0.0)(_ + _).tail}")
logger.info(s"Prequential F1-Score:\n$runningF1Score")
logger.info(s"Total TPs: $TPs, total FPs: $FPs, total FNs: $FNs")
context.system.terminate()
}
// Use a hand-crafted theory for sequential prediction. This updates the rule weights after each round,
// but it does not mess with the structure of the rules.
case "predict" => {
      // Keep only non-empty, non-comment lines from the input theory file.
      val rules = scala.io.Source.fromFile(inps.evalth).getLines.toList.filter(line => line.trim.nonEmpty && !line.startsWith("%"))
val rulesParsed = rules.map(r => Clause.parse(r))
(1 to repeatFor) foreach { _ =>
this.data = getTrainData
while (data.hasNext) {
val batch = getNextBatch(lleNoise = false)
logger.info(s"Prosessing $batchCounter")
evaluateTest_NEW(batch, "", false, true, Theory(rulesParsed))
}
}
logger.info(s"\nPrequential error vector:\n${prequentialError.mkString(",")}")
logger.info(s"\nPrequential error vector (Accumulated Error):\n${prequentialError.scanLeft(0.0)(_ + _).tail}")
context.system.terminate()
}
case "move on" => processNext()
}
/*
* Performs online evaluation and sends the next batch to the worker(s) for processing.
*
* */
private def processNext() = {
val nextBatch = getNextBatch()
logger.info(s"Processing batch $batchCounter")
if (nextBatch.isEmpty) {
logger.info(s"Finished the data.")
if (this.repeatFor > 0) {
logger.info(s"Starting new iteration.")
self ! "start"
} else if (this.repeatFor == 0) {
endTime = System.nanoTime()
logger.info("Done.")
wrapUp()
context.system.terminate()
} else {
throw new RuntimeException("This should never have happened (repeatfor is now negative?)")
}
} else {
//evaluate(nextBatch)
//evaluateTest(nextBatch)
evaluateTest_NEW(nextBatch)
//evaluateTest_NEW_EXPAND_WHEN_NEEDED(nextBatch)
}
}
/* Finished. Just show results and shut down */
def wrapUp(): Unit = {
val merged = {
if (theory.length == 1) {
theory.head
} else {
Theory(theory.head.clauses ++ theory.tail.head.clauses)
}
}
val theorySize = merged.clauses.foldLeft(0)((x, y) => x + y.body.length + 1)
val totalRunningTime = (endTime - startTime) / 1000000000.0
val totalTrainingTime = totalBatchProcessingTime
logger.info(s"\nAll rules found (non-pruned, non-compressed):\n ${merged.showWithStats}")
val pruned = Theory(merged.clauses.filter(_.score >= inps.pruneThreshold))
/* THIS MAY TAKE TOO LONG FOR LARGE AND COMPLEX THEORIES!! */
logger.info("Compressing theory...")
val pruned_ = Theory(LogicUtils.compressTheory(pruned.clauses))
logger.info(s"\nFinal Pruned theory found:\n ${pruned_.showWithStats}")
logger.info(s"Theory size: $theorySize")
logger.info(s"Total running time: $totalTrainingTime")
logger.info(s"Total batch processing time: $totalRunningTime")
logger.info(s"Total rule scoring time: $totalRuleScoringTime")
logger.info(s"Total rule expansion time: $totalExpandRulesTime")
logger.info(s"Total rule compression time: $totalCompressRulesTime")
logger.info(s"Total testing for new rule generation time: $totalNewRuleTestTime")
logger.info(s"Total new rule generation time: $totalNewRuleGenerationTime")
logger.info(s"Total prediction & weights update time: $totalWeightsUpdateTime")
logger.info(s"Total groundings computation time: $totalgroundingsTime")
logger.info(s"Prequential error vector:\n${prequentialError.mkString(",")}")
logger.info(s"Prequential error vector (Accumulated Error):\n${prequentialError.scanLeft(0.0)(_ + _).tail}")
if (this.writeExprmtResultsTo != "") {
// Just for quick and dirty experiments
val x = prequentialError.scanLeft(0.0)(_ + _).tail.toString()
Utils.writeToFile(new File(this.writeExprmtResultsTo), "append") { p => List(x).foreach(p.println) }
}
logger.info(s"Total TPs: $totalTPs, Total FPs: $totalFPs, Total FNs: $totalFNs")
if (trainingDataOptions != testingDataOptions) {
//logger.info("Evaluating on the test set")
val testData = testingDataFunction(testingDataOptions)
// Prequential eval on the test set (without weights update at each step).
logger.info("Evaluating on the test set with the theory found so far (no weights update at each step, no structure updates).")
prequentialError = Vector[Double]()
totalTPs = 0
totalFPs = 0
totalFNs = 0
// This includes the refinements in the final theory
// Comment it out to test with the final theory
///*
val predictWith = getFinalTheory(theory, useAvgWeights = true, logger)
val newInit = predictWith._1
val newTerm = predictWith._2
theory = List(Theory(newInit), Theory(newTerm))
//*/
testData foreach { batch =>
evaluateTest_NEW(batch, testOnly = true)
}
logger.info(s"Prequential error on test set:\n${prequentialError.mkString(",")}")
logger.info(s"Prequential error vector on test set (Accumulated Error):\n${prequentialError.scanLeft(0.0)(_ + _).tail}")
logger.info(s"Evaluation on the test set\ntps: $totalTPs\nfps: $totalFPs\nfns: $totalFNs")
// just for quick and dirty experiments
if (this.writeExprmtResultsTo != "") {
val x = s"tps: $totalTPs\nfps: $totalFPs\nfns: $totalFNs\n\n"
Utils.writeToFile(new File(this.writeExprmtResultsTo), "append") { p => List(x).foreach(p.println) }
}
logger.info(s"Total prediction & weights update time: $totalWeightsUpdateTime")
logger.info(s"Total groundings computation time: $totalgroundingsTime")
logger.info(s"Total per-rule prediction time (combining rule's sub-experts' predictions): $totalPredictionTime")
}
//val (tps,fps,fns,precision,recall,fscore) = crossVal(pruned_, data=testData, globals = inps.globals, inps = inps)
//logger.info(s"\ntps: $tps\nfps: $fps\nfns: " + s"$fns\nprecision: $precision\nrecall: $recall\nf-score: $fscore)")
}
var TPs = 0
var FPs = 0
var FNs = 0
var runningF1Score = Vector.empty[Double]
def evaluate(batch: Example, inputTheoryFile: String = ""): Unit = {
if (inps.prequential) {
if (withec) {
val (init, term) = (theory.head, theory.tail.head)
//val merged = Theory( (init.clauses ++ term.clauses).filter(p => p.body.length >= 1 && p.seenExmplsNum > 5000 && p.score > 0.7) )
//val merged = Theory( (init.clauses ++ term.clauses).filter(p => p.body.length >= 1 && p.score > 0.9) )
val merged = Theory(init.clauses.filter(p => p.precision >= inps.pruneThreshold) ++ term.clauses.filter(p => p.recall >= inps.pruneThreshold))
val (tps, fps, fns, precision, recall, fscore) = eval(merged, batch, inps, inputTheoryFile)
TPs += tps
FPs += fps
FNs += fns
val currentPrecision = TPs.toDouble / (TPs + FPs)
val currentRecall = TPs.toDouble / (TPs + FNs)
val _currentF1Score = 2 * currentPrecision * currentRecall / (currentPrecision + currentRecall)
val currentF1Score = if (_currentF1Score.isNaN) 0.0 else _currentF1Score
runningF1Score = runningF1Score :+ currentF1Score
val error = (fps + fns).toDouble
currentError = s"Number of mistakes (FPs+FNs) "
this.prequentialError = this.prequentialError :+ error
println(s"time, scoring theory size, error: ${batch.time}, ${merged.size}, $error")
println(this.prequentialError)
}
}
    // TODO: Implement holdout evaluation.
if (inps.holdout != 0) {
}
}
private def getMergedTheory(testOnly: Boolean) = {
if (withec) {
val (init, term) = (theory.head, theory.tail.head)
val _merged = Theory(init.clauses ++ term.clauses)
if (testOnly) {
_merged
} else {
_merged.clauses foreach (rule => if (rule.refinements.isEmpty) rule.generateCandidateRefs(inps.globals))
// Do we want to also filter(p => p.score > inps.pruneThreshold) here?
// Do we want to compress here? Theory(LogicUtils.compressTheory(_merged.clauses))
val mergedWithRefs = Theory(_merged.clauses ++ _merged.clauses.flatMap(_.refinements))
//val merged = _merged
val merged = mergedWithRefs
merged
}
} else {
Theory() /* TODO */
}
}
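  // Note: merging each rule with its refinements is what turns the specializations
  // into sleeping sub-experts: they are scored and weighted on every round, so a
  // refinement can later replace its parent via specializeRuleAndUpdate below.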
/* This is called whenever a new rule is added due to a mistake. */
private def addRuleAndUpdate(r: Clause, testOnly: Boolean = false) = {
// Update the current theory
if (withec) {
if (r.head.functor.contains("initiated")) {
theory = List(Theory(theory.head.clauses :+ r), theory.tail.head)
} else if (r.head.functor.contains("terminated")) {
theory = List(theory.head, Theory(theory.tail.head.clauses :+ r))
} else {
throw new RuntimeException("Error while updating current theory.")
}
} else {
/* TODO */
}
// Update merged theory and marked-up stuff.
val mergedNew = getMergedTheory(testOnly)
val markedNew = marked(mergedNew.clauses.toVector, inps.globals)
val markedProgramNew = markedNew._1
val markedMapNew = markedNew._2
(mergedNew, markedProgramNew, markedMapNew)
}
/* This is called whenever we're specializing a rule due to a mistake */
private def specializeRuleAndUpdate(
topRule: Clause,
refinement: Clause, currentAtom: String, mistakeType: String, testOnly: Boolean = false) = {
    // Remove the parent rule (it is being replaced by its refinement).
    val filter = (p: List[Clause]) => p.filterNot(y => topRule.equals(y))
// Update the current theory
val oldInit = theory.head.clauses
val oldTerm = theory.tail.head.clauses
if (withec) {
if (topRule.head.functor.contains("initiated")) {
val newInit = filter(oldInit) :+ refinement
theory = List(Theory(newInit), Theory(oldTerm))
showInfo(topRule, refinement, currentAtom, mistakeType)
} else if (topRule.head.functor.contains("terminated")) {
val newTerm = filter(oldTerm) :+ refinement
theory = List(Theory(oldInit), Theory(newTerm))
showInfo(topRule, refinement, currentAtom, mistakeType)
} else {
throw new RuntimeException("Error while updating current theory.")
}
} else {
/* TODO */
}
// Update merged theory and marked-up stuff.
val mergedNew = getMergedTheory(testOnly)
val markedNew = marked(mergedNew.clauses.toVector, inps.globals)
val markedProgramNew = markedNew._1
val markedMapNew = markedNew._2
(mergedNew, markedProgramNew, markedMapNew)
}
private def showInfo(parent: Clause, child: Clause, currentAtom: String, mistakeType: String) = {
logger.info(s"\nSpecialization in response to $mistakeType atom $currentAtom:\nRule (id: ${parent.##} | score: ${parent.score} | tps: ${parent.tps} fps: ${parent.fps} " +
s"fns: ${parent.fns} | ExpertWeight: ${parent.w_pos} " +
s"AvgExpertWeight: ${parent.avgWeight})\n${parent.tostring}\nwas refined to" +
s"(id: ${child.##} | score: ${child.score} | tps: ${child.tps} fps: ${child.fps} fns: ${child.fns} | " +
s"ExpertWeight: ${child.w_pos} AvgExpertWeight: ${child.avgWeight})\n${child.tostring}")
}
def evaluateTest_NEW(batch: Example, inputTheoryFile: String = "",
testOnly: Boolean = false, weightsOnly: Boolean = false, inputTheory: Theory = Theory()) = {
if (withec) {
var merged = if (inputTheory == Theory()) getMergedTheory(testOnly) else inputTheory
// just for debugging
val weightsBefore = merged.clauses.map(x => x.w_pos)
// just for debugging
val inertiaBefore = inertiaExpert.map(x => x)
val _marked = marked(merged.clauses.toVector, inps.globals)
var markedProgram = _marked._1
var markedMap = _marked._2
val e = (batch.annotationASP ++ batch.narrativeASP).mkString("\n")
val trueAtoms = batch.annotation.toSet
var perBatchTPs = 0
var perBatchFPs = 0
var perBatchFNs = 0
var perBatchTNs = 0
var finishedBatch = false
var alreadyProcessedAtoms = Set.empty[String]
while (!finishedBatch) {
val groundingsMapTimed = Utils.time{
computeRuleGroundings(inps, markedProgram, markedMap, e, trueAtoms)
}
val groundingsMap = groundingsMapTimed._1._1
val times = groundingsMapTimed._1._2
val groundingsTime = groundingsMapTimed._2
totalgroundingsTime += groundingsTime
// We sort the groundings map by the time-stamp of each inferred holdsAt atom in ascending order.
// For each holdsAt atom we calculate if it should actually be inferred, based on the weights
// of the rules that initiate or terminate it. In this process, the weights of the rules are
// updated based on whether the atom is mistakenly/correctly predicted and whether each individual
// rule mistakenly/correctly predicts it. Sorting the inferred atoms and iterating over them is necessary
// so as to promote/penalize the rule weights correctly after each mistake.
val sorted = groundingsMap.map { entry =>
val parsed = Literal.parse(entry._1)
val time = parsed.terms.tail.head.name.toInt
((entry._1, time), entry._2)
}.toVector.sortBy(x => x._1._2) // sort by time
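        // e.g. "holdsAt(meeting(id1,id2),10)" parses to time-stamp 10, so the map
        // becomes a chronologically ordered stream of prediction points.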
val predictAndUpdateTimed = Utils.time {
breakable {
sorted foreach { y =>
val (currentAtom, currentTime) = (y._1._1, y._1._2)
if (!alreadyProcessedAtoms.contains(currentAtom)) {
val parsed = Literal.parse(currentAtom)
val currentFluent = parsed.terms.head.tostring
val (initiatedBy, terminatedBy) = (y._2._1, y._2._2)
//val initWeightSum = if (initiatedBy.nonEmpty) initiatedBy.map(x => markedMap(x).w).sum else 0.0
//val termWeightSum = if (terminatedBy.nonEmpty) terminatedBy.map(x => markedMap(x).w).sum else 0.0
// only updates weights when we're not running in test mode.
val prediction =
predictAndUpdate(currentAtom, currentFluent,
initiatedBy, terminatedBy, markedMap, testOnly, trueAtoms, batch)
//val prediction = _prediction._1
prediction match {
case "TP" => perBatchTPs += 1
case "FP" => perBatchFPs += 1
case "FN" => perBatchFNs += 1
case "TN" => perBatchTNs += 1
case _ => throw new RuntimeException("Unexpected response from predictAndUpdate")
}
if (!testOnly && !weightsOnly) {
if (prediction == "FP" && terminatedBy.isEmpty) {
// Let's try adding a new termination expert only when there is no other termination expert that fires.
// Else, let it fix the mistakes in future rounds by increasing the weights of firing terminating experts.
// Generate a new termination rule from the point where we currently err.
// This rule will be used for fixing the mistake in the next round.
// This most probably results in over-training. It increases weights too much and the new rule dominates.
//val totalWeight = inertiaExpert(currentFluent) + initWeightSum
val totalWeight = 1.0
val newTerminationRule = generateNewExpert(batch, currentAtom, inps.globals, "terminatedAt", totalWeight)
if (!newTerminationRule.equals(Clause.empty)) {
logger.info(s"Generated new termination rule in response to FP atom: $currentAtom")
                    // Since neither the new (empty-bodied) termination rule nor its refinements fire,
                    // they do not contribute to the FP; increase their weights further.
increaseWeights(newTerminationRule.refinements :+ newTerminationRule, learningRate)
// Finally, add the new termination rule to the current theory.
val update = addRuleAndUpdate(newTerminationRule)
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
} else {
logger.info(s"At batch $batchCounter: Failed to generate bottom rule from FP mistake with atom: $currentAtom")
}
}
//if (prediction == "FN" && initiatedBy.isEmpty && getInertiaExpertPrediction(currentFluent) == 0.0) {
if (prediction == "FN" && initiatedBy.isEmpty) {
//val newInitiationRule = generateNewExpert(batch, currentAtom, inps.globals, "initiatedAt", termWeightSum)
                  // Don't give the new rule the total weight of the termination part. It's dangerous
                  // (e.g. if the termination part has total weight 0.0, the new rule gets weight 0.0
                  // and the FN is never fixed!) and it's also wrong: you just over-train to get rid of a few mistakes!
val newInitiationRule = generateNewExpert(batch, currentAtom, inps.globals, "initiatedAt", 1.0)
if (!newInitiationRule.equals(Clause.empty)) {
logger.info(s"Generated new initiation rule in response to FN atom: $currentAtom")
val update = addRuleAndUpdate(newInitiationRule)
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
} else {
logger.info(s"At batch $batchCounter: Failed to generate bottom rule from FN mistake with atom: $currentAtom")
}
}
}
}
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
}
finishedBatch = true
}
}
totalWeightsUpdateTime += predictAndUpdateTimed._2
}
// Just for debugging.
val weightsAfter = merged.clauses.map(x => x.w_pos)
//just for debugging
val inertiaAfter = inertiaExpert.map(x => x)
prequentialError = prequentialError :+ (perBatchFPs + perBatchFNs).toDouble
if (perBatchFPs + perBatchFNs > 0) {
logger.info(s"\nMade mistakes: FPs: $perBatchFPs, " +
s"FNs: $perBatchFNs.\nWeights before: $weightsBefore\nWeights after: $weightsAfter\nInertia Before: " +
s"$inertiaBefore\nInertia after: $inertiaAfter")
}
totalTPs += perBatchTPs
totalFPs += perBatchFPs
totalFNs += perBatchFNs
totalTNs += perBatchTNs
} else { // No Event Calculus. We'll see what we'll do with that.
}
}
def evaluateTest_NEW_EXPAND_WHEN_NEEDED(batch: Example, inputTheoryFile: String = "",
testOnly: Boolean = false, weightsOnly: Boolean = false, inputTheory: Theory = Theory()) = {
if (withec) {
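      // Debugging hook: set a breakpoint here to inspect a specific batch.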
if (batchCounter == 38) {
val stop = "stop"
}
var merged = if (inputTheory == Theory()) getMergedTheory(testOnly) else inputTheory
// just for debugging
val weightsBefore = merged.clauses.map(x => x.w_pos)
// just for debugging
val inertiaBefore = inertiaExpert.map(x => x)
val _marked = marked(merged.clauses.toVector, inps.globals)
var markedProgram = _marked._1
var markedMap = _marked._2
val e = (batch.annotationASP ++ batch.narrativeASP).mkString("\n")
val trueAtoms = batch.annotation.toSet
var perBatchTPs = 0
var perBatchFPs = 0
var perBatchFNs = 0
var perBatchTNs = 0
var finishedBatch = false
var alreadyProcessedAtoms = Set.empty[String]
while (!finishedBatch) {
val groundingsMapTimed = Utils.time{
computeRuleGroundings(inps, markedProgram, markedMap, e, trueAtoms)
}
val groundingsMap = groundingsMapTimed._1._1
val groundingsTime = groundingsMapTimed._2
totalgroundingsTime += groundingsTime
// We sort the groundings map by the time-stamp of each inferred holdsAt atom in ascending order.
// For each holdsAt atom we calculate if it should actually be inferred, based on the weights
// of the rules that initiate or terminate it. In this process, the weights of the rules are
// updated based on whether the atom is mistakenly/correctly predicted and whether each individual
// rule mistakenly/correctly predicts it. Sorting the inferred atoms and iterating over them is necessary
// so as to promote/penalize the rule weights correctly after each mistake.
/*
val sorted = groundingsMap.map { entry =>
val parsed = Literal.parse(entry._1)
val time = parsed.terms.tail.head.name.toInt
((entry._1, time), entry._2)
}.toVector.sortBy(x => x._1._2) // sort by time
*/
val sorted = sortGroundingsByTime(groundingsMap)
val predictAndUpdateTimed = Utils.time {
breakable {
sorted foreach { y =>
//val (currentAtom, currentTime) = (y._1._1, y._1._2)
val (currentAtom, currentTime) = (y.atom, y.time)
if (!alreadyProcessedAtoms.contains(currentAtom)) {
val parsed = Literal.parse(currentAtom)
val currentFluent = parsed.terms.head.tostring
//val (initiatedBy, terminatedBy) = (y._2._1, y._2._2)
val (initiatedBy, terminatedBy) = (y.initiatedBy, y.terminatedBy)
// This is also calculated at predictAndUpdate, we need to factor it out.
// Calculate it here (because it is needed here) and pass it to predictAndUpdate
// to avoid doing it twice.
///*
val nonFiringInitRules =
markedMap.filter(x =>
x._2.head.functor.contains("initiated") && !initiatedBy.contains(x._1))
//*/
// This is also calculated at predictAndUpdate, we need to factor it out.
// Calculate it here (because it is needed here) and pass it to predictAndUpdate
// to avoid doing it twice.
///*
val nonFiringTermRules =
markedMap.filter(x =>
x._2.head.functor.contains("terminated") && !terminatedBy.toSet.contains(x._1))
//*/
//val initWeightSum = if (initiatedBy.nonEmpty) initiatedBy.map(x => markedMap(x).w).sum else 0.0
//val termWeightSum = if (terminatedBy.nonEmpty) terminatedBy.map(x => markedMap(x).w).sum else 0.0
// only updates weights when we're not running in test mode.
val prediction =
predictAndUpdate(currentAtom, currentFluent,
initiatedBy, terminatedBy, markedMap, testOnly, trueAtoms, batch)
prediction match {
case "TP" => perBatchTPs += 1
case "FP" => perBatchFPs += 1
case "FN" => perBatchFNs += 1
case "TN" => perBatchTNs += 1
case _ => throw new RuntimeException("Unexpected response from predictAndUpdate")
}
if (!testOnly && !weightsOnly) {
if (prediction == "FP") {
if (terminatedBy.isEmpty) {
// Let's try adding a new termination expert only when there is no other termination expert that fires.
// Else, let it fix the mistakes in future rounds by increasing the weights of firing terminating experts.
// Generate a new termination rule from the point where we currently err.
// This rule will be used for fixing the mistake in the next round.
// This most probably results in over-training. It increases weights too much and the new rule dominates.
//val totalWeight = inertiaExpert(currentFluent) + initWeightSum
val totalWeight = 1.0
val newTerminationRule = generateNewExpert(batch, currentAtom, inps.globals, "terminatedAt", totalWeight)
if (!newTerminationRule.equals(Clause.empty)) {
logger.info(s"Generated new termination rule in response to FP atom: $currentAtom")
                        // Since neither the new (empty-bodied) termination rule nor its refinements fire,
                        // they do not contribute to the FP. Note that, unlike the older version above,
                        // we do NOT increase the weights of non-firing rules here.
                        //increaseWeights(newTerminationRule.refinements :+ newTerminationRule, learningRate)
// Finally, add the new termination rule to the current theory.
val update = addRuleAndUpdate(newTerminationRule)
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
} else {
logger.info(s"At batch $batchCounter: Failed to generate bottom rule from FP mistake with atom: $currentAtom")
}
} else { // We do have firing termination rules
// Specialize a firing initiation rule. If no firing initiation rule exists,
// therefore the FP is due to inertia, just let the inertia weight degrade, until
// the termination rules take over the majority (note that we do have firing termination rules here,
// so there are reasons to believe that we'll have such rules in the up-coming rounds).
if (initiatedBy.nonEmpty) {
// Note that we'll most certainly have a top-rule that fires: for every
// refinement that fires, its parent rule must fire as well. Therefore, if
// initiatedBy is non empty, at least some of the rules in there must be top rules.
val rulesToSpecialize =
                          // This is the first minor difference from the code for specializing
                          // termination rules (below): here we select the rules from the
                          // initiation part of the theory, below from the termination part.
theory.head.clauses.
filter(x => initiatedBy.toSet.contains(x.##.toString))
var performedSpecialization = false
rulesToSpecialize foreach { ruleToSpecialize =>
// Find suitable refinements, i.e refinements that DO NOT fire
// w.r.t. the current FP atom.
val suitableRefs =
                            // Here is the second difference: we use nonFiringInitRules here.
                            // This logic is duplicated in the termination case below and
                            // should be factored out into a common helper.
ruleToSpecialize.refinements.
filter(r => nonFiringInitRules.keySet.contains(r.##.toString)).
filter(s => s.score > ruleToSpecialize.score).
filter(r => !theory.head.clauses.exists(r1 => r1.thetaSubsumes(r) && r.thetaSubsumes(r1))).
sortBy { x => (-x.w_pos, -x.score, x.body.length + 1) }
if (suitableRefs.nonEmpty) {
performedSpecialization = true
val bestRefinement = suitableRefs.head
if (bestRefinement.refinements.isEmpty) bestRefinement.generateCandidateRefs(inps.globals)
val update = specializeRuleAndUpdate(ruleToSpecialize, bestRefinement, currentAtom, "FP")
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
}
}
if (performedSpecialization) break
}
}
}
if (prediction == "FN") {
//if (initiatedBy.isEmpty || (nonFiringInitRules.values.map(_.w).sum > initWeightSum) ) {
if (initiatedBy.isEmpty) {
//val newInitiationRule = generateNewExpert(batch, currentAtom, inps.globals, "initiatedAt", termWeightSum)
                      // Don't give the new rule the total weight of the termination part. It's dangerous
                      // (e.g. if the termination part has total weight 0.0, the new rule gets weight 0.0
                      // and the FN is never fixed!) and it's also wrong: you just over-train to get rid of a few mistakes!
val newInitiationRule = generateNewExpert(batch, currentAtom, inps.globals, "initiatedAt", 1.0)
if (!newInitiationRule.equals(Clause.empty)) {
logger.info(s"Generated new initiation rule in response to FN atom: $currentAtom")
val update = addRuleAndUpdate(newInitiationRule)
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
} else {
logger.info(s"At batch $batchCounter: Failed to generate bottom rule from FN mistake with atom: $currentAtom")
}
/* THE CODE BELOW IS THE SAME AS ABOVE. FACTOR IT OUT TO A FUNCTION. */
} else {
                    // Then the FN is due to over-weighted firing termination rules. Specialize one.
if (terminatedBy.nonEmpty) {
val termRulesToSpecialize =
theory.tail.head.clauses.
filter(x => terminatedBy.toSet.contains(x.##.toString))
var performedSpecialization = false
termRulesToSpecialize foreach { ruleToSpecialize =>
// Find suitable refinements, i.e refinements that DO NOT fire
// w.r.t. the current FN atom.
val suitableRefs =
ruleToSpecialize.refinements.
filter(r => nonFiringTermRules.keySet.contains(r.##.toString)).
filter(s => s.score > ruleToSpecialize.score).
filter(r => !theory.tail.head.clauses.exists(r1 => r1.thetaSubsumes(r) && r.thetaSubsumes(r1))).
sortBy { x => (-x.w_pos, -x.score, x.body.length + 1) }
if (suitableRefs.nonEmpty) {
performedSpecialization = true
val bestRefinement = suitableRefs.head
if (bestRefinement.refinements.isEmpty) bestRefinement.generateCandidateRefs(inps.globals)
val update = specializeRuleAndUpdate(ruleToSpecialize, bestRefinement, currentAtom, "FN")
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
}
}
if (performedSpecialization) break
} else {
                      // This would be a problem, certainly something worth looking into:
                      // an FN with firing initiation rules but no firing termination rules.
                      // UPDATE: It's OK, it can happen because the non-firing weight is greater than the firing weight.
/*
                      throw new RuntimeException(s"We have an FN atom, which is " +
                        s"initiated by some rules and terminated by NO rules. It's worth finding out how this happens!\nBatch" +
                        s" counter: $batchCounter, atom: $currentAtom")
*/
}
}
}
}
}
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
}
finishedBatch = true
}
}
totalWeightsUpdateTime += predictAndUpdateTimed._2
}
// Just for debugging.
val weightsAfter = merged.clauses.map(x => x.w_pos)
//just for debugging
val inertiaAfter = inertiaExpert.map(x => x)
prequentialError = prequentialError :+ (perBatchFNs + perBatchFPs).toDouble
if (perBatchFNs + perBatchFPs > 0) {
logger.info(s"\nMade mistakes: FPs: $perBatchFPs, " +
s"FNs: $perBatchFNs.\nWeights before: $weightsBefore\nWeights after: $weightsAfter\nInertia Before: " +
s"$inertiaBefore\nInertia after: $inertiaAfter")
}
totalTPs += perBatchTPs
totalFPs += perBatchFPs
totalFNs += perBatchFNs
totalTNs += perBatchTNs
} else { // No Event Calculus. We'll see what we'll do with that.
}
self ! "move on"
}
def updateAnalyticsBuffers(atom: String, initWghtSum: Double, termWghtSum: Double,
nonInitWghtSum: Double, nonTermWghtSum: Double,
predictInitWghtSum: Double, predictTermWghtSum: Double,
inertWghtSum: Double, holdsWght: Double) = {
if (atom.contains(keepStatsForFluent)) {
initWeightSums += initWghtSum
TermWeightSums += termWghtSum
nonInitWeightSums += nonInitWghtSum
monTermWeightSums += nonTermWghtSum
predictInitWeightSums += predictInitWghtSum
predictTermWeightSums += predictTermWghtSum
inertWeightSums += inertWghtSum
prodictHoldsWeightSums += holdsWght
}
}
def updateTrueLabels(atom: String, value: Double) = {
if (atom.contains(keepStatsForFluent)) {
trueLabels += value
}
}
def predictAndUpdate(currentAtom: String, currentFluent: String, init: Vector[String],
term: Vector[String], markedMap: scala.collection.immutable.Map[String, Clause],
testOnly: Boolean, trueAtoms: Set[String], batch: Example) = {
val (initiatedBy, terminatedBy) = (init, term)
val initWeightSum = if (initiatedBy.nonEmpty) initiatedBy.map(x => markedMap(x).w_pos).sum else 0.0
val termWeightSum = if (terminatedBy.nonEmpty) terminatedBy.map(x => markedMap(x).w_pos).sum else 0.0
val inertiaExpertPrediction = getInertiaExpertPrediction(currentFluent)
val firingInitRulesIds = initiatedBy
val nonFiringInitRules =
markedMap.filter(x =>
x._2.head.functor.contains("initiated") && !firingInitRulesIds.contains(x._1))
// Use this to have all rules and their refs vote independently:
    // This was the default but does not seem reasonable.
val predictInitiated = initWeightSum // - nonFiringInitRules.values.map(_.w).sum
    // Use this to have one prediction per top rule, resulting from combining the
// opinions of the rule's sub-expert committee (its specializations)
/*
val predictInitiatedTimed = Utils.time{
val individualPredictions =
theory.head.clauses.map( rule => getRulePrediction(rule, firingInitRulesIds, nonFiringInitRules.keys.toVector) )
individualPredictions.sum
}
val predictInitiated = predictInitiatedTimed._1
totalPredictionTime += predictInitiatedTimed._2
*/
// Use this to have one prediction per top rule.
// The best (based on current weight) between the top-rule's own
// prediction and the prediction of the rule's best sub-expert:
/*
val predictInitiatedTimed = Utils.time{
val individualPredictions =
theory.head.clauses.map( rule => getRulePrediction1(rule, firingInitRulesIds, nonFiringInitRules.keys.toVector) )
individualPredictions.sum
}
val predictInitiated = predictInitiatedTimed._1
totalPredictionTime += predictInitiatedTimed._2
*/
val firingTermRulesIds = terminatedBy
val nonFiringTermRules =
markedMap.filter(x =>
x._2.head.functor.contains("terminated") && !firingTermRulesIds.toSet.contains(x._1))
// Use this to have all rules and their refs vote independently:
    // This was the default but does not seem reasonable.
val predictTerminated = termWeightSum // - nonFiringTermRules.values.map(_.w).sum
    // Use this to have one prediction per top rule, resulting from combining the
// opinions of the rule's sub-expert committee (its specializations):
/*
val predictTerminatedTimed = Utils.time {
val individualPredictions =
theory.tail.head.clauses.map( rule => getRulePrediction(rule, firingTermRulesIds, nonFiringTermRules.keys.toVector) )
individualPredictions.sum
}
val predictTerminated = predictTerminatedTimed._1
totalPredictionTime += predictTerminatedTimed._2
*/
// Use this to have one prediction per top rule.
// The best (based on current weight) between the top-rule's own
// prediction and the prediction of the rule's best sub-expert:
/*
val predictTerminatedTimed = Utils.time {
val individualPredictions =
theory.tail.head.clauses.map( rule => getRulePrediction1(rule, firingTermRulesIds, nonFiringTermRules.keys.toVector) )
individualPredictions.sum
}
val predictTerminated = predictTerminatedTimed._1
totalPredictionTime += predictTerminatedTimed._2
*/
// WITH INERTIA
///*
val _predictAtomHolds = predict(inertiaExpertPrediction, predictInitiated, predictTerminated, isStrongInertia)
val (predictAtomHolds, holdsWeight) = (_predictAtomHolds._1, _predictAtomHolds._2)
//*/
// NO INERTIA
//val _predictAtomHolds = predictInitiated - predictTerminated
//val (predictAtomHolds, holdsWeight) = (if (_predictAtomHolds > 0) true else false, _predictAtomHolds)
updateAnalyticsBuffers(currentAtom, initWeightSum, termWeightSum,
nonFiringInitRules.values.map(_.w_pos).sum, nonFiringTermRules.values.map(_.w_pos).sum,
predictInitiated, predictTerminated, inertiaExpertPrediction, holdsWeight)
/*
* THIS PREDICTION RULE IS WRONG:
*
* val holdsPredictionWeight = inertiaExpertPrediction + predictInitiated - predictTerminated
* val predictAtomHolds = holdsPredictionWeight > 0.0
*
* Look what might happen:
*
* Made FP mistake for atom: holdsAt(meeting(id3,id1),2600).
* Inertia weight: 0.0
* Firing initiation rules: 0, sum of weight: 0.0
* Non firing initiation rules: 17, sum of weight: 25.23524944624197
* Firing termination rules: 3, sum of weight: 101.70330033914848
* Non firing termination rules: 4, sum of weight: 135.60440045219798
*
     * Semantically, there is no reason to predict HOLDS: the fluent does not hold by inertia, nor is it
     * initiated by any rule. But we get predictInitiated = -25.23524944624197 and
     * predictTerminated = -33.901100113049495, because in both cases the sum of weights of the non-firing
     * rules is greater than that of the firing ones. Therefore (since 33.901100113049495 > 25.23524944624197) we have
*
* holdsPredictionWeight = 0.0 + (-25.23524944624197) - (-33.901100113049495) > 0
*
* and we get a wrong prediction, while there is no reason for that.
*
* */
//val holdsPredictionWeight = inertiaExpertPrediction + predictInitiated - predictTerminated
//val predictAtomHolds = holdsPredictionWeight > 0.0
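    // A sketch of a prediction rule that avoids the pathology above (an assumption
    // about AuxFuncs.predict, where the actual logic lives): treat non-firing rules
    // as abstaining experts, so that predictInitiated/predictTerminated are sums over
    // firing rules only (hence >= 0.0, as in the code above), and predict HOLDS iff
    //   inertiaExpertPrediction + predictInitiated - predictTerminated > 0.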
if (predictAtomHolds) {
// If the fluent we predicted that it holds is not in the inertia expert map, add it,
// with the weight it was predicted.
if (!inertiaExpert.keySet.contains(currentFluent)) {
// this is guaranteed to be positive from the prediction rule
val holdsWeight = inertiaExpertPrediction + predictInitiated
inertiaExpert += (currentFluent -> holdsWeight)
}
if (trueAtoms.contains(currentAtom)) {
// Then it's a TP. Simply return it without updating any weights, after properly scoring the rules.
// Update the analytics buffer for this atom
updateTrueLabels(currentAtom, 1.0)
updateRulesScore("TP", initiatedBy.map(x => markedMap(x)), nonFiringInitRules.values.toVector,
terminatedBy.map(x => markedMap(x)), nonFiringTermRules.values.toVector)
"TP"
} else {
// Then it's an FP.
// Update the analytics buffer for this atom
updateTrueLabels(currentAtom, 0.0)
// That's for debugging
/*
reportMistake("FP", currentAtom, inertiaExpertPrediction, initiatedBy.size,
nonFiringInitRules.size, terminatedBy.size, nonFiringTermRules.size, initWeightSum,
termWeightSum, nonFiringInitRules.values.map(_.w).sum, nonFiringTermRules.values.map(_.w).sum, this.logger)
*/
if (!testOnly) {
// Decrease the weights of all rules that contribute to the FP: Rules that incorrectly initiate it.
reduceWeights(initiatedBy, markedMap, learningRate)
// Non-firing rules should not be touched, they should be treated as abstaining experts.
// Increasing the weights of non-firing rules results in over-training of garbage, which dominate.
// reduceWeights(nonFiringTermRules.keys.toVector, markedMap, learningRate)
// Reduce the weight of the inertia expert for the particular atom, if the inertia expert predicted that it holds.
if (inertiaExpert.keySet.contains(currentFluent)) {
val newWeight = inertiaExpert(currentFluent) * Math.pow(Math.E, (-1.0) * learningRate)
inertiaExpert += (currentFluent -> newWeight)
} else {
            // Since we predicted it as holding, should we add it to the inertia map?
            // No, that's not correct. We only add on TPs and remove on TNs.
// But in any case, even storing the fluent after an FP prediction does not change results.
//inertiaExpert += (currentFluent -> holdsWeight)
}
// Increase the weights of rules that can fix the mistake:
// Rules that terminate the fluent and initiation rules that do not fire (NO!).
increaseWeights(terminatedBy, markedMap, learningRate)
// Non-firing rules should not be touched, they should be treated as abstaining experts.
// Increasing the weights of non-firing rules results in over-training of garbage, which dominate.
//increaseWeights(nonFiringInitRules.keys.toVector, markedMap, learningRate)
}
updateRulesScore("FP", initiatedBy.map(x => markedMap(x)), nonFiringInitRules.values.toVector,
terminatedBy.map(x => markedMap(x)), nonFiringTermRules.values.toVector)
"FP" // result returned to the calling method.
}
} else {
// We predicted that the atom does not hold...
if (trueAtoms.contains(currentAtom)) {
// ...while it actually does, so we have an FN.
// Update the analytics buffer for this atom
updateTrueLabels(currentAtom, 1.0)
/*
reportMistake("FN", currentAtom, inertiaExpertPrediction, initiatedBy.size,
nonFiringInitRules.size, terminatedBy.size, nonFiringTermRules.size, initWeightSum,
termWeightSum, nonFiringInitRules.values.map(_.w).sum, nonFiringTermRules.values.map(_.w).sum, this.logger)
*/
if (!testOnly) {
// Increase the weights of all rules that initiate it
increaseWeights(initiatedBy, markedMap, learningRate)
// and all rules that do not terminate it (NO!!)
// Non-firing rules should not be touched, they should be treated as abstaining experts.
// Increasing the weights of non-firing rules results in over-training of garbage, which dominate.
//increaseWeights(nonFiringTermRules.keys.toVector, markedMap, learningRate)
// Increase the weight of the inertia expert for that particular atom,
// if the inertia expert predicted that it holds.
if (inertiaExpert.keySet.contains(currentFluent)) {
var newWeight = inertiaExpert(currentFluent) * Math.pow(Math.E, 1.0 * learningRate)
newWeight = if (newWeight.isPosInfinity) inertiaExpert(currentFluent) else newWeight
inertiaExpert += (currentFluent -> newWeight)
}
// Also, reduce the weights of all initiation rules that do not fire (NO!) and all termination rules that fire.
//reduceWeights(nonFiringInitRules.keys.toVector, markedMap, learningRate) // No, maybe that's wrong, there's no point in penalizing a rule that does not fire.
reduceWeights(terminatedBy, markedMap, learningRate)
}
updateRulesScore("FN", initiatedBy.map(x => markedMap(x)), nonFiringInitRules.values.toVector,
terminatedBy.map(x => markedMap(x)), nonFiringTermRules.values.toVector)
"FN" // result returned to the calling method.
} else {
        // Then we have an atom which was erroneously inferred by the (un-weighted) rules (with ordinary logical
        // inference), but which was eventually not inferred, thanks to the expert-based weighted framework. That is,
        // either the total weight of the non-firing "initiatedAt" fragment of the theory was greater than the weight
        // of the firing part, or the total weight of the firing "terminatedAt" fragment was greater
        // than the weight of the "initiatedAt" fragment. In either case, the atom is eventually a TN. We don't do
        // anything with it, but we need to instruct the inertia expert to "forget" the atom.
// Update the analytics buffer for this atom
updateTrueLabels(currentAtom, 0.0)
if (inertiaExpert.keySet.contains(currentFluent)) {
inertiaExpert -= currentFluent
}
updateRulesScore("TN", initiatedBy.map(x => markedMap(x)), nonFiringInitRules.values.toVector,
terminatedBy.map(x => markedMap(x)), nonFiringTermRules.values.toVector)
"TN"
}
}
}
}
| 55,908 | 43.301902 | 174 | scala |
// File: OLED-master/src/main/scala/oled/mwua/MiniBatchInference.scala

/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua
class MiniBatchInference {
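  // Placeholder: presumably intended for mini-batch (as opposed to single-atom,
  // fully online) inference; no logic implemented yet.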
}
| 745 | 31.434783 | 72 | scala |
// File: OLED-master/src/main/scala/oled/mwua/PrequentialInference.scala

/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua
import app.runutils.RunningOptions
import logic.Clause
import logic.Examples.Example
import oled.mwua.ExpertAdviceFunctions.{getFeedback, ground, groundEnsemble, predict, predictHedge, sortGroundingsByTime}
import oled.mwua.HelperClasses.AtomTobePredicted
class PrequentialInference(
val batch: Example,
val inps: RunningOptions,
val stateHandler: StateHandler,
val trueAtoms: Set[String],
val hedgePredictionThreshold: Double,
val testingMode: Boolean = false,
val streaming: Boolean = false,
val feedBackGap: Int = 0,
val withInertia: Boolean = true) {
private val withInputTheory = testingMode
private var batchError = 0
private var batchFPs = 0
private var batchFNs = 0
private var batchAtoms = 0
private var atomCounter = 0 //used for feedback gap
private var alreadyProcessedAtoms = Set.empty[String]
private var finishedBatch = false
val weightUpdateStrategy = "hedge" // this needs to become a global parameter
  def predictAndUpdate() = {
    // TODO: work in progress. The per-atom prediction/update loop is not wired in
    // yet; for now we just ground the batch once and exit (otherwise the loop
    // would never terminate, since nothing set finishedBatch).
    while (!finishedBatch) {
      val (markedProgram, markedMap, groundingsMap, times, sortedAtomsToBePredicted, orderedTimes) =
        ground(batch, inps, stateHandler, withInputTheory, streaming)
      finishedBatch = true
    }
  }
def predict(atom: AtomTobePredicted, markedMap: Map[String, Clause]) = {
val currentAtom = atom.atom
if (!alreadyProcessedAtoms.contains(currentAtom)) {
if (feedBackGap != 0) atomCounter += 1 // used for experiments with the feedback gap
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
batchAtoms += 1
stateHandler.totalNumberOfRounds += 1
var prediction = 0.0
var inertiaExpertPrediction = 0.0
var initWeightSum = 0.0
var termWeightSum = 0.0
var totalWeight = 0.0
var selected = ""
val (_prediction, _inertiaExpertPrediction, _initWeightSum, _termWeightSum) =
/*
if (weightUpdateStrategy == "winnow") predict(atom, stateHandler, markedMap)
else predictHedge(atom, stateHandler, markedMap, withInertia)
//else ClassicSleepingExpertsHedge.predictHedge_NO_INERTIA(atom, stateHandler, markedMap)
*/
predictHedge(atom, stateHandler, markedMap, withInertia)
prediction = _prediction
inertiaExpertPrediction = _inertiaExpertPrediction
initWeightSum = _initWeightSum
termWeightSum = _termWeightSum
val feedback = getFeedback(atom, Map[String, Double](), None, None, trueAtoms)
val predictedLabel = if (prediction >= hedgePredictionThreshold) "true" else "false"
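      // The Hedge-style update that would follow here (a sketch; not wired in yet):
      // each awake expert i suffers a loss l_i in [0,1] w.r.t. the feedback and is
      // penalized multiplicatively, w_i <- w_i * beta^(l_i), with beta = e^(-learningRate)
      // for the learning rate passed in by the caller (cf. ExpertAdviceFunctions.process).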
}
}
}
| 3,281 | 33.914894 | 121 | scala |
// File: OLED-master/src/main/scala/oled/mwua/RuleEnsemble.scala

/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua
import app.runutils.RunningOptions
import logic.{Clause, Theory}
/**
* Created by nkatz at 10/2/2019
*/
class RuleEnsemble {
var initiationRules: List[Clause] = List[Clause]()
var terminationRules: List[Clause] = List[Clause]()
/* The "action" variable here is either "add" or "replace" */
def updateRules(newRules: List[Clause], action: String, inps: RunningOptions) = {
newRules foreach { rule => if (rule.refinements.isEmpty) rule.generateCandidateRefs(inps.globals) }
val (init, term) = newRules.partition(x => x.head.functor == "initiatedAt")
action match {
case "add" =>
initiationRules = initiationRules ++ init
terminationRules = terminationRules ++ term
case "replace" =>
initiationRules = init
terminationRules = term
}
}
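  // Usage sketch: updateRules(List(newTermRule), "add", inps) appends a freshly
  // generated termination rule (with its candidate refinements populated) to the
  // termination part, while action = "replace" swaps in a whole new rule set.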
/* Currently not used anywhere. */
def removeZeroWeights = {
    // Maybe I should remove rules with zero weight only when they have at least one literal in their body...
val noZeroInit = initiationRules.filter(x => x.w_pos > 0.0)
val noZeroTerm = terminationRules.filter(x => x.w_pos > 0.0)
initiationRules = noZeroInit
terminationRules = noZeroTerm
}
val initiationRuleSets: scala.collection.mutable.Map[Int, Set[Int]] = scala.collection.mutable.Map[Int, Set[Int]]()
val terminationRuleSets: scala.collection.mutable.Map[Int, Set[Int]] = scala.collection.mutable.Map[Int, Set[Int]]()
  def updateRuleSets(awakeFraction: String, ruleSet: scala.collection.mutable.Map[Int, Set[Int]], markedMap: Map[String, Clause]) = {
    // TODO: not implemented yet (presumably meant to maintain the awake-rule sets above).
  }
/*
// From each subsumption lattice return the rule with the highest weight. This is
// used in a cross-validation setting
def outputRules = {
def findBest(c: Clause) = {
val _bestRef = c.refinements.sortBy(x => -x.w_pos)
if (_bestRef.nonEmpty) {
val bestRef = _bestRef.head
if (c.w_pos > bestRef.w_pos) c else bestRef
} else {
c
}
}
val bestInit = initiationRules.map(x => findBest(x))
val bestTerm = terminationRules.map(x => findBest(x))
(bestInit, bestTerm)
}
*/
/*
* "rules" is used in case we are not learning with the Event Calculus. In the opposite case this var it is empty.
* */
var rules: List[Clause] = List[Clause]()
// if testHandCrafted is true we do not update weights or structure.
def merged(inps: RunningOptions, testHandCrafted: Boolean = false) = {
val _merged = if (rules.isEmpty) Theory(initiationRules ++ terminationRules) else Theory(rules)
if (testHandCrafted) {
_merged
} else {
_merged.clauses foreach (rule => if (rule.refinements.isEmpty) rule.generateCandidateRefs(inps.globals))
// add the bottom rules here as well
val mergedWithRefs = Theory((_merged.clauses ++ _merged.clauses.flatMap(_.refinements)) ++ _merged.clauses.map(x => x.supportSet.clauses.head))
//val mergedWithRefs = Theory( _merged.clauses ++ _merged.clauses.flatMap(_.refinements) )
mergedWithRefs
}
}
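  // Usage sketch: merged(inps) yields the full expert pool (top rules, their
  // refinements and their bottom rules) for weighted prediction, whereas
  // merged(inps, testHandCrafted = true) returns the rules as-is, e.g. for
  // evaluating a fixed hand-crafted theory.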
/* Currently not used anywhere. */
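  // e.g. initiation weights (2.0, 1.0) and termination weights (1.0) normalize to
  // (0.5, 0.25) and (0.25): the ensemble's weights then sum to 1.0 across both parts.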
def normalizeWeights = {
val initWeightsSum = initiationRules.map(x => x.w_pos).sum
val termWeightsSum = terminationRules.map(x => x.w_pos).sum
val totalWeight = initWeightsSum + termWeightsSum
initiationRules.foreach(x => x.w_pos = x.w_pos / totalWeight.toDouble)
terminationRules.foreach(x => x.w_pos = x.w_pos / totalWeight.toDouble)
}
}
| 4,141 | 35.017391 | 149 | scala |
// File: OLED-master/src/main/scala/oled/mwua/Runner.scala

/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua
import akka.actor.{ActorSystem, Props}
import app.runners.MLNDataHandler
import app.runners.MLNDataHandler.MLNDataOptions
import app.runutils.CMDArgs
import app.runutils.IOHandling.MongoSource
import com.mongodb.casbah.commons.MongoDBObject
import com.mongodb.casbah.{MongoClient, MongoCollection}
import com.typesafe.scalalogging.LazyLogging
import experiments.caviar.FullDatasetHoldOut.MongoDataOptions
import experiments.caviar.{FullDatasetHoldOut, MeetingTrainTestSets}
import experiments.datautils.caviar_data.CopyCAVIAR.{mc, newDB, newDBName}
import logic.Examples.Example
import utils.DataUtils.Interval
import com.mongodb.casbah.Imports._
/**
* Created by nkatz at 2/11/2018
*/
//--inpath=/home/nkatz/dev/BKExamples/BK-various-taks/DevTest/caviar-bk/meeting --train=caviar --delta=0.001 --prune=0.8 --repfor=5 --chunksize=50 --try-more-rules=false --prequential=true --target=meeting --preprune=0.9 --onlineprune=false --spdepth=1
object Runner extends LazyLogging {
def main(args: Array[String]) = {
val argsok = CMDArgs.argsOk(args)
if (!argsok._1) {
logger.error(argsok._2); System.exit(-1)
} else {
val runningOptions = CMDArgs.getOLEDInputArgs(args)
// This is the running setting with DefaultMongoDataOptions and the getMongoData function found in this class
/*
val trainingDataOptions = new DefaultMongoDataOptions(
dbName = runningOptions.train,
collectionName = runningOptions.mongoCollection,
chunkSize = runningOptions.chunkSize,
limit = runningOptions.dataLimit,
targetConcept = runningOptions.targetHLE,
sortDbByField = "None"
)
val testingDataOptions = trainingDataOptions
val trainingDataFunction: DefaultMongoDataOptions => Iterator[Example] = getMongoData
val testingDataFunction: DefaultMongoDataOptions => Iterator[Example] = getMongoData
val system = ActorSystem("HoeffdingLearningSystem")
val startMsg = if (runningOptions.evalth != "None") "eval" else "start"
system.actorOf(Props(new Learner(runningOptions, trainingDataOptions, testingDataOptions, trainingDataFunction,
testingDataFunction)), name = "Learner") ! startMsg
*/
// This is the running setting in the object FullDatasetHoldOut
/*
val train1 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-2-meeting-moving", "caviar-video-5",
"caviar-video-6", "caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-4", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
*/
///*
val train1 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-2-meeting-moving", "caviar-video-5",
"caviar-video-6", "caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-4", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
//*/
// just a re-ordering of train1, so that videos with positive examples are not left at the end of the sequence
val train2 =
Vector("caviar-video-21-meeting-moving", "caviar-video-7", "caviar-video-28-meeting", "caviar-video-25", "caviar-video-30",
"caviar-video-11", "caviar-video-6", "caviar-video-14-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-20-meeting-moving", "caviar-video-13-meeting", "caviar-video-19-meeting-moving",
"caviar-video-12-moving", "caviar-video-1-meeting-moving", "caviar-video-9", "caviar-video-16", "caviar-video-23-moving",
"caviar-video-29", "caviar-video-5", "caviar-video-22-meeting-moving", "caviar-video-18", "caviar-video-4",
"caviar-video-24-meeting-moving", "caviar-video-8", "caviar-video-10", "caviar-video-2-meeting-moving",
"caviar-video-15", "caviar-video-3", "caviar-video-17")
val train3 = Vector("caviar-streaming-meeting")
val openSSH = Vector("openssh")
//val trainShuffled = scala.util.Random.shuffle(train1)
//logger.info(s"\nData order:\n$trainShuffled")
/* This is for running with the entire CAVIAR (no test set)*/
///*
val trainingDataOptions =
new MongoDataOptions(dbNames = train1, //trainShuffled ,//dataset._1,
chunkSize = runningOptions.chunkSize, targetConcept = runningOptions.targetHLE, sortDbByField = "time", what = "training")
val testingDataOptions = trainingDataOptions
//*/
/* This is for running on the training set and then performing prequential evaluation on the test set. */
/*
val caviarNum = args.find(x => x.startsWith("caviar-num")).get.split("=")(1)
val trainSet = Map(1 -> MeetingTrainTestSets.meeting1, 2 -> MeetingTrainTestSets.meeting2, 3 -> MeetingTrainTestSets.meeting3,
4 -> MeetingTrainTestSets.meeting4, 5 -> MeetingTrainTestSets.meeting5, 6 -> MeetingTrainTestSets.meeting6,
7 -> MeetingTrainTestSets.meeting7, 8 -> MeetingTrainTestSets.meeting8, 9 -> MeetingTrainTestSets.meeting9,
10 -> MeetingTrainTestSets.meeting10)
//val dataset = MeetingTrainTestSets.meeting1
val dataset = trainSet(caviarNum.toInt)
val trainingDataOptions =
new MongoDataOptions(dbNames = dataset._1,//trainShuffled, //
chunkSize = runningOptions.chunkSize, targetConcept = runningOptions.targetHLE, sortDbByField = "time", what = "training")
val testingDataOptions =
new MongoDataOptions(dbNames = dataset._2,
chunkSize = runningOptions.chunkSize, targetConcept = runningOptions.targetHLE, sortDbByField = "time", what = "testing")
*/
val trainingDataFunction: MongoDataOptions => Iterator[Example] = FullDatasetHoldOut.getMongoData
val testingDataFunction: MongoDataOptions => Iterator[Example] = FullDatasetHoldOut.getMongoData
val system = ActorSystem("HoeffdingLearningSystem")
val startMsg = "start"
system.actorOf(Props(new Learner_NEW(runningOptions, trainingDataOptions, testingDataOptions, trainingDataFunction,
testingDataFunction)), name = "Learner") ! startMsg
/*
// This is used to generate the "streaming" version of CAVIAR, where each entry in the database
// consists of the observations at time t plus the labels at time t+1.
val data = trainingDataFunction(trainingDataOptions)
val dataPairs = data.sliding(2)
val mc = MongoClient()
val newDBName = s"caviar-streaming-${runningOptions.targetHLE}"
val newDB = mc(newDBName)("examples")
mc.dropDatabase(newDBName)
var count = 0
dataPairs foreach { pair =>
val (first, second) = (pair.head, pair.tail.head)
// The predictionTime atom is used by Aux.computeRuleGroundings to generate query atoms at the appropriate time point
//val observations = first.narrative :+ List(s"time(${first.time.toInt+40})", s"predictionTime(${first.time.toInt+40})")
val observations = first.narrative :+ s"time(${first.time.toInt+40})"
val labels = second.annotation
//val entry = MongoDBObject("time" -> first.time) ++ ("annotation" -> labels ) ++ ("narrative" -> observations)
val entry = MongoDBObject("time" -> count) ++ ("annotation" -> labels ) ++ ("narrative" -> observations)
count += 1
if (labels.nonEmpty) println(entry)
newDB.insert(entry)
}
*/
}
}
private class DefaultMongoDataOptions(val dbName: String, val collectionName: String = "examples", val chunkSize: Int = 1,
val limit: Double = Double.PositiveInfinity.toInt, val targetConcept: String = "None",
val sortDbByField: String = "time", val sort: String = "ascending",
val intervals: List[Interval] = Nil, val examplesIds: List[String] = Nil) extends MongoSource
def getMongoData(opts: DefaultMongoDataOptions): Iterator[Example] = {
val mc = MongoClient()
val collection: MongoCollection = mc(opts.dbName)(opts.collectionName)
val data = opts.allData(collection, opts.sort, opts.sortDbByField) map { x =>
val e = Example(x)
opts.targetConcept match {
case "None" => new Example(annot = e.annotation, nar = e.narrative, _time = e.time)
case _ => new Example(annot = e.annotation filter (_.contains(opts.targetConcept)), nar = e.narrative, _time = e.time)
}
}
    if (opts.chunkSize <= 1) data
    else {
      data.grouped(opts.chunkSize).map { x =>
        //data.sliding(opts.chunkSize).map { x =>
        x.foldLeft(Example()) { (z, y) =>
          new Example(annot = z.annotation ++ y.annotation, nar = z.narrative ++ y.narrative, _time = x.head.time)
        }
      }
    }
}
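  /* Note on the chunking above: with chunkSize = n > 1, n consecutive examples are merged
   * into a single Example whose annotation and narrative are the concatenations of the
   * group's, and whose time-stamp is that of the group's first example. */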
}
| 10,573 | 46.846154 | 252 | scala |
OLED | OLED-master/src/main/scala/oled/mwua/Runner_Streaming.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua
import akka.actor.{ActorSystem, Props}
import app.runutils.CMDArgs
import app.runutils.IOHandling.MongoSource
import com.mongodb.casbah.{MongoClient, MongoCollection}
import com.typesafe.scalalogging.LazyLogging
import experiments.caviar.FullDatasetHoldOut
import experiments.caviar.FullDatasetHoldOut.MongoDataOptions
import logic.Examples.Example
import oled.mwua.Runner.logger
object Runner_Streaming extends LazyLogging {
def main(args: Array[String]) = {
val argsok = CMDArgs.argsOk(args)
if (!argsok._1) {
logger.error(argsok._2); System.exit(-1)
} else {
val runningOptions = CMDArgs.getOLEDInputArgs(args)
val trainingDataOptions = new StreamingMongoDataOptions(dbName = runningOptions.train, targetConcept = runningOptions.targetHLE)
val testingDataOptions = trainingDataOptions
val trainingDataFunction: StreamingMongoDataOptions => Iterator[Example] = getMongoData
val testingDataFunction: StreamingMongoDataOptions => Iterator[Example] = getMongoData
val system = ActorSystem("HoeffdingLearningSystem")
      // Use "start" instead to evaluate a hand-crafted theory; the setup is hard-coded in Learner_NEW
val startMsg = "start-streaming" //"start"//if (runningOptions.evalth != "None") "eval" else "start"
system.actorOf(Props(new Learner_NEW(runningOptions, trainingDataOptions, testingDataOptions, trainingDataFunction,
testingDataFunction)), name = "Learner") ! startMsg
}
}
class StreamingMongoDataOptions(val dbName: String, val chunkSize: Int = 1,
val limit: Double = Double.PositiveInfinity.toInt,
val targetConcept: String = "None", val sortDbByField: String = "time",
val sort: String = "ascending") extends MongoSource
def getMongoData(opts: StreamingMongoDataOptions): Iterator[Example] = {
val mc = MongoClient()
val collection: MongoCollection = mc(opts.dbName)("examples")
val dataIterator = opts.allData(collection, opts.sort, opts.sortDbByField) map { x =>
val e = Example(x)
opts.targetConcept match {
case "None" => new Example(annot = e.annotation, nar = e.narrative, _time = e.time)
case _ => new Example(annot = e.annotation filter (_.contains(opts.targetConcept)), nar = e.narrative, _time = e.time)
}
}
dataIterator //.take(1000)
}
}
| 3,102 | 38.782051 | 141 | scala |
OLED | OLED-master/src/main/scala/oled/mwua/StateHandler.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua
import logic.Clause
import oled.mwua.HelperClasses.AtomTobePredicted
/**
* Created by nkatz at 10/2/2019
*/
//val delayedUpdate = new mwua.StateHandler.DelayedUpdate(atom, prediction, inertiaExpertPrediction,
// predictedLabel, feedback, stateHandler, epsilon, markedMap, totalWeight)
class DelayedUpdate(val atom: AtomTobePredicted, val prediction: Double, val inertiaExpertPrediction: Double,
val initWeightSum: Double, val termWeightSum: Double, val predictedLabel: String,
val markedMap: Map[String, Clause], val feedback: String, val stateHandler: StateHandler,
val learningRate: Double, val weightUpdateStrategy: String, val withInertia: Boolean = true,
val orderedTimes: Vector[Int]) {
var generateNewRuleFlag: Boolean = false
}
class StateHandler {
/*===============================================================================================================*/
/* This is all helper/test code for updating weights after mini-batch prediction from all mistakes cumulatively. */
/* ============================================ Test-helper code start ==========================================*/
var delayedUpdates = Vector.empty[DelayedUpdate]
def clearDelayedUpdates = delayedUpdates = Vector.empty[DelayedUpdate]
/* ============================================ Test-helper code end ============================================*/
/*===============================================================================================================*/
/*------------------------------------*/
/* Stuff related to the rule ensemble */
/*------------------------------------*/
var ensemble = new RuleEnsemble
val inertiaExpert = new InertiaExpert
def normalizeWeights(awakeExperts: Vector[Clause], currentFluent: String) = {
val totalAwakeRulesWeight = awakeExperts.map(x => x.w_pos).sum
val inertiaWeight = inertiaExpert.getWeight(currentFluent)
val totalWeight = totalAwakeRulesWeight + inertiaWeight
awakeExperts.foreach(x => x.w_pos = x.w_pos / totalWeight.toDouble)
if (inertiaWeight > 0) inertiaExpert.updateWeight(currentFluent, inertiaWeight / totalWeight)
}
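  // After this call the awake experts' weights, plus the inertia expert's weight for
  // currentFluent (when it is positive), sum to 1.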
def addRule(rule: Clause) = {
if (rule.head.functor.contains("initiated")) {
ensemble.initiationRules = ensemble.initiationRules :+ rule
} else if (rule.head.functor.contains("terminated")) {
ensemble.terminationRules = ensemble.terminationRules :+ rule
} else {
ensemble.rules = ensemble.rules :+ rule
}
}
def removeRule(rule: Clause) = {
def remove(clauses: List[Clause], r: Clause) = {
clauses.filter(x => !x.equals(r))
}
if (rule.head.functor.contains("initiated")) {
ensemble.initiationRules = remove(ensemble.initiationRules, rule)
} else if (rule.head.functor.contains("terminated")) {
ensemble.terminationRules = remove(ensemble.terminationRules, rule)
} else {
ensemble.rules = remove(ensemble.rules, rule)
}
}
def pruneUnderPerformingRules(weightThreshold: Double) = {
def pruneRefinements(topRule: Clause) = {
val goodRefs = topRule.refinements.filter(x => x.w_pos >= weightThreshold)
topRule.refinements = goodRefs
}
ensemble.initiationRules.foreach(x => pruneRefinements(x))
ensemble.terminationRules.foreach(x => pruneRefinements(x))
val goodInitRules = ensemble.initiationRules.filter(x => x.body.isEmpty || (x.body.nonEmpty && x.w_pos >= weightThreshold))
ensemble.initiationRules = goodInitRules
val goodTermRules = ensemble.terminationRules.filter(x => x.body.isEmpty || (x.body.nonEmpty && x.w_pos >= weightThreshold))
ensemble.terminationRules = goodTermRules
}
// "what" here is either "weight" of "score". If what=weight then acceptableScore
// should be a weight threshold, e.g. 0.005. what=score then acceptableScore is a
// threshold on the rule's precision set via the --prune parameter. Pruning with score
// does not work, a large number of redundant rules have very good score but very low coverage.
def pruneRules(what: String, acceptableScore: Double, logger: org.slf4j.Logger) = {
/* Remove rules by score */
def removeBadRules(rules: List[Clause]) = {
rules.foldLeft(List.empty[Clause]) { (accum, rule) =>
if (what == "score") {
if (rule.body.length >= 2 && rule.score <= 0.5) accum else accum :+ rule
} else {
if (rule.body.length >= 3 && rule.w_pos < acceptableScore) {
logger.info(s"\nRemoved rule (weight threshold is $acceptableScore)\n${rule.showWithStats}")
accum
} else {
accum :+ rule
}
}
}
}
ensemble.initiationRules = removeBadRules(ensemble.initiationRules)
ensemble.terminationRules = removeBadRules(ensemble.terminationRules)
}
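  // Example call (hypothetical): stateHandler.pruneRules("weight", 0.005, logger) discards
  // every rule with at least three body literals whose weight has dropped below 0.005.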
/*-----------------------------*/
/* Grounding-related variables */
/*-----------------------------*/
var groundingTimes: Vector[Double] = Vector[Double]()
def updateGrndsTimes(t: Double) = { groundingTimes = groundingTimes :+ t }
/*-----------------*/
/* Stats variables */
/*-----------------*/
var totalTPs = 0
var totalFPs = 0
var totalFNs = 0
var totalTNs = 0
var batchCounter = 0
var perBatchError: Vector[Int] = Vector.empty[Int]
var runningF1Score: Vector[Double] = Vector.empty[Double]
var runningRulesNumber: Vector[Int] = Vector.empty[Int]
def updateRunningF1Score = {
val currentPrecision = totalTPs.toDouble / (totalTPs + totalFPs)
val currentRecall = totalTPs.toDouble / (totalTPs + totalFNs)
val _currentF1Score = 2 * currentPrecision * currentRecall / (currentPrecision + currentRecall)
val currentF1Score = if (_currentF1Score.isNaN) 0.0 else _currentF1Score
runningF1Score = runningF1Score :+ currentF1Score
}
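  // For instance, with totalTPs = 8, totalFPs = 2 and totalFNs = 2 we get
  // precision = recall = 0.8, so 0.8 is appended to the running F1-score vector.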
var receivedFeedback = 0
var totalNumberOfRounds = 0
var totalAtoms = 0
var predictedWithInitRule = 0
var predictedWithTermRule = 0
var predictedWithInertia = 0
}
| 6,751 | 37.582857 | 128 | scala |
OLED | OLED-master/src/main/scala/oled/mwua/StructureLearning.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua
import app.runutils.RunningOptions
import logic.Examples.Example
import logic.{Clause, Theory}
import oled.functions.SingleCoreOLEDFunctions
import oled.mwua.ExpertAdviceFunctions.{generateNewRule, generateNewRule_1, is_FN_mistake, is_FP_mistake}
import oled.mwua.HelperClasses.AtomTobePredicted
object StructureLearning {
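  // Partitions the given top-level rules into those that fired on the current atom
  // (awake) and those that did not (asleep), judging by the awake expert ids.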
def splitAwakeAsleep(rulesToSplit: List[Clause], awakeIds: Set[String]) = {
    val rulesToSplitIds = rulesToSplit.map(_.##).toSet
val (topLevelAwakeRules, topLevelAsleepRules) = rulesToSplit.foldLeft(Vector.empty[Clause], Vector.empty[Clause]) { (x, rule) =>
val isAwake = awakeIds.contains(rule.##.toString)
val isTopLevel = rulesToSplitIds.contains(rule.##)
if (isAwake) if (isTopLevel) (x._1 :+ rule, x._2) else (x._1, x._2) // then it's a refinement rule
else if (isTopLevel) (x._1, x._2 :+ rule) else (x._1, x._2) // then it's a refinement rule
}
(topLevelAwakeRules, topLevelAsleepRules)
}
def updateStructure_NEW_HEDGE(
atom: AtomTobePredicted,
markedMap: Map[String, Clause],
predictedLabel: String,
feedback: String,
batch: Example,
currentAtom: String,
inps: RunningOptions,
logger: org.slf4j.Logger,
stateHandler: StateHandler,
percentOfMistakesBeforeSpecialize: Int,
randomizedPrediction: Boolean,
selected: String,
specializeAllAwakeOnMistake: Boolean,
conservativeRuleGeneration: Boolean,
generateNewRuleFlag: Boolean) = {
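    // Mistake-driven strategy: on an FP we try to generate a new termination rule and
    // to specialize the awake initiation rules; on an FN, symmetrically, we generate
    // initiation rules and specialize the awake termination rules.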
def getAwakeBottomRules(what: String) = {
if (what == "initiatedAt") atom.initiatedBy.filter(x => markedMap(x).isBottomRule)
else atom.terminatedBy.filter(x => markedMap(x).isBottomRule)
}
var updatedStructure = false
if (is_FP_mistake(predictedLabel, feedback)) {
val awakeBottomRules = getAwakeBottomRules("terminatedAt")
if (generateNewRuleFlag) {
      // Generate a new termination rule, provided that no awake bottom rule already
      // covers this FP and the inertia expert knows about the fluent:
if (awakeBottomRules.isEmpty) {
if (stateHandler.inertiaExpert.knowsAbout(atom.fluent)) {
updatedStructure = generateNewRule(batch, currentAtom, inps, "FP", logger, stateHandler, "terminatedAt", 1.0)
}
}
}
// Also, in the case of an FP mistake we try to specialize awake initiation rules.
if (atom.initiatedBy.nonEmpty) {
// We are doing this after each batch
///*
val (topLevelAwakeRules, topLevelAsleepRules) = splitAwakeAsleep(stateHandler.ensemble.initiationRules, atom.initiatedBy.toSet)
val expandedInit = SingleCoreOLEDFunctions.
expandRules(Theory(topLevelAwakeRules.toList.filter(x => x.refinements.nonEmpty)), inps, logger)
if (expandedInit._2) {
stateHandler.ensemble.initiationRules = expandedInit._1.clauses ++ topLevelAsleepRules
updatedStructure = true
}
//*/
}
}
if (is_FN_mistake(predictedLabel, feedback)) {
val awakeBottomRules = getAwakeBottomRules("initiatedAt")
if (generateNewRuleFlag) { // atom.initiatedBy.isEmpty
// We don't have firing initiation rules. Generate one.
if (awakeBottomRules.isEmpty) {
// Let's try this: Discard the entire initiated ensemble generated so far
//if (atom.initiatedBy.nonEmpty) stateHandler.ensemble.initiationRules = Nil
updatedStructure = generateNewRule(batch, currentAtom, inps, "FN", logger, stateHandler, "initiatedAt", 1.0)
}
} else {
if (!conservativeRuleGeneration) {
// If we are not in conservative mode we try to generate new initiation rules even if awake initiation
// rules already exist. We only do so if the current example has not already been compressed into an existing
// bottom rule.
if (awakeBottomRules.isEmpty) {
updatedStructure = generateNewRule_1(batch, currentAtom, inps, logger, stateHandler, "initiatedAt", 1.0)
}
}
}
      // Also, in the case of an FN mistake we try to specialize awake termination rules.
if (atom.terminatedBy.nonEmpty) {
// We are doing this after each batch
///*
val (topLevelAwakeRules, topLevelAsleepRules) = splitAwakeAsleep(stateHandler.ensemble.terminationRules, atom.terminatedBy.toSet)
val expandedInit = SingleCoreOLEDFunctions.
expandRules(Theory(topLevelAwakeRules.toList.filter(x => x.refinements.nonEmpty)), inps, logger)
if (expandedInit._2) {
stateHandler.ensemble.terminationRules = expandedInit._1.clauses ++ topLevelAsleepRules
updatedStructure = true
}
//*/
}
}
updatedStructure
}
}
| 5,428 | 40.128788 | 137 | scala |
OLED | OLED-master/src/main/scala/oled/mwua/Worker.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua
import akka.actor.Actor
import app.runutils.RunningOptions
import logic.Clause
import oled.functions.SingleCoreOLEDFunctions._
import oled.mwua.MessageTypes.{FinishedBatchMsg, ProcessBatchMsg}
import org.slf4j.LoggerFactory
/**
* Created by nkatz at 1/11/2018
*/
class Worker(val inps: RunningOptions) extends Actor {
private val logger = LoggerFactory.getLogger(self.path.name)
def receive = {
case msg: ProcessBatchMsg =>
val p = utils.Utils.time { processExample(msg.theory, msg.batch, msg.targetClass, inps, logger, learningWeights = false) }
val (r, batchTime) = (p._1, p._2)
val fmsg = new FinishedBatchMsg(r._1, r._2, r._3, r._4, r._5, r._6, batchTime, msg.targetClass)
sender ! fmsg
}
}
| 1,454 | 31.333333 | 128 | scala |
OLED | OLED-master/src/main/scala/oled/mwua/bandits/BanditFunctions.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua.bandits
/**
* Created by nkatz at 17/2/2019
*/
object BanditFunctions {
}
| 794 | 29.576923 | 72 | scala |
OLED | OLED-master/src/main/scala/oled/mwua/bandits/PartialExpertTree.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua.bandits
import logic.Clause
/**
* Created by nkatz at 17/2/2019
*/
class PartialExpertTree(val bottomRule: Clause) {
val rootNode = new RootNode(Clause(bottomRule.head))
}
trait TreeNode {
var avgReward = 0.0
var visits = 0
var children: Vector[InnerNode] = Vector[InnerNode]()
val rule = Clause() // the rule represented by this node
val isRootNode: Boolean = false
def addChild(x: InnerNode): Unit = children = children :+ x
def isLeafNode() = this.children.isEmpty
// Running mean reward
def updateMeanReward(newReward: Double) = {
avgReward = ((avgReward * visits) + newReward) / (visits + 1)
visits += 1
}
def getBestChild(exploreRate: Double): InnerNode = this.children.maxBy(x => x.getMCTSScore(exploreRate))
/* Abstract methods */
def getMCTSScore(exploreRate: Double): Double
def getDepth(): Int
def getAncestorsPath(): Vector[TreeNode]
def propagateReward(reward: Double) = {
val ancestors = getAncestorsPath()
ancestors foreach { node =>
node.updateMeanReward(reward)
}
}
}
class RootNode(override val rule: Clause) extends TreeNode {
override val isRootNode = true
this.visits = 1 // increment its visits upon generation.
override def getMCTSScore(exploreRate: Double): Double = 0.0
override def getDepth(): Int = 0
override def getAncestorsPath() = Vector[TreeNode]()
def descendToBestChild(exploreRate: Double) = {
var reachedLeaf = false
var bestChild = this.getBestChild(exploreRate)
while (!reachedLeaf) {
if (!bestChild.isLeafNode()) {
bestChild = bestChild.getBestChild(exploreRate)
} else {
reachedLeaf = true
}
}
bestChild
}
}
class InnerNode(override val rule: Clause, val parentNode: TreeNode) extends TreeNode {
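  // UCB1 score: the first term favors exploitation (high average reward), the second
  // favors exploration (nodes rarely visited relative to their parent's visit count).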
override def getMCTSScore(exploreRate: Double) = {
avgReward + exploreRate * Math.sqrt(2 * Math.log(parentNode.visits) / visits)
}
override def getDepth() = {
var reachedRoot = false
var parent = this.parentNode
var depth = 1
while (!reachedRoot) {
parent match {
case _: InnerNode =>
depth = depth + 1
parent = parent.asInstanceOf[InnerNode].parentNode
case _: RootNode => reachedRoot = true
}
}
depth
}
override def getAncestorsPath() = {
var reachedRoot = false
var parent = this.parentNode
var ancestors = Vector[TreeNode]()
while (!reachedRoot) {
parent match {
case _: InnerNode =>
ancestors = ancestors :+ parent
parent = parent.asInstanceOf[InnerNode].parentNode
case _: RootNode =>
ancestors = ancestors :+ parent
reachedRoot = true
}
}
ancestors
}
}
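/* A minimal usage sketch (hypothetical, not part of the original code): `bottomRule`
 * stands in for any Clause produced elsewhere (e.g. via Clause.parse). It records a
 * reward for one node and lets UCB1 (exploreRate ~ 1/sqrt(2), the usual UCT constant)
 * guide the descent to a leaf. */
object PartialExpertTreeSketch {
  def demo(bottomRule: Clause): Unit = {
    val tree = new PartialExpertTree(bottomRule)
    val child = new InnerNode(Clause(bottomRule.head), tree.rootNode)
    tree.rootNode.addChild(child)
    child.updateMeanReward(1.0) // reward the visited node itself
    child.propagateReward(1.0) // back-propagate the reward to its ancestors (here, the root)
    val leaf = tree.rootNode.descendToBestChild(exploreRate = 0.707)
    println(s"Average reward at the selected leaf: ${leaf.avgReward}")
  }
}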
| 3,447 | 25.9375 | 106 | scala |
OLED | OLED-master/src/main/scala/oled/mwua/experiments_/ExpLearner.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua.experiments_
import app.runutils.IOHandling.InputSource
import app.runutils.{Globals, RunningOptions}
import com.typesafe.scalalogging.LazyLogging
import logic.{Clause, Theory}
import logic.Examples.Example
import oled.mwua.{ExpertAdviceFunctions, StateHandler}
import org.slf4j.LoggerFactory
import scala.util.matching.Regex
/**
* Created by nkatz at 31/3/2019
*/
class ExpLearner[T <: InputSource](
val inps: RunningOptions,
val trainingDataOptions: T,
val testingDataOptions: T,
val trainingDataFunction: T => Iterator[Example],
val testingDataFunction: T => Iterator[Example],
val writeExprmtResultsTo: String = "") extends LazyLogging {
val learningRate: Double = Globals.sleepingExpertsLearningRate
val receiveFeedbackBias: Double = Globals.sleepingExpertsFeedBackBias
val withInertia: Boolean = Globals.hedgeInertia
val epsilon = 0.9 // used in the randomized version
val randomizedPrediction = false
// If this is false, some non-determinism is introduced (number of mistakes may vary slightly from round to round)
val specializeAllAwakeRulesOnFPMistake = false
// This is either 'winnow' or 'hedge'
val weightUpdateStrategy = "hedge" //"winnow"
val conservativeRuleGeneration = false
// A rule must make this much % of the total FPs before it is specialized
val percentOfMistakesBeforeSpecialize = 0
// have this set to "" for a regular run without an input theory
//val inputTheoryFile = "/home/nkatz/Desktop/theory"
val inputTheoryFile = ""
val inputTheory: List[Clause] = {
def matches(p: Regex, str: String) = p.pattern.matcher(str).matches
if (inputTheoryFile == "") {
Nil
} else {
val rules = scala.io.Source.fromFile(inputTheoryFile).getLines.toList.filter(line => !matches("""""".r, line) && !line.startsWith("%"))
val rulesParsed = rules.map(r => Clause.parse(r))
rulesParsed
}
}
val stateHandler: StateHandler = {
val stateHandler = new StateHandler
if (inputTheory.isEmpty) {
stateHandler
} else {
val (inputInitRules, inputTermRules) = inputTheory.foldLeft(List.empty[Clause], List.empty[Clause]){ (x, y) =>
if (y.head.functor.contains("initiated")) (x._1 :+ y, x._2) else (x._1, x._2 :+ y)
}
stateHandler.ensemble.initiationRules = inputInitRules
stateHandler.ensemble.terminationRules = inputTermRules
stateHandler
}
}
//private val logger = LoggerFactory.getLogger(self.path.name)
private val withec = true
// Control learning iterations over the data
private var repeatFor = inps.repeatFor
// Used to count examples for holdout evaluation
private var exampleCounter = 0
// Local data variable. Cleared at each iteration (in case repfor > 1).
private var data = Iterator[Example]()
// This is optional. A testing set (for holdout evaluation) may not be provided.
private var testingData = Iterator[Example]()
  // Counts the number of processed batches. Used to determine when to
// perform holdout evaluation on the test set. Incremented whenever a
// new batch is fetched (see the getNextBatch() method)
private var batchCounter = 0
private var startTime = System.nanoTime()
private var endTime = System.nanoTime()
  // Get the training data from the current input source
private def getTrainData = trainingDataFunction(trainingDataOptions)
private def getTestingData = testingDataFunction(testingDataOptions)
private def getNextBatch(lleNoise: Boolean = false) = {
this.batchCounter += 1
if (data.isEmpty) {
Example()
} else {
if (!lleNoise) {
data.next()
} else {
val currentBatch = data.next()
val noisyNarrative = {
currentBatch.narrative map { x =>
x.replaceAll("active", "active_1")
}
}
Example(annot = currentBatch.annotation, nar = noisyNarrative, _time = currentBatch.time)
}
}
}
def run() = {
data = getTrainData
if (this.data.isEmpty) {
logger.error(s"Input source ${inps.train} is empty.")
System.exit(-1)
}
var done = false
var out = (0, 0, 0, 0.0, Vector.empty[Double], Vector.empty[Double])
while (!done) {
val nextBatch = getNextBatch(lleNoise = false)
logger.info(s"Processing batch $batchCounter")
if (nextBatch.isEmpty) {
logger.info(s"Finished the data.")
endTime = System.nanoTime()
logger.info("Done.")
//workers foreach(w => w ! PoisonPill)
out = wrapUp()
done = true
} else {
val trueLabels = nextBatch.annotation.toSet
if (inputTheory.isEmpty) {
ExpertAdviceFunctions.process(nextBatch, nextBatch.annotation.toSet, inps,
stateHandler, trueLabels, learningRate, epsilon, randomizedPrediction,
batchCounter, percentOfMistakesBeforeSpecialize, specializeAllAwakeRulesOnFPMistake,
receiveFeedbackBias, conservativeRuleGeneration, weightUpdateStrategy, withInertia)
} else {
ExpertAdviceFunctions.process(nextBatch, nextBatch.annotation.toSet, inps,
stateHandler, trueLabels, learningRate, epsilon, randomizedPrediction,
batchCounter, percentOfMistakesBeforeSpecialize, specializeAllAwakeRulesOnFPMistake,
receiveFeedbackBias, conservativeRuleGeneration, weightUpdateStrategy, withInertia, inputTheory = Some(inputTheory))
}
}
}
out
}
def wrapUp() = {
if (trainingDataOptions != testingDataOptions) {
// show the info so far:
wrapUp_NO_TEST()
// and do the test
logger.info("\n\nEvaluating on the test set\n\n")
val _stateHandler = new StateHandler
_stateHandler.ensemble = {
val ensemble = stateHandler.ensemble
val init = ensemble.initiationRules.filter(x => x.body.nonEmpty)
val term = ensemble.terminationRules.filter(x => x.body.nonEmpty)
ensemble.initiationRules = init
ensemble.terminationRules = term
ensemble
}
val testData = testingDataFunction(testingDataOptions)
      val _receiveFeedbackBias = 0.0 // Give no feedback here, we're only testing
testData foreach { batch =>
val trueLabels = batch.annotation.toSet
ExpertAdviceFunctions.process(batch, batch.annotation.toSet, inps,
_stateHandler, trueLabels, learningRate, epsilon, randomizedPrediction,
batchCounter, percentOfMistakesBeforeSpecialize, specializeAllAwakeRulesOnFPMistake, _receiveFeedbackBias,
conservativeRuleGeneration, weightUpdateStrategy)
}
logger.info(s"Prequential error vector:\n${_stateHandler.perBatchError.mkString(",")}")
logger.info(s"Prequential error vector (Accumulated Error):\n${_stateHandler.perBatchError.scanLeft(0.0)(_ + _).tail}")
logger.info(s"Total TPs: ${_stateHandler.totalTPs}, Total FPs: ${_stateHandler.totalFPs}, Total FNs: ${_stateHandler.totalFNs}, Total TNs: ${_stateHandler.totalTNs}")
if (_receiveFeedbackBias != 1.0) {
logger.info(s"\nReceived feedback on ${_stateHandler.receivedFeedback} rounds")
}
val tps = _stateHandler.totalTPs
val fps = _stateHandler.totalFPs
val fns = _stateHandler.totalFNs
(tps, fps, fns, 0.0, Vector.empty[Double], Vector.empty[Double])
} else {
wrapUp_NO_TEST()
}
}
def wrapUp_NO_TEST() = {
def show(in: List[Clause]) = {
in.sortBy(x => -x.w_pos).
map(x => x.showWithStats + "\n" + x.refinements.sortBy(x => -x.w_pos).map(x => x.showWithStats).mkString("\n ")).mkString("\n")
}
//logger.info(show(stateHandler.ensemble.initiationRules))
//logger.info(show(stateHandler.ensemble.terminationRules))
logger.info(Theory(stateHandler.ensemble.initiationRules.sortBy(x => -x.w_pos)).showWithStats)
logger.info(Theory(stateHandler.ensemble.terminationRules.sortBy(x => -x.w_pos)).showWithStats)
logger.info(s"Prequential error vector:\n${stateHandler.perBatchError.mkString(",")}")
val accumError = stateHandler.perBatchError.scanLeft(0.0)(_ + _).tail
logger.info(s"Prequential error vector (Accumulated Error):\n${stateHandler.perBatchError.scanLeft(0.0)(_ + _).tail}")
logger.info(s"Prequential F1-score:\n${stateHandler.runningF1Score}")
logger.info(s"Average prequential F1-score: ${stateHandler.runningF1Score.sum / stateHandler.runningF1Score.length}")
logger.info(s"Total TPs: ${stateHandler.totalTPs}, Total FPs: ${stateHandler.totalFPs}, Total FNs: ${stateHandler.totalFNs}, Total TNs: ${stateHandler.totalTNs}")
logger.info(s"Total time: ${(endTime - startTime) / 1000000000.0}")
if (randomizedPrediction) {
logger.info(s"\nPredicted with initiation rules: ${stateHandler.predictedWithInitRule} times")
logger.info(s"\nPredicted with terminated rules: ${stateHandler.predictedWithTermRule} times")
logger.info(s"\nPredicted with inertia: ${stateHandler.predictedWithInertia} times")
}
//logger.info(s"Predictions vector:\n${stateHandler.predictionsVector}")
logger.info(s"Total number of rounds: ${stateHandler.totalNumberOfRounds}")
///*
val tps = stateHandler.totalTPs
val fps = stateHandler.totalFPs
val fns = stateHandler.totalFNs
val microPrecision = tps.toDouble / (tps.toDouble + fps.toDouble)
val microRecall = tps.toDouble / (tps.toDouble + fns.toDouble)
val microFscore = (2 * microPrecision * microRecall) / (microPrecision + microRecall)
//println(s"Micro F1-score: $microFscore")
//*/
if (receiveFeedbackBias != 1.0) {
logger.info(s"\nReceived feedback on ${stateHandler.receivedFeedback} rounds")
}
(tps, fps, fns, microFscore, accumError, stateHandler.runningF1Score)
}
}
| 10,789 | 37.673835 | 172 | scala |
OLED | OLED-master/src/main/scala/oled/mwua/experiments_/ExpRunner.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua.experiments_
import java.io.{BufferedWriter, File, FileWriter}
import akka.actor.{ActorSystem, Props}
import app.runutils.{CMDArgs, Globals}
import com.typesafe.scalalogging.LazyLogging
import experiments.caviar.FullDatasetHoldOut.MongoDataOptions
import experiments.caviar.{FullDatasetHoldOut, MeetingTrainTestSets}
import logic.Examples.Example
import oled.mwua.Learner
/**
* Created by nkatz at 2/11/2018
*/
/* NOT USED IN ANYTHING YET!! */
object ExpRunner extends LazyLogging {
def main(args: Array[String]) = {
val learningRates = List(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)
val feedBackBias = List(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)
val inertia = List(true, false)
/*
val dataSets = Vector(MeetingTrainTestSets.meeting1, MeetingTrainTestSets.meeting2,
MeetingTrainTestSets.meeting3,MeetingTrainTestSets.meeting4,
MeetingTrainTestSets.meeting5,MeetingTrainTestSets.meeting6,
MeetingTrainTestSets.meeting7,MeetingTrainTestSets.meeting8,
MeetingTrainTestSets.meeting9,MeetingTrainTestSets.meeting10)
*/
val train2 =
Vector("caviar-video-21-meeting-moving", "caviar-video-7", "caviar-video-28-meeting", "caviar-video-25", "caviar-video-30",
"caviar-video-11", "caviar-video-6", "caviar-video-14-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-20-meeting-moving", "caviar-video-13-meeting", "caviar-video-19-meeting-moving",
"caviar-video-12-moving", "caviar-video-1-meeting-moving", "caviar-video-9", "caviar-video-16", "caviar-video-23-moving",
"caviar-video-29", "caviar-video-5", "caviar-video-22-meeting-moving", "caviar-video-18", "caviar-video-4",
"caviar-video-24-meeting-moving", "caviar-video-8", "caviar-video-10", "caviar-video-2-meeting-moving",
"caviar-video-15", "caviar-video-3", "caviar-video-17")
val argsok = CMDArgs.argsOk(args)
if (!argsok._1) {
logger.error(argsok._2); System.exit(-1)
} else {
val runningOptions = CMDArgs.getOLEDInputArgs(args)
learningRates.foreach { rate =>
Globals.sleepingExpertsLearningRate = rate
feedBackBias.foreach { bias =>
Globals.sleepingExpertsFeedBackBias = bias
inertia.foreach { withInertia =>
Globals.hedgeInertia = withInertia
val trainingDataOptions =
new MongoDataOptions(dbNames = train2, chunkSize = runningOptions.chunkSize,
targetConcept = runningOptions.targetHLE, sortDbByField = "time", what = "training")
val testingDataOptions = trainingDataOptions
val trainingDataFunction: MongoDataOptions => Iterator[Example] = FullDatasetHoldOut.getMongoData
val testingDataFunction: MongoDataOptions => Iterator[Example] = FullDatasetHoldOut.getMongoData
val learner = new ExpLearner(runningOptions, trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction)
val result = learner.run()
//(Int, Int, Int, Double, Vector[Double])
val (tps, fps, fns, fscore, errorVec, fscoreVec) = (result._1, result._2, result._3, result._4, result._5, result._6)
val msg = s"${runningOptions.targetHLE}, rate: $rate, feedback bias: $bias, " +
s"inertia: $withInertia\ntps: $tps, fps: $fps, fns: $fns, total error: ${fps + fns}, " +
s"fscore: $fscore\nerror vector:\n$errorVec\nprequential " +
s"F1-score vector:\n$fscoreVec\nAverage prequential F1-score:${fscoreVec.sum / fscoreVec.length}\n\n"
println(msg)
val fw = new FileWriter(s"/home/nkatz/Desktop/TPLP-2019-results/${runningOptions.targetHLE}", true)
try {
fw.write(msg)
} finally fw.close()
}
}
}
}
}
}
| 4,596 | 40.790909 | 140 | scala |
OLED | OLED-master/src/main/scala/oled/non_blocking/Dispatcher.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.non_blocking
import akka.actor.{Actor, Props}
import app.runutils.IOHandling.InputSource
import app.runutils.{Globals, RunningOptions}
import com.typesafe.scalalogging.LazyLogging
import logic.Examples.Example
import logic.{Clause, LogicUtils, Theory}
import oled.distributed.Structures.{FinalTheoryMessage, Initiated, Terminated}
import oled.functions.DistributedOLEDFunctions.crossVal
/**
* Created by nkatz on 3/14/17.
*
*
* This actor starts two top-level actors to coordinate learning
* the initiated and the terminated part of the theory respectively.
*
*/
class Dispatcher[T <: InputSource](
val dataOptions: List[(T, T => Iterator[Example])],
val inputParams: RunningOptions,
val tasksNumber: Int,
testingDataOptions: T,
testingDataFunction: T => Iterator[Example]) extends Actor with LazyLogging {
private var counter = tasksNumber
private var initTheory = List[Clause]()
private var termTheory = List[Clause]()
private var initTrainingTime = ""
private var termTrainingTime = ""
private var theory = List[Clause]() // this is for future use with single-predicate learning
private var theoryTrainingTime = ""
private var initTotalMsgNum = 0
private var initTotalMsgSize = 0L
private var termTotalMsgNum = 0
private var termTotalMsgSize = 0L
def updateMessages(m: FinalTheoryMessage, what: String) = {
what match {
case "init" =>
initTotalMsgNum = m.totalMsgNum
initTotalMsgSize = m.totalMsgSize
case "term" =>
termTotalMsgNum = m.totalMsgNum
termTotalMsgSize = m.totalMsgSize
case _ => logger.info("UNKNOWN MESSAGE!")
}
}
def receive = {
case "go" =>
context.actorOf(Props(new TopLevelActor(dataOptions, inputParams, new Initiated)), name = "InitTopLevelActor") ! "go"
context.actorOf(Props(new TopLevelActor(dataOptions, inputParams, new Terminated)), name = "TermTopLevelActor") ! "go"
/*---------------------------------------------------------------------------*/
// For debugging (trying to see if the sub-linear speed-up is due to blocking)
/*---------------------------------------------------------------------------*/
case "go-no-communication" =>
context.actorOf(Props(new TopLevelActor(dataOptions, inputParams, new Initiated)), name = "InitTopLevelActor") ! "go-no-communication"
context.actorOf(Props(new TopLevelActor(dataOptions, inputParams, new Terminated)), name = "TermTopLevelActor") ! "go-no-communication"
case msg: FinalTheoryMessage =>
msg.targetPredicate match {
case x: Initiated =>
this.initTheory = msg.theory
this.initTrainingTime = msg.trainingTime
updateMessages(msg, "init")
case x: Terminated =>
this.termTheory = msg.theory
this.termTrainingTime = msg.trainingTime
updateMessages(msg, "term")
case _ =>
this.theory = msg.theory
this.theoryTrainingTime = msg.trainingTime
//updateMessages(msg)
}
counter -= 1
if (counter == 0) {
logger.info(s"\n\nThe initiated part of the theory is\n${Theory(this.initTheory).showWithStats}\nTraining" +
s" time: $initTrainingTime\nTotal messages: $initTotalMsgNum\nTotal message size: $initTotalMsgSize")
logger.info(s"\n\nThe terminated part of the theory is\n${Theory(this.termTheory).showWithStats}\nTraining" +
s" time: $termTrainingTime\nTotal messages: $termTotalMsgNum\nTotal message size: $termTotalMsgSize")
/*
* Cross-validation...
* */
val merged_ = Theory(this.initTheory ++ this.termTheory)
val compressed = Theory(LogicUtils.compressTheory(merged_.clauses))
/*------------------*/
// DEBUGGING-TESTING
/*------------------*/
//val filtered = Theory(compressed.clauses.filter(x => x.tps > 50))
val filtered = compressed
val data = testingDataFunction(testingDataOptions)
val (tps, fps, fns, precision, recall, fscore) =
crossVal(filtered, data = data, handCraftedTheoryFile = inputParams.evalth, globals = inputParams.globals, inps = inputParams)
val time = Math.max(this.initTrainingTime.toDouble, this.termTrainingTime.toDouble)
val theorySize = filtered.clauses.foldLeft(0)((x, y) => x + y.body.length + 1)
logger.info(s"\ntps: $tps\nfps: $fps\nfns: $fns\nprecision:" +
s" $precision\nrecall: $recall\nf-score: $fscore\ntraining time: " +
s"$time\ntheory size: $theorySize\n" +
s"Total number of messages: ${initTotalMsgNum + termTotalMsgNum}\n" +
s"Total message size: ${initTotalMsgSize + termTotalMsgSize}")
logger.info(s"\nDone. Theory found:\n ${filtered.showWithStats}")
logger.info(s"Mean time per batch: ${Globals.timeDebug.sum / Globals.timeDebug.length}")
logger.info(s"Total batch time: ${Globals.timeDebug.sum}")
context.system.terminate()
}
}
def showTheory(t: Theory) = {
val showClause = (c: Clause) => {
s"score (${if (c.head.functor == "initiatedAt") "precision" else "recall"}):" +
s"${c.score}, tps: ${c.tps}, fps: ${c.fps}, fns: ${c.fns} Evaluated on: ${c.seenExmplsNum} examples\n$$c.tostring}"
}
t.clauses.map(x => showClause(x)).mkString("\n")
}
/*
def crossVal() = {
val merged_ = Theory(this.initTheory ++ this.termTheory)
val compressed = Theory(LogicUtils.compressTheory(merged_.clauses))
/*------------------*/
// DEBUGGING-TESTING
/*------------------*/
//val filtered = Theory(compressed.clauses.filter(x => x.tps > 50))
val filtered = compressed
val crossValJep = new Jep()
val data = testingDataFunction(testingDataOptions)
val (tps,fps,fns,precision,recall,fscore) = crossVal(filtered, crossValJep, data = data, handCraftedTheoryFile = inps.evalth, globals = inps.globals, inps = inps)
val time = Math.max(this.initTrainingTime.toDouble, this.termTrainingTime.toDouble)
val theorySize = filtered.clauses.foldLeft(0)((x,y) => x + y.body.length + 1)
logger.info(s"\ntps: $tps\nfps: $fps\nfns: $fns\nprecision:" +
s" $precision\nrecall: $recall\nf-score: $fscore\ntraining time: " +
s"$time\ntheory size: $theorySize\n" +
s"Total number of messages: ${initTotalMsgNum+termTotalMsgNum}\n" +
s"Total message size: ${initTotalMsgSize+termTotalMsgSize}")
logger.info(s"\nDone. Theory found:\n ${filtered.showWithStats}")
logger.info(s"Mean time per batch: ${Globals.timeDebug.sum/Globals.timeDebug.length}")
logger.info(s"Total batch time: ${Globals.timeDebug.sum}")
crossValJep.close()
}
*/
}
| 7,433 | 39.846154 | 166 | scala |
OLED | OLED-master/src/main/scala/oled/non_blocking/Node.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.non_blocking
import akka.actor.{Actor, PoisonPill}
import app.runutils.IOHandling.InputSource
import app.runutils.RunningOptions
import com.madhukaraphatak.sizeof.SizeEstimator
import logic.Examples.Example
import logic.{Clause, Theory}
import oled.distributed.Structures._
import oled.functions.NonBlockingOLEDFunctions._
import org.slf4j.LoggerFactory
/**
* Created by nkatz on 2/15/17.
*/
/* Represents a processing node in OLED's distributed setting. */
class Node[T <: InputSource](
val otherNodesNames: List[String],
val targetConcept: TargetConcept,
val inputParameters: RunningOptions,
val trainingDataOptions: T,
val trainingDataFunction: T => Iterator[Example]) extends Actor {
private val initorterm = targetConcept match {
case x: Initiated => "initiatedAt"
case x: Terminated => "terminatedAt"
}
// Get the training data from the current database
def getTrainData: Iterator[Example] = trainingDataFunction(trainingDataOptions)
private var data = Iterator[Example]()
private var currentTheory = List[Clause]()
// This variable stores the replies from other nodes in response to a StatsRequest from this node
private var statsReplies = List[StatsReply]()
private var statsRepliesCount = 0
// Control learning iterations over the data
private var repeatFor = inputParameters.repeatFor
/* FOR LOGGING-DEBUGGING */
def showCurrentClauseUUIDs = s"(${this.currentTheory.length}): ${this.currentTheory.map(x => x.uuid).mkString(" ")}"
/* FOR LOGGING-DEBUGGING */
def showCurrentExpansionCandidatesUUIDS =
s"(${getCurrentExpansionCandidates.length}): ${getCurrentExpansionCandidates.map(x => x.uuid).mkString(" ")}"
/* FOR LOGGING-DEBUGGING */
def showAlreadySpecialized = s"(${this.specializedSoFar.length}): ${this.specializedSoFar.mkString(" ")}"
/* LOGGING-DEBUGGING */
val showDebugMsgInLogs = false
/* LOGGING-DEBUGGING */
def showClausesDebugMsg = {
if (showDebugMsgInLogs) {
s"\nCurrent theory contains:" +
s" $showCurrentClauseUUIDs\nExpansion candidates: $showCurrentExpansionCandidatesUUIDS\nAlready specialized: $showAlreadySpecialized\n"
} else { "" }
}
def getCurrentExpansionCandidates = {
this.currentExpansionCandidates.filter(p => !this.specializedSoFar.contains(p.uuid))
}
// Monitor current state (that's just for debugging)
var state = "starting"
// for logging
def showState = s"[in ${this.state} state] "
// for logging
val NORMAL_STATE = "normal"
// for logging
val EXPANSION_NODE_WAITING_STATE = "expansion node waiting"
// for logging
val STATS_REQUEST_SENDER_WAITING_STATE = "stats request sender waiting"
// for logging
val STATS_REQUEST_RECEIVER_WAITING_STATE = "stats request receiver waiting"
// for logging
val EXPANSION_NODE_NON_PRIORITY_STATE = "expansion node non-priority"
// for logging
def logNormalState = this.state = NORMAL_STATE
// This variable stores the uuid's of clauses that have been already specialized.
// When the node finds an expansion candidate, it only proceeds to the necessary actions
// to specialize that candidate, if its uuid is not found in this list. This is to avoid
  // a situation where the node infers that a clause C must be specialized while it has
// also received a similar request for C from another node. In that case, if C gets
// specialized in the other node, there is no point in trying to specialize it again.
private var specializedSoFar = List[String]()
private var currentExpansionCandidates = List[Clause]()
var finishedAndSentTheory = false
/*
   * The logger for this class. Getting a logger this way, instead of mixing in the LazyLogging trait, allows us to
* name the logger after a particular class instance, which is helpful for tracing messages
* between different instances of the same class.
* */
private val slf4jLogger = LoggerFactory.getLogger(self.path.name)
def logger_info(msg: String) = this.slf4jLogger.info(s"$showState $msg $showClausesDebugMsg")
  def logger_debug(msg: String) = this.slf4jLogger.debug(s"$showState $msg $showClausesDebugMsg")
private var messages = List[Long]()
def updateMessages(m: AnyRef) = {
val size = SizeEstimator.estimate(m)
messages = messages :+ size
}
def receive = {
// Start processing data. This message is received from the top level actor
case "go" => start()
case "start-over" =>
logger_info(s"$showState Starting a new training iteration (${this.repeatFor - 1} iterations remaining.)")
start() // re-starts according to the repeatFor parameter
case _: ShutDown => self ! PoisonPill
case p: Ping => logger_info(s"Pinged by ${p.senderName}")
case chunk: Iterator[Example] =>
      processNewChunk(chunk)
case result: BatchProcessResult =>
handleBatchResult(result)
case reply: StatsReply =>
handleStatsReply(reply)
case reply: ExpansionReply =>
handleExpansionReply(reply)
if (!this.finishedAndSentTheory) self ! getNextBatch
case nc: NewClauses =>
this.currentTheory = this.currentTheory ++ nc.newClauses
logger_info(s"Received new clauses from ${nc.senderName}")
case request: StatsRequest => handleStatsRequest(request)
case _: TheoryRequestMessage =>
val msgNum = this.messages.length
val msgSize = this.messages.sum
sender ! new NodeTheoryMessage(Theory(this.currentTheory), msgNum, msgSize, self.path.name)
}
def getNextBatch = {
if (data.isEmpty) Iterator[Example]()
else Utils.getNextBatch(data, inputParameters.processBatchBeforeMailBox)
}
def start() = {
this.repeatFor -= 1
logger_info(s"$showState Getting training data from db ${this.inputParameters.train}")
// Get the training data into a fresh iterator
this.data = getTrainData
if (this.data.isEmpty) {
slf4jLogger.error(s"DB ${inputParameters.train} is empty.")
System.exit(-1)
}
// Send the first batch to self
self ! getNextBatch
}
  def processNewChunk(chunk: Iterator[Example]) = {
if (!this.finishedAndSentTheory) {
if (chunk.isEmpty) {
logger_info(s"Finished the data")
if (this.repeatFor > 0) {
self ! "start-over"
} else if (this.repeatFor == 0) {
this.finishedAndSentTheory = true
context.parent ! new NodeDoneMessage(self.path.name)
logger_info(s"Sent the theory to the top-level actor")
} else {
throw new RuntimeException("This should never have happened (repeatfor is now negative?)")
}
} else {
self ! processBatch(chunk, this.slf4jLogger)
}
}
}
def handleBatchResult(result: BatchProcessResult) = {
if (result.newClauses.nonEmpty) {
this.currentTheory = this.currentTheory ++ result.newClauses
val copies = result.newClauses.map(x => Utils.copyClause(x))
logger_info(s"Generated new clauses, sending them over...")
Utils.getOtherActors(context, otherNodesNames) foreach { x =>
val cls = new NewClauses(copies, self.path.name)
updateMessages(cls)
x ! cls
}
}
// Handle expansion candidates
if (result.toExpandClauses.nonEmpty) {
this.currentExpansionCandidates = result.toExpandClauses.filter(p => !this.specializedSoFar.contains(p.uuid))
val candidates = getCurrentExpansionCandidates
if (candidates.nonEmpty) {
logger_info(s"Found candidates for expansion: ${candidates.map(x => x.uuid).mkString(" ")} Requesting stats")
// send a StatsRequest msg to all other nodes
val otherNodes = Utils.getOtherActors(context, otherNodesNames)
        // debugging: log the estimated size of a stats-request message
        val sampleRequest = new StatsRequest(candidates map (_.uuid), self.path.name)
        println(s"Msg size: ${SizeEstimator.estimate(sampleRequest)}")
        otherNodes foreach { node =>
          val request = new StatsRequest(candidates map (_.uuid), self.path.name)
          updateMessages(request)
          node ! request
        }
        this.statsRepliesCount = otherNodes.length // reset the reply counter
this.statsReplies = List[StatsReply]() // clear the reply storage
}
}
// When everything is done, send a request to self to process next batch.
if (!this.finishedAndSentTheory) self ! getNextBatch
}
def handleStatsReply(reply: StatsReply) = {
logger_info(s"Received a StatsReply from node ${reply.sender}: $reply")
this.statsRepliesCount -= 1
this.statsReplies = this.statsReplies :+ reply
if (this.statsRepliesCount == 0) {
// all replies have been received from all nodes.
// Time to decide which candidates will eventually be expanded.
val (delta, ties, minseen) = (inputParameters.delta, inputParameters.breakTiesThreshold, inputParameters.minSeenExmpls)
val checked = getCurrentExpansionCandidates map (clause =>
Utils.expand_?(clause, this.statsReplies, delta, ties, minseen, showState, self.path.name, inputParameters, slf4jLogger, blocking = false))
val (expanded, notExpanded) = checked.foldLeft(List[Clause](), List[Clause]()){ (x, y) =>
val (expandAccum, notExpandAccum) = (x._1, x._2)
val (expandedFlag, clause) = (y._1, y._2)
if (expandedFlag) (expandAccum :+ clause, notExpandAccum) else (expandAccum, notExpandAccum :+ clause)
}
// replace the expanded in the current theory
if (expanded.nonEmpty) {
expanded.foreach { expanded =>
val theoryWithout = this.currentTheory.filter(p => p.uuid != expanded.parentClause.uuid)
/*
if (this.currentTheory.length == theoryWithout.length) {
// then the parent clause of the expanded one is not found in current theory, which is an error...
throw new RuntimeException(s"$showState Cannot find parent clause in current theory")
}
*/
val theoryWith = theoryWithout :+ expanded
// Remember the uuid's of expanded clauses
this.specializedSoFar = this.specializedSoFar :+ expanded.parentClause.uuid
this.currentTheory = theoryWith
}
}
// Clear the currentExpansionCandidates variable
this.currentExpansionCandidates = Nil
      // finally, send the reply to all other actors (which are currently waiting for stats replies)
val otherNodes = Utils.getOtherActors(context, otherNodesNames)
      otherNodes foreach { node =>
        val expansionReply = new ExpansionReply(notExpanded, expanded.map(p => Utils.copyClause(p)), self.path.name)
        updateMessages(expansionReply)
        node ! expansionReply
      }
if (!this.finishedAndSentTheory) self ! getNextBatch
}
}
/* Handle a stats request */
def handleStatsRequest(request: StatsRequest) = {
logger_info(s"Received a StatsRequest from node ${request.senderName}")
    // For each requested uuid, look the clause up in the current theory, falling back
    // to an empty Clause if the uuid is not known at this node.
    val statsObject =
      (for (uuid <- request.candidatesIds)
        yield this.currentTheory.find(c => c.uuid == uuid).getOrElse(Clause()))
        .map(a => a.uuid -> Stats(
          ClauseStats(a.tps, a.fps, a.fns, a.seenExmplsNum),
          a.refinements.map(r => r.uuid -> ClauseStats(r.tps, r.fps, r.fns, r.seenExmplsNum)).toMap))
        .toMap
val reply = new StatsReply(statsObject, self.path.name)
updateMessages(reply)
Utils.getActorByName(context, request.senderName) ! reply
}
/* Handle an expansion reply */
def handleExpansionReply(reply: ExpansionReply) = {
logger_info(s"Received an ExpansionReply from node ${reply.senderName}")
// We don't do anything for intact clauses. But we need to replace the expanded clauses in the
// current theory.
if (reply.expandedClauses.nonEmpty) {
reply.expandedClauses.foreach { expanded =>
val theoryWithout = this.currentTheory.filter(p => p.uuid != expanded.parentClause.uuid)
/*
if (this.currentTheory.length == theoryWithout.length) {
// then the parent clause of the expanded one is not found in current theory, which is an error...
throw new RuntimeException(s"$showState Cannot find parent clause in current theory")
}
*/
val theoryWith = theoryWithout :+ expanded
// Remember the uuid's of expanded clauses
this.specializedSoFar = this.specializedSoFar :+ expanded.parentClause.uuid
this.currentTheory = theoryWith
}
}
}
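  /* Summary of the message protocol implemented above: when a node finds expansion
   * candidates it broadcasts a StatsRequest; each peer answers with a StatsReply
   * (handleStatsRequest); once all replies have arrived, the expansion decisions are
   * taken (handleStatsReply) and broadcast as an ExpansionReply, which peers apply
   * via handleExpansionReply. */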
/*
*
* Process a small batch of examples. This method returns two lists:
* The first contains all new rules that were created from the input
* batch, while the second list contains all rules that are about to be
* expanded.
* */
def processBatch(exmpls: Iterator[Example], logger: org.slf4j.Logger): BatchProcessResult = {
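    /* Helper: drops a new rule when its bottom clause is theta-equivalent (mutual
     * theta-subsumption) to the bottom clause of a rule already in the current theory,
     * i.e. when the same specialization search has effectively been started before. */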
    def filterTriedRules(newRules: List[Clause]) = {
      // The theory's bottom clauses do not depend on the rule under test, so compute them once.
      val bottomClauses = this.currentTheory.map(x => x.supportSet.clauses.head)
      val out = newRules.filter { newRule =>
        val bottom = newRule.supportSet.clauses.head
        !bottomClauses.exists(x => x.thetaSubsumes(bottom) && bottom.thetaSubsumes(x))
      }
      if (out.length != newRules.length) logger.info("Dropped new clause (repeated bottom clause)")
      out
    }
val out = utils.Utils.time {
var newTopTheory = Theory(this.currentTheory)
val (newRules_, expansionCandidateRules_) = exmpls.foldLeft(List[Clause](), List[Clause]()) { (x, e) =>
var (newRules, expansionCandidateRules) = (x._1, x._2)
val startNew = newTopTheory.growNewRuleTest(e, initorterm, this.inputParameters.globals)
if (startNew) {
newRules = generateNewRules(newTopTheory, e, initorterm, this.inputParameters.globals, this.otherNodesNames)
          // Just to be on the safe side, keep only rules with the target head predicate.
          newRules = newRules.filter(x => x.head.functor == this.initorterm)
newTopTheory = Theory(newTopTheory.clauses ++ newRules)
}
if (newTopTheory.clauses.nonEmpty) {
newTopTheory.scoreRules(e, this.inputParameters.globals)
}
for (rule <- newTopTheory.clauses) {
//.filter(p => !this.getCurrentExpansionCandidates.contains(p))) {
val (delta, ties, seen) = (inputParameters.delta, inputParameters.breakTiesThreshold, inputParameters.minSeenExmpls)
if (shouldExpand(rule, delta, ties, seen)) {
expansionCandidateRules = expansionCandidateRules :+ rule
}
}
(newRules, expansionCandidateRules)
}
(newRules_, expansionCandidateRules_)
}
val (newRules, expansionCandidateRules, time) = (out._1._1, out._1._2, out._2)
//Globals.timeDebug = Globals.timeDebug :+ time
if (inputParameters.compressNewRules) {
new BatchProcessResult(filterTriedRules(newRules), expansionCandidateRules)
} else {
new BatchProcessResult(newRules, expansionCandidateRules)
}
}
}
| 15,531 | 40.198939 | 147 | scala |
OLED | OLED-master/src/main/scala/oled/non_blocking/PingActor.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.non_blocking
import akka.actor.Actor
/**
* Created by nkatz on 3/15/17.
*
* This is just a debugging tool.
* It pings a set of actors to check their state
*
*/
class PingActor(learningActorsNames: List[String]) extends Actor {
var startTime = System.currentTimeMillis()
val actorRefs = Utils.getOtherActors(context, learningActorsNames)
def receive = {
case "go" =>
while (true) {
val now = System.currentTimeMillis()
if (now - startTime > 10000) {
actorRefs.foreach(_ ! "ping")
startTime = System.currentTimeMillis()
}
}
}
}
| 1,320 | 27.717391 | 72 | scala |
OLED | OLED-master/src/main/scala/oled/non_blocking/TopLevelActor.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.non_blocking
import akka.actor._
import app.runutils.IOHandling.InputSource
import app.runutils.RunningOptions
import com.madhukaraphatak.sizeof.SizeEstimator
import logic.Examples.Example
import logic.{Clause, Theory}
import oled.distributed.Structures._
import org.slf4j.LoggerFactory
/**
* Created by nkatz on 2/15/17.
*/
class TopLevelActor[T <: InputSource](
val dataOptions: List[(T, T => Iterator[Example])],
val inputParams: RunningOptions,
val targetConcept: TargetConcept) extends Actor {
import context._
var actorsPoolSize = 0
var nodesCounter = 0
var startTime = 0L
var endTime = 0L
/* This function starts the learning Nodes. */
def getActorsPool() = {
val NodeActorNames = (1 to dataOptions.length).toList map (i => s"Node-$i-${targetConcept.toString}")
val nodeActorsPool = (NodeActorNames zip this.dataOptions) map { node =>
      val (nodeName, nodeOptions, nodeDataFunction) = (node._1, node._2._1, node._2._2)
      val otherActors = NodeActorNames.filter(_ != nodeName)
      context.actorOf(Props(new Node(otherActors, targetConcept, inputParams, nodeOptions, nodeDataFunction)), name = nodeName)
}
nodeActorsPool
}
val actorsPool: List[ActorRef] = getActorsPool()
val actorNames: List[String] = actorsPool.map(x => x.path.name)
var nodeHavingTheSlot = "" // that's only for logging
def getOtherActorNames(actorName: String) = actorNames.filter(name => name != actorName)
def getOtherActorRefs(a: String) = getOtherActorNames(a) map (actorName => context.actorSelection(s"${self.path}/$actorName"))
private var finalTheories = List[Theory]() // these should all be the same
private val logger = LoggerFactory.getLogger(self.path.name)
private var messages = List[Long]()
def updateMessages(m: AnyRef) = {
val size = SizeEstimator.estimate(m)
messages = messages :+ size
}
private var childrenMsgNums = List[Int]()
private var childrenMsgSizes = List[Long]()
def receive = {
case "go" =>
this.actorsPoolSize = actorsPool.length
this.nodesCounter = actorsPool.length
Thread.sleep(4000)
this.startTime = System.nanoTime()
actorsPool foreach (a => a ! "go")
case "go-no-communication" =>
this.actorsPoolSize = actorsPool.length
this.nodesCounter = actorsPool.length
Thread.sleep(4000)
this.startTime = System.nanoTime()
actorsPool foreach (a => a ! "go-no-communication")
//become(replyHandler)
case msg: NodeDoneMessage =>
acceptNewDoneMsg(msg)
case msg: NodeTheoryMessage =>
acceptNewLearntTheory(msg)
}
def acceptNewDoneMsg(msg: NodeDoneMessage) = {
this.actorsPoolSize -= 1
logger.info(s"Node ${msg.sender} is done. ${this.actorsPoolSize} nodes remaining")
if (this.actorsPoolSize == 0) {
logger.info("All processing nodes are done")
val theoryRequest = new TheoryRequestMessage(self.path.name)
this.actorsPool foreach (a => a ! theoryRequest)
}
}
def acceptNewLearntTheory(msg: NodeTheoryMessage) = {
this.nodesCounter -= 1
logger.info(s"Node ${msg.sender} sent:\n${msg.theory.clauses.map(x => x.showWithStats + s"evaluated on ${x.seenExmplsNum} exmpls | refs: ${x.refinements.length}").mkString("\n")}")
this.finalTheories = this.finalTheories :+ msg.theory
this.childrenMsgNums = this.childrenMsgNums :+ msg.msgNum
this.childrenMsgSizes = this.childrenMsgSizes :+ msg.msgSize
if (this.nodesCounter == 0) {
this.endTime = System.nanoTime()
this.actorsPool.foreach(_ ! PoisonPill)
val totalTime = (this.endTime - this.startTime) / 1000000000.0
logger.info(s"Total training time: $totalTime sec")
val totalMsgNum = childrenMsgNums.sum + messages.length
val totalMsgSize = childrenMsgSizes.sum + messages.sum
context.parent ! new FinalTheoryMessage(getFinalTheory(), totalTime.toString, totalMsgNum, totalMsgSize, targetConcept)
}
}
def getFinalTheory() = {
this.finalTheories.head.clauses.foldLeft(List[Clause]()){ (accum, clause) =>
val clauseCopies = this.finalTheories.tail.flatMap(theory => theory.clauses.filter(c => c.uuid == clause.uuid))
if (clauseCopies.length + 1 != this.finalTheories.length) {
logger.info(s"\nCLAUSE\n${clause.tostring} (uuid: ${clause.uuid}) \nIS NOT FOUND IS SOME FINAL THEORY")
}
val sumCounts = clauseCopies.foldLeft(clause.tps, clause.fps, clause.fns, clause.seenExmplsNum) { (x, y) =>
(x._1 + y.tps, x._2 + y.fps, x._3 + y.fns, x._4 + y.seenExmplsNum)
}
clause.tps = sumCounts._1
clause.fps = sumCounts._2
clause.fns = sumCounts._3
clause.seenExmplsNum = sumCounts._4
if (clause.seenExmplsNum > inputParams.minEvalOn && clause.score >= inputParams.pruneThreshold) {
accum :+ clause
} else {
accum
}
}
}
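  /* Illustration of the merge with assumed counts (not taken from an actual run):
   * if a clause has (tps, fps, fns) = (10, 2, 1) locally and its copies on two other
   * nodes report (5, 1, 0) and (7, 0, 2), the merged clause carries (22, 3, 3) and is
   * kept only if it has been evaluated on more than minEvalOn examples and its score
   * is at least pruneThreshold. */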
}
| 5,600 | 35.607843 | 184 | scala |
OLED | OLED-master/src/main/scala/oled/non_blocking/Utils.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.non_blocking
import java.util.UUID
import akka.actor.{ActorContext, ActorSelection}
import app.runutils.RunningOptions
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.commons.MongoDBObject
import com.mongodb.casbah.{MongoClient, MongoCollection}
import logic.Clause
import logic.Examples.Example
import oled.distributed.Structures.{Stats, StatsReply}
import oled.functions.DistributedOLEDFunctions._
import oled.functions.NonBlockingOLEDFunctions
import utils.DataUtils.DataAsIntervals
/**
* Created by nkatz on 2/15/17.
*/
object Utils {
def getCaviarData(mc: MongoClient, dbName: String, chunkSize: Int): Iterator[List[String]] = {
val collection = mc(dbName)("examples")
collection.find().map(x => Example(x)).grouped(chunkSize).map(x =>
x.foldLeft(List[String]())((z, y) => z ++ y.annotation ++ y.narrative))
}
//, dataSize: Double = Double.PositiveInfinity
/* utility function for retrieving data */
def getDataFromDB(dbName: String, HLE: String, chunkSize: Int,
intervals: DataAsIntervals = DataAsIntervals()): Iterator[Example] = {
    // No need to worry about removing prior annotation from the examples, since in any case inertia
    // is not used during learning. Even if a pair is passed where both time points
    // carry positive annotation, the first positive example will be covered by
    // the initialTime axiom, while the second positive will be covered by abduction (no inertia).
val mc = MongoClient()
val collection = mc(dbName)("examples")
if (intervals.isEmpty) {
//collection.createIndex(MongoDBObject("time" -> 1))
val data = collection.find().sort(MongoDBObject("time" -> 1)).map { x =>
val e = Example(x)
new Example(annot = e.annotation filter (_.contains(HLE)), nar = e.narrative, _time = e.time)
}
val dataChunked = data.grouped(chunkSize)
val dataIterator = dataChunked.map { x =>
val merged = x.foldLeft(Example()) { (z, y) =>
new Example(annot = z.annotation ++ y.annotation, nar = z.narrative ++ y.narrative, _time = x.head.time)
}
merged
}
dataIterator
} else {
utils.CaviarUtils.getDataFromIntervals(collection, HLE, intervals.trainingSet, chunkSize)
}
}
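  /* Usage sketch (the database name and HLE below are hypothetical, for illustration):
   *
   *   val exmpls = Utils.getDataFromDB("caviar-db", "meeting", chunkSize = 10)
   *   exmpls.foreach(e => println(e.time, e.annotation.size))
   *
   * Each returned Example merges chunkSize consecutive DB entries and is time-stamped
   * with the first entry's time. */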
def intervalsToDB(dbToReadFrom: String, intervals: DataAsIntervals, HLE: String,
chunkSize: Int, withChunking: Boolean = true) = {
val dbToWriteTo = s"d-oled-DB-${UUID.randomUUID()}"
val mongoClient = MongoClient()
val collectionWriteTo = mongoClient(dbToWriteTo)("examples")
val collectionReadFrom = mongoClient(dbToReadFrom)("examples")
println(s"Inserting data to $dbToWriteTo")
for (interval <- intervals.trainingSet) {
val batch = collectionReadFrom.find("time" $gte interval.startPoint $lte interval.endPoint).
sort(MongoDBObject("time" -> 1))
val examples = batch.map(x => Example(x)) //.toList
val HLExmpls = examples map { x =>
val a = x.annotation filter (_.contains(HLE))
new Example(annot = a, nar = x.narrative, _time = x.time)
}
val chunked = if (withChunking) HLExmpls.sliding(chunkSize, chunkSize - 1) else HLExmpls.sliding(HLExmpls.length)
val out = chunked map { x =>
val merged = x.foldLeft(Example()) { (z, y) =>
new Example(annot = z.annotation ++ y.annotation, nar = z.narrative ++ y.narrative, _time = x.head.time)
}
merged
}
out.foreach{ e =>
val entry = MongoDBObject("time" -> e._time.toInt) ++ ("annotation" -> e.annotation) ++ ("narrative" -> e.narrative)
collectionWriteTo.insert(entry)
}
}
dbToWriteTo
}
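  /* Note on the chunking above: sliding(chunkSize, chunkSize - 1) makes consecutive
   * chunks overlap by exactly one entry, so adjacent windows share one time point. */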
def getExmplIteratorSorted(collection: MongoCollection) = {
collection.find().sort(MongoDBObject("time" -> 1))
}
  def getExmplIteratorShuffle(collection: MongoCollection) = {
    // Not implemented yet (stub).
  }
// Utility function, returns a list of other Node actors
def getOtherActors(context: ActorContext, otherNodesNames: List[String]): List[ActorSelection] = {
otherNodesNames map (actorName => context.actorSelection(s"${context.parent.path}/$actorName"))
}
def getActorByName(context: ActorContext, name: String) = {
context.actorSelection(s"${context.parent.path}/$name")
}
// Utility function, returns a new small example batch for processing
def getNextBatch(data: Iterator[Example], processBatchBeforeMailBox: Int) = {
data.take(processBatchBeforeMailBox)
}
/*
* Decide if a clause will be expanded or not, after taking into account the new counts
* from all nodes. clause is the clause in question, replies is a list of StatsReply objects
* received from all nodes and the remaining parameters are for calculating the hoeffding bound.
   * This method returns a (b, c) tuple, where b is true or false, according to whether the input
* clause will be expanded or not and c either the input clause (if b = false) or its best
* specialization (if b = true).
* */
def expand_?(clause: Clause, replies: List[StatsReply], delta: Double,
breakTiesThreshold: Double, minSeenExmpls: Int,
currentNodeState: String, nodeName: String, params: RunningOptions, logger: org.slf4j.Logger,
blocking: Boolean = true) = {
// A StatsReply is a reply from a node. So it should contain stats
// for any requested clause. If a clause id is not found in a reply an exception
// is thrown from r.getClauseStats
val repliesGroupedByNode = (for (r <- replies) yield (r.sender, r.getClauseStats(clause.uuid, blocking = false))).toMap
// update the counts per node for each node, for this clause and for each one of its refinements
repliesGroupedByNode.keys foreach { node =>
updateCountsPerNode(clause, node, repliesGroupedByNode, currentNodeState, nodeName)
}
// Re-check the clause for expansion
if (blocking) {
expandRule(clause, delta, breakTiesThreshold, minSeenExmpls, nodeName, params, logger)
} else {
NonBlockingOLEDFunctions.expandRule(clause, delta, breakTiesThreshold, minSeenExmpls, nodeName, params, logger)
}
}
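  /* Usage sketch (someClause, replies and inps are assumed names for illustration,
   * not part of this code base):
   *
   *   val (doExpand, result) = Utils.expand_?(someClause, replies, delta = 0.05,
   *     breakTiesThreshold = 0.05, minSeenExmpls = 1000, currentNodeState = "",
   *     nodeName = "Node-1", params = inps, logger = logger)
   *
   * result is either someClause itself (doExpand = false) or its best specialization. */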
  /*
   * Updates the counts of the given clause (and of each of its refinements) with the
   * stats received from node nodeName: the clause's countsPerNode map and accumulated
   * totals are mutated in place, so nothing is returned. The replies map
   * is a (k,v) map where k is a node id and v is a stats object sent from node k for clause c.
   * */
def updateCountsPerNode(clause: Clause, nodeName: String, replies: Map[String, Stats], currentNodeState: String, currentlyOnNode: String): Unit = {
val receivedStats = replies.getOrElse(nodeName, Stats())
if (receivedStats != Stats()) {
val parentClauseStats = receivedStats.parentStats
val refinementsStats = receivedStats.refinementsStats
clause.countsPerNode(nodeName) = parentClauseStats // update the countsPerNode map
clause.updateTotalCounts(currentlyOnNode) // Update the accumulated counts variables
// just to be on the safe side...
if (refinementsStats.size != clause.refinements.length) {
throw new RuntimeException(s"$currentNodeState Problem with refinements reply!")
}
clause.refinements.foreach{ ref =>
val refStats = refinementsStats.getOrElse(ref.uuid, throw new RuntimeException(s"$currentNodeState Refinement ${ref.uuid} not found in the returned stats map"))
ref.countsPerNode(nodeName) = refStats // update the refinement's countsPerNode map
ref.updateTotalCounts(currentlyOnNode) // Update the accumulated counts variables
}
}
}
def copyClause(c: Clause) = {
def basicopy(clause: Clause) = {
val copy_ = Clause(head = clause.head, body = clause.body, uuid = clause.uuid)
//copy_.uuid = clause.uuid
copy_.tps = clause.tps
copy_.fps = clause.fps
copy_.fns = clause.fns
copy_.seenExmplsNum = clause.seenExmplsNum
copy_.countsPerNode = clause.countsPerNode
//copy_.generatedAtNode = clause.generatedAtNode
// don't copy these, there's no need (nothing changes in the parent clause or the support set) and copying
// it makes it messy to retrieve ids in other nodes
copy_.parentClause = clause.parentClause
copy_.supportSet = clause.supportSet
copy_
}
val copy = basicopy(c)
val refinementsCopy = c.refinements.map(ref => basicopy(ref))
copy.refinements = refinementsCopy
copy
}
}
| 9,221 | 40.540541 | 168 | scala |
OLED | OLED-master/src/main/scala/oled/selftraining/DataBatch.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.selftraining
/**
* Created by nkatz at 24/11/2018
*/
class DataBatch(val pos: Set[String], val neg: Set[String], val unknown: Set[String]) {
}
| 880 | 31.62963 | 108 | scala |
OLED | OLED-master/src/main/scala/oled/selftraining/Learner.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.selftraining
import akka.actor.Actor
import app.runutils.IOHandling.InputSource
import app.runutils.RunningOptions
import logic.Examples.Example
/**
* Created by nkatz at 24/11/2018
*/
class Learner[T <: InputSource](
val inps: RunningOptions,
val trainingDataOptions: T,
val testingDataOptions: T,
val trainingDataFunction: T => Iterator[Example],
val testingDataFunction: T => Iterator[Example]) extends Actor {
def receive = {
case exmpl: DataBatch => ???
}
}
| 1,211 | 27.186047 | 72 | scala |
OLED | OLED-master/src/main/scala/oled/selftraining/Master.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.selftraining
import akka.actor.Actor
import app.runutils.IOHandling.InputSource
import app.runutils.RunningOptions
import logic.Examples.Example
class Master[T <: InputSource](
val inps: RunningOptions,
val trainingDataOptions: T,
val testingDataOptions: T,
val trainingDataFunction: T => Iterator[Example],
val testingDataFunction: T => Iterator[Example]) extends Actor {
def receive = {
case "go" => ???
}
}
| 1,153 | 28.589744 | 72 | scala |
OLED | OLED-master/src/main/scala/oled/single_core/Dispatcher.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.single_core
import java.io.File
import akka.actor.{Actor, PoisonPill, Props}
import app.runutils.IOHandling.InputSource
import app.runutils.{Debug, Globals, RunningOptions}
import com.typesafe.scalalogging.LazyLogging
import logic.Examples.Example
import logic.{LogicUtils, Theory}
import utils.Implicits._
import oled.functions.SingleCoreOLEDFunctions.crossVal
import oled.weightlearn.EvalAfterWeightLearning
import utils.Utils
/**
* Created by nkatz on 28/2/2016.
*/
class Dispatcher[T <: InputSource](
inps: RunningOptions,
trainingDataOptions: T,
testingDataOptions: T,
trainingDataFunction: T => Iterator[Example],
testingDataFunction: T => Iterator[Example]) extends Actor with LazyLogging {
private var size = inps.globals.MODEHS.size // One process for each target concept.
private var theories = List[(Theory, Double)]()
private var merged = Theory()
private var time = 0.0
private val weightLearning = Globals.glvalues("weight-learning").toBoolean
def receive = {
case "eval" =>
if (!inps.evalth.isFile) {
logger.error(s"${inps.evalth} is not a file."); System.exit(-1)
} else {
println(s"Evaluating theory from ${inps.evalth}")
val data = testingDataFunction(testingDataOptions)
if (!weightLearning) {
val (tps, fps, fns, precision, recall, fscore) = crossVal(merged,
data = data, handCraftedTheoryFile = inps.evalth, globals = inps.globals, inps = inps)
logger.info(s"\ntps: $tps\nfps: $fps\nfns: $fns\nprecision: " +
s"$precision\nrecall: $recall\nf-score: $fscore\ntraining time:" +
s"$time\ntheory size: 0.0")
} else {
//private val mlnClauses = theory.clauses.map(x => x.to_MLNClause())
val mlnClauses = scala.io.Source.fromFile(inps.evalth).getLines.filter(p => !p.startsWith("//") && p.trim != "").toList
val evaluator = new EvalAfterWeightLearning(mlnClauses, data)
val (tps, fps, fns) = evaluator.getCounts()
logger.info(s"\nEvaluation (weight learning):\ntps: $tps\nfps: $fps\nfns: $fns")
}
}
context.system.terminate()
case "start" =>
      if (!weightLearning) { // quick & dirty way to select the runner
if (inps.withEventCalculs) {
context.actorOf(Props(
new TheoryLearner(inps, trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction, "initiated")),
name = s"initiated-learner-${this.##}") ! "go"
context.actorOf(Props(
new TheoryLearner(inps, trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction, "terminated")),
name = s"terminated-learner-${this.##}") ! "go"
} else {
context.actorOf(Props(
new TheoryLearner(inps, trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction, "None")),
name = s"learner-${this.##}") ! "go"
}
} else {
context.actorOf(Props(new woled.Learner(inps, trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction, "initiated")), name = s"learner-${this.##}") ! "go"
// This is for running the old version without actual MPE inference
/*
if (! inps.parallelClauseEval) {
context.actorOf(Props(new oled.weightlearn.WeightedTheoryLearner(inps, trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction, "initiated")), name = s"initiated-learner-${this.##}") ! "go"
context.actorOf(Props(new oled.weightlearn.WeightedTheoryLearner(inps, trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction, "terminated")), name = s"terminated-learner-${this.##}") ! "go"
} else {
context.actorOf(Props(new oled.weightlearn.parallel.WeightedTheoryLearner(inps, trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction, "initiated")), name = s"initiated-learner-${this.##}") ! "go"
context.actorOf(Props(new oled.weightlearn.parallel.WeightedTheoryLearner(inps, trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction, "terminated")), name = s"terminated-learner-${this.##}") ! "go"
}
*/
}
case x: (Theory, Double) =>
theories = theories :+ x
size -= 1
sender ! PoisonPill // kill the child actor
//logger.info(s"Error:\n${Globals.errorProb}")
if (size == 0) {
// merge the theories and do cross-validation
val first = theories.head
val second = if (theories.tail.nonEmpty) theories.tail.head else (Theory(), 0.0)
merged = first._1.clauses ++ second._1.clauses
val theorySize = merged.clauses.foldLeft(0)((x, y) => x + y.body.length + 1)
time = Math.max(first._2, second._2)
val data = testingDataFunction(testingDataOptions)
logger.info("Evaluating on the test set")
if (!weightLearning) {
/* THIS MAY TAKE TOO LONG FOR LARGE AND COMPLEX THEORIES!! */
logger.info("Compressing theory...")
val merged_ = Theory(LogicUtils.compressTheory(merged.clauses))
logger.info(s"\nDone. Theory found:\n ${merged_.showWithStats}")
if (inps.saveTheoryTo != "") {
Utils.writeToFile(new File(inps.saveTheoryTo), "overwrite") { p => Vector(merged_.tostring).foreach(p.println) }
}
val (tps, fps, fns, precision, recall, fscore) = crossVal(merged_, data = data, globals = inps.globals, inps = inps)
logger.info(s"\ntps: $tps\nfps: $fps\nfns: " +
s"$fns\nprecision: $precision\nrecall: $recall\nf-score: $fscore\ntraining time: " +
s"$time\ntheory size: $theorySize")
/*
println(s"\ntps: $tps\nfps: $fps\nfns: " +
s"$fns\nprecision: $precision\nrecall: $recall\nf-score: $fscore\ntraining time: " +
s"$time\ntheory size: $theorySize")
*/
//val test = merged_.withTypePreds(inps.globals)
context.system.terminate()
} else { // cross-validation after weight learning with AdaGrad
logger.info(s"\nAll clauses:\n${merged.clauses.map(x => x.to_MLNClause()).mkString("\n")}")
val merged_ = Theory(LogicUtils.compressTheory(merged.clauses))
//val merged_ = Theory(LogicUtils.compressTheory_RemoveSubsumers(merged.clauses))
val _mlnClauses = merged_.clauses.filter(x => x.weight > 0.0 && x.seenExmplsNum >= inps.minEvalOn && x.score >= inps.pruneThreshold)
val theorySize = _mlnClauses.clauses.foldLeft(0)((x, y) => x + y.body.length + 1)
//val _mlnClauses = merged_.clauses.filter(x => x.score >= inps.pruneThreshold && x.seenExmplsNum > 2000)
// Keep negative weights
//val _mlnClauses = merged_.clauses.filter(x => x.seenExmplsNum > 2000)
val mlnClausesString = _mlnClauses.map(x => x.to_MLNClause())
val evaluator = new EvalAfterWeightLearning(mlnClausesString, data)
val (tps, fps, fns) = evaluator.getCounts()
val precision = tps.toDouble / (tps.toDouble + fps.toDouble)
val recall = tps.toDouble / (tps.toDouble + fns.toDouble)
val fscore = 2 * precision * recall / (precision + recall)
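        // Worked example with illustrative numbers: tps = 80, fps = 20, fns = 20
        // gives precision = 0.8, recall = 0.8 and f-score = 0.8.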
logger.info(s"\n\nMLN theory (kept clauses):\n${mlnClausesString.mkString("\n")}")
logger.info(s"\n\nKept clauses in ASP format:\n${_mlnClauses.showWithStats}")
logger.info(s"\nWeight learning evaluation:\ntps: $tps\nfps: $fps\nfns: " +
s"$fns\nprecision: $precision\nrecall: $recall\ntheory size: ${}\nf-score: $fscore\ntraining time: $time")
val msg = s"\nMLN theory (kept clauses):\n${mlnClausesString.mkString("\n")}\ntps: $tps\nfps: $fps\nfns: " +
s"$fns\nprecision: $precision\nrecall: $recall\nf-score: $fscore\ntraining time: $time\ntheory size: $theorySize\n"
Utils.writeLine(msg, s"/home/nkatz/Desktop/weight-learning-experiments/weight-learn-results-δ=" +
s"${inps.delta}-ties=${inps.breakTiesThreshold}-adaδ=${inps.adaGradDelta}-adaλ=${inps.adaRegularization}-adaη=" +
s"${inps.adaLearnRate}-winsize=${inps.chunkSize}", "append")
context.system.terminate()
}
}
}
}
| 9,203 | 43.463768 | 237 | scala |
OLED | OLED-master/src/main/scala/oled/single_core/Master.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.single_core
import akka.actor.{Actor, PoisonPill, Props}
import app.runutils.IOHandling.{MongoSource, InputSource}
import app.runutils.RunningOptions
import com.typesafe.scalalogging.LazyLogging
import logic.Examples.Example
/**
* Created by nkatz on 9/14/16.
*/
class Master[T <: InputSource](
inps: RunningOptions,
trainingDataOptions: T,
testingDataOptions: T,
trainingDataFunction: T => Iterator[Example],
testingDataFunction: T => Iterator[Example]) extends Actor with LazyLogging {
def receive = {
case "eval" =>
context.actorOf(Props(
new Dispatcher(inps, trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction)),
name = s"Dispatcher-Actor-eval-mode") ! "eval"
case "start" =>
context.actorOf(Props(
new Dispatcher(inps, trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction)),
name = s"Dispatcher-Actor-learning-mode") ! "start"
}
}
| 1,724 | 31.54717 | 114 | scala |
OLED | OLED-master/src/main/scala/oled/single_core/TheoryLearner.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.single_core
import akka.actor.Actor
import app.runutils.IOHandling.InputSource
import app.runutils.{Globals, RunningOptions}
import logic.Examples.Example
import logic.Theory
import org.slf4j.LoggerFactory
import utils.Utils
import oled.functions.SingleCoreOLEDFunctions._
/**
* Created by nkatz on 27/2/2016.
*
*/
/*
*
* This is the old version of the core OLED learner, where (in case we're
* learning with the Event Calculus) initiation and termination rules are
* learnt separately and in parallel.
*
* */
class TheoryLearner[T <: InputSource](
val inps: RunningOptions,
val trainingDataOptions: T,
val testingDataOptions: T,
val trainingDataFunction: T => Iterator[Example],
val testingDataFunction: T => Iterator[Example],
val targetClass: String) extends Actor {
private val logger = LoggerFactory.getLogger(self.path.name)
private var totalBatchProcessingTime = 0.0
private var totalRuleScoringTime = 0.0
private var totalNewRuleTestTime = 0.0
private var totalCompressRulesTime = 0.0
private var totalExpandRulesTime = 0.0
private var totalNewRuleGenerationTime = 0.0
def receive = {
case "go" => sender ! run
}
val initorterm: String =
if (targetClass == "initiated") "initiatedAt"
else if (targetClass == "terminated") "terminatedAt"
else inps.globals.MODEHS.head.varbed.tostring
//private val withInertia = Globals.glvalues("with-inertia").toBoolean
def run: (Theory, Double) = {
def runOnce(inTheory: Theory): Theory = {
val trainingData = trainingDataFunction(trainingDataOptions)
if (trainingData.isEmpty) {
logger.error(s"DB ${inps.train} is empty.")
System.exit(-1)
}
trainingData.foldLeft(inTheory){ (topTheory, newExample) =>
if (inps.showStats) println(newExample.time)
val res = Utils.time {
val t =
if (Globals.glvalues("with-ec").toBoolean) processExample(topTheory, newExample, targetClass, inps, logger)
else processExampleNoEC(topTheory, newExample, inps, logger)
val th = t._1
totalRuleScoringTime += t._2
totalNewRuleTestTime += t._3
totalCompressRulesTime += t._4
totalExpandRulesTime += t._5
totalNewRuleGenerationTime += t._6
// This is used only when learning with inertia. But I think
            // it's ok to keep it so that I can print out stats for the current
// joint theory (for debugging).
//if (withInertia) updateGlobalTheoryStore(t, initorterm, inps.globals)
if (Globals.glvalues("with-ec").toBoolean) updateGlobalTheoryStore(th, initorterm, inps.globals)
th
}
if (inps.showStats) logger.info(s"Total batch process time: ${res._2}")
this.totalBatchProcessingTime += res._2
res._1
}
}
logger.info(s"Starting learning for $targetClass")
val _finalTheory = Utils.time{ (1 to inps.repeatFor).foldLeft(Theory())((t, _) => runOnce(t)) }
val (finalTheory, time) = (_finalTheory._1, _finalTheory._2)
logger.info(s"\nTraining time for $targetClass: $time")
val output = inps.withPostPruning match {
case true =>
val data = trainingDataFunction(trainingDataOptions)
reScoreAndPrune(inps, data, finalTheory, initorterm, logger)
case _ =>
// no re-scoring
val pruned = finalTheory.clauses.filter(x => x.score > inps.pruneThreshold && x.seenExmplsNum > inps.minEvalOn)
logger.info(s"\nLearnt hypothesis (non-pruned):\n${finalTheory.showWithStats}")
Theory(pruned)
}
logger.debug(s"\n$targetClass theory found:\n${output.tostring}")
logger.info(s"Total batch processing time: $totalBatchProcessingTime")
logger.info(s"Total rule scoring time: $totalRuleScoringTime")
logger.info(s"Total rule expansion time: $totalExpandRulesTime")
logger.info(s"Total rule compression time: $totalCompressRulesTime")
logger.info(s"Total testing for new rule generation time: $totalNewRuleTestTime")
logger.info(s"Total new rule generation time: $totalNewRuleGenerationTime")
(output, time)
}
}
| 4,927 | 36.052632 | 121 | scala |
OLED | OLED-master/src/main/scala/oled/single_core/WholeTheoryLearner.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.single_core
/**
* Created by nkatz on 19/6/2017.
*/
class WholeTheoryLearner {}
/*
class WholeTheoryLearner(override val DB: Database,
override val delta: Double,
override val breakTiesThreshold: Double,
override val pruningThreshold: Double,
override val minSeenExmpls: Double,
val trainingSetSize: Int,
override val repeatFor: Int,
override val chunkSize: Int,
targetClass: String,
val withInertia: Boolean,
val withPostPruning: Boolean,
val onlinePruning: Boolean,
val trainingData: TrainingSet,
override val HLE: String,
override val learningInitWithInertia: Boolean = false,
//handCraftedTheoryFile: String = "",
kernelSet: Theory = Theory(),
globals: Globals)
extends TheoryLearner(DB, delta, breakTiesThreshold, pruningThreshold,
minSeenExmpls, trainingSetSize, repeatFor, chunkSize, targetClass,
withInertia, withPostPruning, onlinePruning, trainingData,
HLE, learningInitWithInertia, kernelSet, globals) {
//override val bottomClauses = super.kernelSet
override def receive = {
case "go" => sender ! this.run
}
override def run: (Theory, Double) = {
def runOnce(inTheory: Theory): Theory = {
val data = getTrainingData
data.foldLeft(inTheory){ (topTheory, newExample) =>
//println(newExample.time)
val res = Utils.time {
this.processExample(topTheory, newExample)
}
res._1
}
}
logger.info(s"Starting learning....")
val _finalTheory = Utils.time{ (1 to repeatFor).foldLeft(Theory())( (t,_) => runOnce(t)) }
val (finalTheory,time) = (_finalTheory._1,_finalTheory._2)
logger.info(s"\nTraining time: $time")
val output = withPostPruning match {
case true =>
//logger.info(s"Starting pruning...")
//logger.info(s"Rescoring...")
//reScore(DB,finalTheory,chunkSize,this.jep,trainingSetSize,targetClass, withInertia)
//val pruned = finalTheory.clauses.filter(x => x.score > pruningThreshold)
//Theory(pruned)
finalTheory
case _ => finalTheory
}
this.jep.close()
(output,time)
}
def processExample(topTheory: Theory, e: Exmpl): Theory = {
var newTopTheory = topTheory
if (this.bottomClauses.clauses.isEmpty){
val startNew = newTopTheory.growNewRuleTestWholeTheories(e, this.jep, globals)
if (startNew) {
val newRules = generateNewRules(topTheory, e)
newTopTheory = topTheory.clauses ++ newRules
}
}
if (newTopTheory.clauses.nonEmpty) {
//val t0 = System.nanoTime()
newTopTheory.scoreRules2(e.exmplWithInertia, this.jep, globals)
//val t1 = System.nanoTime()
//println(s"scoreRules time: ${(t1-t0)/1000000000.0}")
try {
val expanded = expandRules(newTopTheory)
if (onlinePruning) {
pruneRules(expanded)
} else {
expanded
}
} catch {
case z: IndexOutOfBoundsException =>
println(s"top theory:\n ${topTheory.tostring}")
println(e.id)
Theory()
}
} else {
if (this.bottomClauses.clauses.isEmpty) {
newTopTheory
} else {
// generate a top theory from the already constructed bottom clauses
val top = this.bottomClauses.clauses map { x =>
val c = Clause(head=x.head, body = List())
logger.debug(s"Started growing new rule: \n ${c.tostring} from bottom clause: \n ${x.tostring}")
c.addToSupport(x)
c
}
Theory(top)
}
}
}
def generateNewRules(topTheory: Theory, e: Exmpl) = {
val (_, varKernel) = Utils.generateKernel(e.exmplWithInertia.toMapASP, jep = this.jep, learningTerminatedOnly=false)
val bottomTheory = topTheory.clauses flatMap(x => x.supportSet.clauses)
val goodKernelRules =
varKernel.filter(newBottomRule => !bottomTheory.exists(supportRule => newBottomRule.thetaSubsumes(supportRule)))
goodKernelRules map { x =>
val c = Clause(head=x.head, body = List())
logger.debug(s"Started growing new rule: \n ${c.tostring} from bottom clause: \n ${x.tostring}")
//println(x.tostring)
c.addToSupport(x)
c
}
}
def getAverages(parentRule: Clause) = {
val (observedDiff,best,secondBest) = parentRule.meanDiff2
// Note that the seen examples count for each "top" clause and
// each of its refinements is updated at the score method of the Theory class
val epsilon = Utils.hoeffding(delta, parentRule.seenExmplsNum)
val passesTest = if (epsilon < observedDiff) true else false
val tie =
if (observedDiff < epsilon && epsilon < breakTiesThreshold && parentRule.seenExmplsNum >= minSeenExmpls) true
else false
val couldExpand = passesTest || tie
(couldExpand,epsilon,observedDiff,best,secondBest)
}
override def expandRules(topTheory: Theory): Theory = {
//val t0 = System.nanoTime()
val out = topTheory.clauses flatMap { parentRule =>
val (couldExpand, epsilon, observedDiff, best, secondBest) = getAverages(parentRule)
couldExpand match {
case true =>
(best.score > parentRule.score) && (best.score - parentRule.score > epsilon) match { //&& (1.0/best.body.size+1 > 1.0/parentRule.body.size+1) match {
case true =>
val refinedRule = best
logger.info(showInfo(parentRule, best, secondBest, epsilon, observedDiff, parentRule.seenExmplsNum))
refinedRule.seenExmplsNum = 0 // zero the counter
refinedRule.supportSet = parentRule.supportSet // only one clause here
List(refinedRule)
case _ => List(parentRule)
}
case _ => List(parentRule)
}
}
//val t1 = System.nanoTime()
//println(s"expandRules time: ${(t1-t0)/1000000000.0}")
Theory(out)
}
/*
override def reScore(DB: Database, theory: Theory, chunkSize: Int, jep: Jep, trainingSetSize: Int, what: String, withInertia: Boolean) = {
val dataChunks = getTrainingData
theory.clauses foreach (p => p.clearStatistics) // zero all counters before re-scoring
for (x <- dataChunks) {
//println(x.id)
theory.scoreRules(x.exmplWithInertia,jep,globals, postPruningMode = true)
}
logger.debug( theory.clauses map { p => s"score: ${p.score}, tps: ${p.tps}, fps: ${p.fps}, fns: ${p.fns}\n${p.tostring}" } mkString("\n") )
}
*/
}
*/
| 7,511 | 36.748744 | 159 | scala |
OLED | OLED-master/src/main/scala/oled/weightlearn/AdaGrad.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.weightlearn
import app.runutils.RunningOptions
import com.typesafe.scalalogging.LazyLogging
import logic.{Clause, Literal}
object AdaGrad extends LazyLogging {
/* Non-empty clauseIds are passed when rules are evaluated in parallel.
* See also MLNClauseHandlingWorker:
*
* AdaGrad.adagrad(groundNetwork, x.clauses, trueGroundingsPerClause.toList, x.clauseIds.toList)
*
* and
*
* solver.infer(groundNetwork, clausesWithUpdatedWeights, x.clauseIds.toList)
* */
def adagrad(
inps: RunningOptions,
groundNetwork: Vector[Literal],
liftedClauses: Vector[Clause],
trueGroundingsPerClause: List[Int],
annotation: Vector[Literal],
correctlyNotTerminated: Vector[Literal],
incorrectlyTerminated: Vector[Literal],
targetClass: String,
clauseIds: List[Int] = Nil): Vector[Clause] = {
val lambda: Double = inps.adaRegularization //0.001 // 0.01 default
val eta: Double = inps.adaLearnRate //1.0 // default
val delta: Double = inps.adaGradDelta //1.0
val enumClauses = clauseIds match {
case Nil => (1 to liftedClauses.length).toList
case _ => clauseIds
}
val solver = new MAPInference
val inferredGroundNetwork = solver.infer(groundNetwork, liftedClauses, clauseIds)
val trueCounts = trueGroundingsPerClause
if (inps.adaLossFunction == "default") {
val inferredCounts = enumClauses map { clauseId =>
inferredGroundNetwork.filter(p => p.derivedFrom == clauseId) count { x =>
// we don't want the true negative counts...
x.mlnTruthValue && !x.isNAF //|| (!x.mlnTruthValue && x.isNAF)
}
}
//logger.info(s"True/Inferred counts:\n$trueCounts\n$inferredCounts")
liftedClauses.zipWithIndex.foreach { case (c, idx) =>
val currentSubgradient = inferredCounts(idx) - trueCounts(idx)
c.subGradient += currentSubgradient * currentSubgradient
val coefficient = eta / (delta + math.sqrt(c.subGradient))
val value = c.weight - coefficient * currentSubgradient
val difference = math.abs(value) - (lambda * coefficient)
if (difference > 0) c.weight = if (value >= 0) difference else -difference
else c.weight = 0.0
}
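      /* Worked update with assumed numbers (not from an actual run): with eta = 1.0,
       * delta = 1.0, lambda = 0.01, a clause whose accumulated subGradient is 9.0,
       * whose weight is 0.5 and whose current subgradient is 2 (inferred minus true
       * counts) is updated as follows:
       *   subGradient += 2 * 2                     // 9.0 + 4.0 = 13.0
       *   coefficient  = 1.0 / (1.0 + sqrt(13.0))  // ~ 0.217
       *   value        = 0.5 - 0.217 * 2           // ~ 0.066
       *   difference   = |0.066| - 0.01 * 0.217    // ~ 0.064 > 0
       *   weight       = 0.064                     // truncated (lazy L1) update keeps the sign
       */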
} else { // custom loss function
val lossVector = getCustomLoss(enumClauses, inferredGroundNetwork, annotation,
correctlyNotTerminated, incorrectlyTerminated, targetClass)
liftedClauses.zipWithIndex.foreach { case (c, idx) =>
// This is exactly the same with the default loss function
// (as above) for the initiation case.
val currentSubgradient =
if (targetClass == "terminatedAt") {
val relatedTps_term = correctlyNotTerminated.filter(p => p.derivedFrom == idx)
lossVector(idx) - relatedTps_term.size
} else lossVector(idx) - trueCounts(idx)
c.subGradient += currentSubgradient * currentSubgradient
val coefficient = eta / (delta + math.sqrt(c.subGradient))
val value = c.weight - coefficient * currentSubgradient
val difference = math.abs(value) - (lambda * coefficient)
if (difference > 0) c.weight = if (value >= 0) difference else -difference
else c.weight = 0.0
}
}
liftedClauses
}
  /* It seems that the only reason to use this is for weighting tps, fps, fns.
   * A different function for termination is not needed. The way we've designed it,
   * fps in termination (instances where a termination clause erroneously fires)
   * correspond to fns in termination for OLED. And since we count tps and fps here, we're done. */
def getCustomLoss(enumClauses: List[Int], inferredGroundNetwork: Vector[Literal], annotation: Vector[Literal],
correctlyNotTerminated: Vector[Literal], incorrectlyTerminated: Vector[Literal],
targetClass: String) = {
/*
if (targetClass == "terminatedAt") {
val tps_fns_per_clause = getCountsTerminated(enumClauses, inferredGroundNetwork,
correctlyNotTerminated, incorrectlyTerminated)
tps_fns_per_clause map {x =>
val (tps, fns) = (x._1, x._2)
//val recall = if (tps == 0) 0.0 else tps.toDouble/(tps.toDouble+fns.toDouble)
//recall
(tps+fns).toDouble
} // tps - fns
} else { // this works for general concepts to (not necessarily initiatedAt)
val tps_fps_fns_per_clause = getCountsInitiated(enumClauses, annotation, inferredGroundNetwork)
tps_fps_fns_per_clause map {x =>
val (tps, fps) = (x._1, x._2)
//val precision = if (tps == 0) 0.0 else tps.toDouble/(tps.toDouble+fps.toDouble)
//precision
(tps+fps).toDouble
} //tps - fps
}
*/
val tps_fps_fns_per_clause = getCounts(enumClauses, annotation, inferredGroundNetwork)
tps_fps_fns_per_clause map { x =>
val (tps, fps) = (x._1, x._2)
//val precision = if (tps == 0) 0.0 else tps.toDouble/(tps.toDouble+fps.toDouble)
//precision
(tps + fps).toDouble
} //tps - fps
}
/*
def getCountsTerminated(enumClauses: List[Int], inferredGroundNetwork: Vector[Literal],
correctlyNotTerminated: Vector[Literal],
incorrectlyTerminated: Vector[Literal]) = {
def isTrue(inferredAtom: Literal) = { inferredAtom.mlnTruthValue && !inferredAtom.isNAF }
def isTPAtom(relevantCorrectlyNotTerminated: Set[String], inferredAtom: Literal) = {
!isTrue(inferredAtom) && relevantCorrectlyNotTerminated.contains(inferredAtom.tostring_mln)
}
def isFNAtom(relevantIncorrectlyTerminated: Set[String], inferredAtom: Literal) = {
isTrue(inferredAtom) && relevantIncorrectlyTerminated.contains(inferredAtom.tostring_mln)
}
val tps_fns_per_clause = enumClauses map { clauseId =>
val relevantCorrectlyNotTerminated = correctlyNotTerminated.
filter(p => p.derivedFrom == clauseId).map(x => x.tostring_mln).toSet
val relevantIncorrectlyTerminated = incorrectlyTerminated.
filter(p => p.derivedFrom == clauseId).map(x => x.tostring_mln).toSet
inferredGroundNetwork.filter(p => p.derivedFrom == clauseId).foldLeft(0,0){ (counts, inferredAtom) =>
if (isTPAtom(relevantCorrectlyNotTerminated, inferredAtom)) (counts._1 + 1, counts._2)
else if (isFNAtom(relevantIncorrectlyTerminated , inferredAtom)) (counts._1, counts._2 + 1)
else (counts._1, counts._2) // we don't count anything else.
}
}
println(s"(tps, fns): $tps_fns_per_clause")
tps_fns_per_clause
}
*/
def getCounts(enumClauses: List[Int], annotation: Vector[Literal],
inferredGroundNetwork: Vector[Literal]) = {
def isTrue(inferredAtom: Literal) = { inferredAtom.mlnTruthValue && !inferredAtom.isNAF }
def isTPAtom(annotation: Set[String], inferredAtom: Literal) = {
isTrue(inferredAtom) && annotation.contains(inferredAtom.tostringMLN)
}
def isFPAtom(annotation: Set[String], inferredAtom: Literal) = {
isTrue(inferredAtom) && !annotation.contains(inferredAtom.tostringMLN)
}
def isFNAtom(annotation: Set[String], inferredAtom: Literal) = {
!isTrue(inferredAtom) && annotation.contains(inferredAtom.tostringMLN)
}
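    // Per ground atom:
    //   inferred true  & annotated     -> tp
    //   inferred true  & not annotated -> fp
    //   inferred false & annotated     -> fn
    //   otherwise (true negative)      -> not counted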
val tps_fps_fns_per_clause = enumClauses map { clauseId =>
val relevantAnnotationAtoms = annotation.filter(p => p.derivedFrom == clauseId).map(x => x.tostringMLN).toSet
inferredGroundNetwork.filter(p => p.derivedFrom == clauseId).foldLeft(0, 0, 0){ (counts, inferredAtom) =>
if (isTPAtom(relevantAnnotationAtoms, inferredAtom)) {
(counts._1 + 1, counts._2, counts._3)
} else if (isFPAtom(relevantAnnotationAtoms, inferredAtom)) {
(counts._1, counts._2 + 1, counts._3)
} else if (isFNAtom(relevantAnnotationAtoms, inferredAtom)) {
(counts._1, counts._2, counts._3 + 1)
} else {
(counts._1, counts._2, counts._3) // we don't count true negatives.
}
}
}
println(s"(tps, fps, fns): $tps_fps_fns_per_clause")
tps_fps_fns_per_clause
}
}
| 8,877 | 36.778723 | 115 | scala |
OLED | OLED-master/src/main/scala/oled/weightlearn/Auxil.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.weightlearn
import logic.Examples.Example
import logic.{Clause, Constant, Literal}
object Auxil {
def updateClauseStats(clause: Clause, uuidsToRuleIdsMap: Map[String, Int],
inferredTrue: Vector[Literal], _actuallyTrue: Vector[Literal],
incorrectlyTerminated: Vector[Literal], correctlyNotTerminated: Vector[Literal],
clausesWithUpdatedWeights: scala.Vector[Clause], targetClass: String) = {
if (targetClass == "initiatedAt" || targetClass == "terminatedAt") {
val clauseId = uuidsToRuleIdsMap(clause.uuid)
val inferredTrueByThisClause = inferredTrue.filter(p => p.derivedFrom == clauseId).map(x => x.tostringMLN).toSet
val actuallyTrue = _actuallyTrue.filter(p => p.derivedFrom == clauseId).map(x => x.tostringMLN).toSet
val tps =
if (targetClass == "terminatedAt") {
val tpsCrisp = correctlyNotTerminated.filter(p => p.derivedFrom == clauseId).map(x => x.tostringMLN).toSet
tpsCrisp.diff(inferredTrueByThisClause).size
} else {
inferredTrueByThisClause.intersect(actuallyTrue).size
}
val fps = inferredTrueByThisClause.diff(actuallyTrue).size
val fns =
if (targetClass == "terminatedAt") {
val fnsCrisp = incorrectlyTerminated.filter(p => p.derivedFrom == clauseId).map(x => x.tostringMLN).toSet
inferredTrueByThisClause.intersect(fnsCrisp).size
} else {
actuallyTrue.diff(inferredTrueByThisClause).size
}
clause.tps += tps
clause.fps += fps
clause.fns += fns
val updtWeightCl = clausesWithUpdatedWeights.find(c => c.uuid == clause.uuid).
getOrElse(throw new RuntimeException(s"Cannot find clause ${clause.uuid} in the updated weights clause vector returned from AdaGrad."))
clause.weight = updtWeightCl.weight
clause.subGradient = updtWeightCl.subGradient
/*
logger.info(s"\nClause:\n${clause.tostring}\ntps: ${clause.tps} | fps: ${clause.fps} |" +
s" fns: ${clause.fns} | seen: ${clause.seenExmplsNum} | mln-weight: ${clause.mlnWeight}" +
s" | sungradient: ${clause.subGradient}")
*/
} else { // for termination conditions???
}
}
def debug(groundNetwork: Array[Literal]) = {
groundNetwork.sortBy(x => (x.terms(2).name.split("_")(1).toInt,
x.terms(1).name.toInt, x.terms.head.name.split("_")(1), x.terms.head.name.split("_")(1)))
.map(z => z.tostringMLN).mkString("\n")
}
def getAnnotation(e: Example, enumClauses: List[Int]) = {
e.annotation.flatMap { x =>
val p = Literal.parseWPB2(x)
val functor = "initiatedAt"
val terms = p.terms.take(p.terms.length - 1)
val time = p.terms.last.name.toInt - 1
val p1 = enumClauses map { clauseId =>
val a = Literal(predSymbol = functor, terms = terms ++ List(Constant(time.toString), Constant(s"ruleId_$clauseId")))
Literal.toMLNFlat(a)
}
p1
}
}
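  /* Illustration (the input atom is hypothetical): for an annotation atom such as
   * "holdsAt(meeting(id1,id2),10)" and enumClauses = List(1, 2), this yields one
   * flattened initiatedAt atom per clause id, with the time shifted back by one
   * (to 9) and a "ruleId_k" constant appended; the exact flattened form depends on
   * Literal.toMLNFlat. */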
/* Converts a set of CNF clauses learned by OSLa to rules. For example:
*
* 0.410215389776 HoldsAt(meet(x4, x1),t1) v !Happens(inactive(x1),t0) v !Happens(walking(x4),t0) v !Next(t1,t0) v !Close(x1,x4,24,t0)
*
* turns into
*
* 0.410215389776 InitiatedAt(meeting(x4,x1),t0) :- HappensAt(inactive(x1),t0) ^ HappensAt(walking(x4),t0) ^ Close(x1,x4,24,t0)
*
* */
def cnfToRules = {
def cnfClauseToRule(clause: String) = {
val noOr = clause.replaceAll(", ", ",").replaceAll(" v ", " ").split(" ").map(x => x.trim).filter(x => x != "")
val weight = noOr.take(1).head
val atoms = noOr.drop(1)
val (head, body) = atoms.foldLeft(Vector.empty[String], Vector.empty[String]) { (accum, atom) =>
if (atom.contains("HoldsAt")) (accum._1 :+ atom, accum._2) else (accum._1, accum._2 :+ atom)
}
val _head =
if (head.head.startsWith("!")) {
head.head.replaceAll("!", "").replaceAll("HoldsAt", "TerminatedAt").replaceAll("t1", "t0")
} else {
head.head.replaceAll("HoldsAt", "InitiatedAt").replaceAll("t1", "t0")
}
val _body = body.filter(x => !x.contains("Next")).map { x =>
if (x.startsWith("!")) x.replaceAll("!", "")
else "!" + x
}.mkString(" ^ ")
s"$weight ${_head} :- ${_body}".
replaceAll("meet", "meeting").replaceAll("enter", "appear").
replaceAll("exit", "disappear").replaceAll("Happens", "HappensAt")
}
scala.io.Source.fromFile("/home/nkatz/dev/learned.mln").
getLines.filter(x => x != "").toVector.map(x => cnfClauseToRule(x)).mkString("\n")
}
}
| 5,336 | 37.121429 | 143 | scala |
OLED | OLED-master/src/main/scala/oled/weightlearn/EvalAfterWeightLearning.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.weightlearn
import logic.Examples.Example
import logic.{Constant, Literal}
import utils.Utils
import scala.io.Source
import scala.sys.process._
class EvalAfterWeightLearning(val mlnClauses: List[String], val testData: Iterator[Example]) {
private val cwd = System.getProperty("user.dir")
private val scriptsPath = if (cwd.contains("scripts")) cwd + "/weight-learn-eval-meet" else cwd + "/scripts/weight-learn-eval-meet"
//private val scriptsPath = if (cwd.contains("scripts")) cwd+"/weight-learn-eval-move" else cwd+"/scripts/weight-learn-eval-move"
private val bkFile = scriptsPath + "/bk.mln"
private val resultsFile = scriptsPath + "/results"
private val inferScript = scriptsPath + "/infer.sh"
private val compileScript = scriptsPath + "/compile.sh"
private val compiled = scriptsPath + "/compiled.mln"
private val evidenceFile = scriptsPath + "/evidence.db"
private val domainFile = scriptsPath + "/domain.lp"
private val bk = Source.fromFile(bkFile).getLines.
toList.takeWhile(line => line != "// LEARNT RULES FROM HERE!") ++
List("// LEARNT RULES FROM HERE!") ++ mlnClauses
Utils.writeLine(bk.mkString("\n"), bkFile, "overwrite")
/* Compile the MLN. This must take place just once for all testing data. */
private val command1 = Seq(compileScript, bkFile, compiled).mkString(" ")
private val res1 = command1 !!
def getCounts() = {
testData.foldLeft(0, 0, 0) { (accum, testBatch) =>
val (_tps, _fps, _fns) = (accum._1, accum._2, accum._3)
/* Get the domain signatures for events and fluents */
val (sigs, nexts) = getEventFluentSignatures(testBatch).
partition(p => p.startsWith("event") || p.startsWith("fluent"))
val signatures = sigs.map { atom =>
val parsed = Literal.parseWPB2(atom)
val compound = {
val innerSignatureAtom = parsed.terms.head.asInstanceOf[Literal]
// All inner terms of the signature atom should be constants,
// otherwise something's wrong...
Literal(predSymbol = innerSignatureAtom.predSymbol,
terms = innerSignatureAtom.terms.map(x => Constant(x.name.capitalize))).tostring
}
val flattened = Literal.toMLNFlat(parsed).terms.head.tostring
s"$flattened = $compound"
}
val nextAtoms = nexts.toVector.map { x => toMLNFormat(x) }
val _narrativeToMLN = testBatch.narrative.toVector.map { x => toMLNFormat(x) }
val (__narrativeToMLN, startTimePred) = _narrativeToMLN.partition(x => !x.startsWith("Starttime"))
val narrativeToMLN = __narrativeToMLN :+ startTimePred.head.replace("Starttime", "StartTime")
val evidence = (signatures.toVector ++ Vector("\n\n") ++ narrativeToMLN ++ nextAtoms).mkString("\n")
Utils.writeLine(evidence, evidenceFile, "overwrite")
/* Run MAP inference */
val command2 = Seq(inferScript, compiled, evidenceFile, resultsFile).mkString(" ")
val res2 = command2 !!
/* Read off the inference results */
val inferredAtoms = Source.fromFile(resultsFile).getLines.
filter(p => p.startsWith("HoldsAt") && p.split(" ")(1) == "1").map(x => x.split(" ")(0)).toSet
val annotationAtoms = testBatch.annotation.map { x => toMLNFormat(x) }.toSet
/* Calculate tps, fps, fns */
val tps = inferredAtoms.intersect(annotationAtoms).size
val fps = inferredAtoms.diff(annotationAtoms).size
val fns = annotationAtoms.diff(inferredAtoms).size
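// For intuition (illustrative atoms): if inferredAtoms = {a, b} and annotationAtoms = {b, c},
// then tps = 1 (b), fps = 1 (a) and fns = 1 (c).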
println(tps, fps, fns)
(_tps + tps, _fps + fps, _fns + fns)
}
}
/* Utility functions */
def toMLNFormat(x: String) = {
val parsed = Literal.parseWPB2(x)
Literal.toMLNFlat(parsed).tostringMLN
}
def getEventFluentSignatures(e: Example) = {
val all = (e.narrativeASP ++ List(s"""#include "$domainFile".""")).mkString("\n")
val f = Utils.getTempFile("ground", ".lp")
Utils.writeLine(all, f.getCanonicalPath, "overwrite")
val cores = Runtime.getRuntime.availableProcessors
val command = Seq("clingo", f.getCanonicalPath, "-Wno-atom-undefined", s"-t$cores", "--verbose=0").mkString(" ")
val result = command.lineStream_!
val results = result.toVector
val atoms = results(0)
val status = results(1)
if (status == "UNSATISFIABLE") throw new RuntimeException("UNSATISFIABLE program!")
atoms.split(" ")
}
}
// File: OLED-master/src/main/scala/oled/weightlearn/MAPInference.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.weightlearn
import app.runutils.Debug
import com.typesafe.scalalogging.LazyLogging
import logic.{Clause, Literal}
import optimus.algebra.AlgebraOps._
import optimus.algebra.Expression
import optimus.optimization._
import optimus.optimization.enums.{PreSolve, SolverLib}
import optimus.optimization.model.MPFloatVar
class MAPInference extends LazyLogging {
implicit val problem = MPModel(SolverLib.LpSolve)
/* Non-empty clauseIds are passed when rules are evaluated in parallel.
* See also MLNClauseEvalWorker:
*
* AdaGrad.adagrad(groundNetwork, x.clauses, trueGroundingsPerClause.toList, x.clauseIds.toList)
*
* and
*
* solver.infer(groundNetwork, clausesWithUpdatedWeights, x.clauseIds.toList)
* */
def infer(
groundNetwork: Vector[Literal],
liftedClauses: Vector[Clause], clauseIds: List[Int] = Nil): Vector[Literal] = {
val enumClauses = clauseIds match {
case Nil => (1 to liftedClauses.length).toList
case _ => clauseIds
}
val idsToRuleIdsMap = (enumClauses zip liftedClauses).toMap
val sTranslation = System.currentTimeMillis()
var literalLPVars = Map.empty[Int, MPFloatVar]
var expressions: List[Expression] = Nil
groundNetwork.zipWithIndex.foreach { case (_, idx) =>
literalLPVars += idx -> MPFloatVar(s"y$idx", 0, 1)
}
groundNetwork.zipWithIndex.foreach { case (lit, idx) =>
// Literal weight:
val weight = idsToRuleIdsMap(lit.derivedFrom).weight
val floatVar = literalLPVars(idx)
if (!lit.isNAF && weight != 0) expressions ::= weight * floatVar
if (lit.isNAF) add((1 - floatVar) >:= 1)
}
val eTranslation = System.currentTimeMillis()
//logger.info("Translation time: " + (eTranslation - sTranslation))
// Step 4: Optimize function subject to the constraints introduced
val solveTimed = utils.Utils.time{
maximize(sum(expressions))
start(PreSolve.CONSERVATIVE)
release()
}
//logger.info("Solver time: " + solveTimed._2)
Debug.totalILPSolverTime += solveTimed._2
var nonIntegralSolutionsCounter = 0
var fractionalSolutions = Vector.empty[Int]
for ((id, lpVar) <- literalLPVars) {
val value = lpVar.value.getOrElse {
logger.error(s"There is no solution for variable '${lpVar.symbol}'")
sys.exit()
}
val normalisedValue = if (value > 0.99) 1.0 else value
if (normalisedValue != 0.0 && normalisedValue != 1.0) {
nonIntegralSolutionsCounter += 1
fractionalSolutions +:= id
} else {
val currentAtom = groundNetwork(id)
currentAtom.mlnTruthValue = if (normalisedValue == 0) false else true
}
}
val sRoundUp = System.currentTimeMillis()
if (nonIntegralSolutionsCounter > 0) {
for (i <- fractionalSolutions.indices) {
val id = fractionalSolutions(i)
val currentAtom = groundNetwork(id)
val weight = idsToRuleIdsMap(currentAtom.derivedFrom).weight
if (currentAtom.mlnTruthValue && !currentAtom.isNAF && weight >= 0)
currentAtom.mlnTruthValue = true
else if (currentAtom.mlnTruthValue && !currentAtom.isNAF && weight < 0)
currentAtom.mlnTruthValue = false
else if (!currentAtom.mlnTruthValue && !currentAtom.isNAF && weight >= 0)
currentAtom.mlnTruthValue = false
else if (!currentAtom.mlnTruthValue && !currentAtom.isNAF && weight < 0)
currentAtom.mlnTruthValue = true
else if (currentAtom.isNAF) currentAtom.mlnTruthValue = false
/* else if (currentAtom.mlnTruthValue && currentAtom.isNAF && weight >= 0)
currentAtom.mlnTruthValue = false
else if (currentAtom.mlnTruthValue && currentAtom.isNAF && weight < 0)
currentAtom.mlnTruthValue = true
else if (!currentAtom.mlnTruthValue && !currentAtom.isNAF && weight >= 0)
currentAtom.mlnTruthValue = false
else if (!currentAtom.mlnTruthValue && !currentAtom.isNAF && weight < 0)
currentAtom.mlnTruthValue = true
else if (!currentAtom.mlnTruthValue && currentAtom.isNAF && weight >= 0)
currentAtom.mlnTruthValue = true
else if (!currentAtom.mlnTruthValue && currentAtom.isNAF && weight < 0)
currentAtom.mlnTruthValue = false*/
}
}
val eRoundUp = System.currentTimeMillis()
//logger.info("Roundup time: " + (eRoundUp - sRoundUp))
groundNetwork
}
}
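/* A minimal, self-contained sketch (not part of OLED) of the LP-relaxation pattern
 * used by MAPInference above: each ground atom gets a variable in [0,1], non-negated
 * literals contribute weight * variable to the objective, and negated (NAF) literals
 * are clamped to 0 via a constraint. The object name, atoms and weights below are
 * illustrative only. */
object ToyLPRelaxationSketch {

  import optimus.algebra.AlgebraOps._
  import optimus.optimization._
  import optimus.optimization.enums.{PreSolve, SolverLib}
  import optimus.optimization.model.MPFloatVar

  def toyMAP(): Unit = {
    implicit val problem = MPModel(SolverLib.LpSolve)
    // Two candidate ground atoms: y0 comes from a clause with weight 2.0,
    // y1 appears negated, so it must be false in the MAP state.
    val y0 = MPFloatVar("y0", 0, 1)
    val y1 = MPFloatVar("y1", 0, 1)
    add((1 - y1) >:= 1) // NAF literal: forces y1 = 0
    maximize(sum(List(2.0 * y0)))
    start(PreSolve.CONSERVATIVE)
    println((y0.value, y1.value)) // expected MAP assignment: (Some(1.0), Some(0.0))
    release()
  }
}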
// File: OLED-master/src/main/scala/oled/weightlearn/parallel/IO.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.weightlearn.parallel
import akka.actor.ActorRef
import app.runutils.RunningOptions
import logic.Examples.Example
import logic.{Clause, Literal}
object IO {
class MLNClauseHandlingInput(val clauses: Vector[Clause], val clauseIds: Vector[Int], val example: Example,
val inps: RunningOptions, val targetClass: String)
class MLNClauseHandlingOutput(val inferredTrue: Vector[Literal], val actuallyTrue: Vector[Literal],
val incorrectlyTerminated: Vector[Literal], val correctlyNotTerminated: Vector[Literal],
val clausesWithUpdatedWeights: Vector[Clause], val totalExampleCount: Int)
/* The splitEvery parameter is used to split clauses into smaller batches.
* Workers is a list of actors (as many as the available cores) that have already been started. */
class MLNClauseHandingMasterInput(val clauses: Vector[Clause], val example: Example,
val inps: RunningOptions, val targetClass: String,
val splitEvery: Int, val workers: Vector[ActorRef])
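/* For illustration: MLNClauseEvalMaster batches clauses with grouped(splitEvery),
 * so with 7 clauses and splitEvery = 3 the workers receive batches of sizes 3, 3 and 1,
 * paired with the clause-id batches (1,2,3), (4,5,6) and (7). */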
class NodeDoneMessage(val sender: String)
class FinishedBatch
class TheoryRequestMessage
}
// File: OLED-master/src/main/scala/oled/weightlearn/parallel/MLNClauseEvalMaster.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.weightlearn.parallel
import akka.actor.{Actor, ActorRef}
import app.runutils.RunningOptions
import logic.Examples.Example
import logic.{Clause, Literal}
import oled.weightlearn.parallel.IO.{MLNClauseHandingMasterInput, MLNClauseHandlingInput, MLNClauseHandlingOutput}
import org.slf4j.LoggerFactory
class MLNClauseEvalMaster(inps: RunningOptions, targetClass: String) extends Actor {
//private val logger = LoggerFactory.getLogger(self.path.name)
private var counter = 0
private var resultsVector = Vector.empty[MLNClauseHandlingOutput]
private var clausesZippedWithIndex: Iterator[(Vector[Clause], Vector[Int])] = Iterator[(Vector[Clause], Vector[Int])]()
private var example = Example()
private var workers = Vector.empty[ActorRef]
def reset() = {
this.counter = 0
this.resultsVector = Vector.empty[MLNClauseHandlingOutput]
this.clausesZippedWithIndex = Iterator[(Vector[Clause], Vector[Int])]()
this.example = Example()
this.workers = Vector.empty[ActorRef]
}
def receive = {
case x: MLNClauseHandingMasterInput =>
reset()
example = x.example
val clauseBatches = x.clauses.grouped(x.splitEvery).toVector
val clauseIds = (1 to x.clauses.length).toVector.grouped(x.splitEvery).toVector
val zipped = clauseBatches zip clauseIds
/*
if (workers.length > clauseBatches.length) {
throw new RuntimeException("Workers > clause batches. I need to handle this dynamically...")
System.exit(-1)
}
*/
workers = x.workers.take(clauseBatches.length)
this.counter = zipped.length
this.clausesZippedWithIndex = zipped.toIterator
// Send the first batches to the worker actors
workers foreach { worker =>
val batch = clausesZippedWithIndex.next()
val (clauses, ids) = (batch._1, batch._2)
val workerInput = new MLNClauseHandlingInput(clauses, ids, example, inps, targetClass)
worker ! workerInput
}
// send clause batches to workers in a round-robin manner
/*
var i = 0
zipped foreach { pair =>
val (clauseBatch, batchIds) = (pair._1, pair._2)
if ( i == workers.length) i = 0
val worker = workers(i)
val input = new MLNClauseHandlingInput(clauseBatch, batchIds, x.example, x.inps, x.targetClass, x.jep)
worker ! input
i += 1
}
*/
case x: MLNClauseHandlingOutput =>
counter -= 1
resultsVector = resultsVector :+ x
//logger.info(s"Remaining: $counter")
if (counter == 0) {
// check the total examples count, just to be on the safe side.
// All counts returned by each worker should be equal.
val (totalExmplCount, allCountsEqual) =
resultsVector.foldLeft(resultsVector.head.totalExampleCount, true) { (a, y) =>
val (previousCount, areCountsEqualSoFar) = (a._1, a._2)
val currentExmplCount = y.totalExampleCount
(currentExmplCount, areCountsEqualSoFar && previousCount == currentExmplCount)
}
if (!allCountsEqual) {
//val stop = "stop"
throw new RuntimeException("Example counts returned from multiple workers are not equal.")
System.exit(-1)
}
val (inferredTrue, actuallyTrue, incorrectlyTerminated, correctlyNotTerminated, clausesWithUpdatedWeights) =
this.resultsVector.foldLeft(Vector.empty[Literal], Vector.empty[Literal], Vector.empty[Literal],
Vector.empty[Literal], Vector.empty[Clause]) { (accum, y) =>
val _inferredTrue = accum._1 ++ y.inferredTrue
val _actuallyTrue = accum._2 ++ y.actuallyTrue
val _incorrectlyTerminated = accum._3 ++ y.incorrectlyTerminated
val _correctlyNotTerminated = accum._4 ++ y.correctlyNotTerminated
val _clausesWithUpdatedWeights = accum._5 ++ y.clausesWithUpdatedWeights
(_inferredTrue, _actuallyTrue, _incorrectlyTerminated, _correctlyNotTerminated, _clausesWithUpdatedWeights)
}
context.parent ! new MLNClauseHandlingOutput(inferredTrue,
actuallyTrue, incorrectlyTerminated, correctlyNotTerminated,
clausesWithUpdatedWeights, totalExmplCount) // the parent actor here should be a WeightedTheoryLearner
} else {
// send the next batch to the sender for processing
if (clausesZippedWithIndex.nonEmpty) {
val nextBatch = clausesZippedWithIndex.next()
val (clauses, ids) = (nextBatch._1, nextBatch._2)
val workerInput = new MLNClauseHandlingInput(clauses, ids, example, inps, targetClass)
sender ! workerInput
}
}
}
}
// File: OLED-master/src/main/scala/oled/weightlearn/parallel/MLNClauseEvalWorker.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.weightlearn.parallel
import akka.actor.Actor
import com.typesafe.scalalogging.LazyLogging
import logic.Literal
import oled.functions.WeightLearningFunctions._
import oled.weightlearn.parallel.IO.{MLNClauseHandlingInput, MLNClauseHandlingOutput}
import oled.weightlearn.{AdaGrad, MAPInference}
import org.slf4j.LoggerFactory
import utils.Utils
class MLNClauseEvalWorker extends Actor {
private val logger = LoggerFactory.getLogger(self.path.name)
def receive = {
case x: MLNClauseHandlingInput => sender ! process(x)
}
def process(x: MLNClauseHandlingInput) = {
val ((groundNetwork, trueGroundingsMap, totalExmplCount, annotationMLN, incorrectlyTerminated, correctlyNotTerminated), groundingTime) = {
val timed = Utils.time{ getGroundTheory(x.clauses, x.example, x.inps, x.targetClass, x.clauseIds.toList) }
(timed._1, timed._2)
}
val trueGroundingsPerClause = x.clauseIds map (clauseId => trueGroundingsMap(clauseId).sum)
val (clausesWithUpdatedWeights, adagradTime) = {
val timed = Utils.time{
AdaGrad.adagrad(x.inps, groundNetwork, x.clauses,
trueGroundingsPerClause.toList, annotationMLN,
correctlyNotTerminated, incorrectlyTerminated, x.targetClass, x.clauseIds.toList)
}
(timed._1, timed._2)
}
// Perform inference
val solver = new MAPInference
val (newInferredGroundNetwork, mapInferenceTime): (Vector[Literal], Double) = {
val timed = Utils.time { solver.infer(groundNetwork, clausesWithUpdatedWeights, x.clauseIds.toList) }
(timed._1, timed._2)
}
// Atoms that inferred as true:
val inferredTrue = newInferredGroundNetwork.filter(x => x.mlnTruthValue)
val actuallyTrue = annotationMLN
new MLNClauseHandlingOutput(inferredTrue, actuallyTrue, incorrectlyTerminated,
correctlyNotTerminated, clausesWithUpdatedWeights, totalExmplCount)
}
}
// File: OLED-master/src/main/scala/oled/weightlearn/parallel/Test.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.weightlearn.parallel
import app.runutils.RunningOptions
import logic.Examples.Example
import logic.{Literal, Theory}
import oled.weightlearn.{Auxil, MAPInference}
import utils.Utils
import oled.functions.WeightLearningFunctions.getGroundTheory
object Test extends App {
println(Auxil.cnfToRules)
def predictSate(topTheory: Theory, e: Example, inps: RunningOptions, targetClass: String) = {
val clauses = topTheory.clauses.map { topClause =>
val bestRef = topClause.refinements.sortBy(x => -x.weight).head
if (topClause.weight > bestRef.weight) topClause else bestRef
}
val ((groundNetwork, trueGroundingsMap, totalExmplCount, annotationMLN, incorrectlyTerminated, correctlyNotTerminated), groundingTime) = {
val timed = Utils.time{ getGroundTheory(clauses.toVector, e, inps, targetClass) }
(timed._1, timed._2)
}
// Perform inference
val solver = new MAPInference
val (inferredGroundNetwork, mapInferenceTime): (Vector[Literal], Double) = {
val timed = Utils.time { solver.infer(groundNetwork, clauses.toVector) }
(timed._1, timed._2)
}
val inferredTrue = inferredGroundNetwork.filter(x => x.mlnTruthValue)
val actuallyTrue = annotationMLN
(inferredTrue, actuallyTrue, incorrectlyTerminated, correctlyNotTerminated, clauses, totalExmplCount)
}
}
// File: OLED-master/src/main/scala/oled/weightlearn/parallel/WeightedTheoryLearner.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.weightlearn.parallel
import akka.actor.{Actor, ActorRef, Props}
import app.runutils.IOHandling.InputSource
import app.runutils.{Globals, RunningOptions}
import logic.Examples.Example
import logic.{Clause, Constant, Literal, Theory}
import oled.single_core.TheoryLearner
import org.slf4j.LoggerFactory
import utils.{ASP, Utils}
import oled.functions.SingleCoreOLEDFunctions._
import utils.Implicits._
import oled.functions.WeightLearningFunctions._
import oled.weightlearn.{Auxil, MAPInference}
import oled.weightlearn.parallel.IO.{FinishedBatch, MLNClauseHandingMasterInput, MLNClauseHandlingOutput, NodeDoneMessage, TheoryRequestMessage}
import woled.WoledUtils
/* This class is used when learning weights while allowing for parallel clause evaluation.
* The task requires collaboration with MLNClauseEvalMaster and uses blocking (we need to
* wait until the evaluator class finishes its job before moving on to the next mini-batch).
* For this reason, the functionality here differs significantly from the regular one of
* oled.weightlearn.WeightedTheoryLearner (where all clauses are evaluated in one go).
*
* */
class WeightedTheoryLearner[T <: InputSource](inps: RunningOptions, trainingDataOptions: T,
testingDataOptions: T, trainingDataFunction: T => Iterator[Example],
testingDataFunction: T => Iterator[Example],
targetClass: String) extends TheoryLearner(inps,
trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction, targetClass) {
import context.become
private val startTime = System.nanoTime()
private val logger = LoggerFactory.getLogger(self.path.name)
private var repeatFor = inps.repeatFor
private var data = Iterator[Example]()
private var topTheory = Theory()
private var workers: Vector[ActorRef] = Vector.empty[ActorRef]
private val master: ActorRef = context.actorOf(Props(new MLNClauseEvalMaster(inps, initorterm)), name = s"${this.initorterm}-master")
override def receive = {
case "go" =>
workers = getWorkersPool
start
}
def getTrainData = trainingDataFunction(trainingDataOptions)
def getNextBatch = if (data.isEmpty) Example() else data.next()
def start = {
this.repeatFor -= 1
data = getTrainData
if (data.isEmpty) { logger.error(s"No data received."); System.exit(-1) }
become(normalState)
self ! getNextBatch
}
def normalState: Receive = {
case exmpl: Example =>
if (exmpl == Example()) {
logger.info(s"Finished the data")
if (repeatFor > 0) {
self ! "start-over"
} else if (repeatFor == 0) {
val endTime = System.nanoTime()
val totalTime = (endTime - startTime) / 1000000000.0
val theory = if (topTheory.clauses.nonEmpty) topTheory.clauses else inps.globals.state.getAllRules(inps.globals, "top")
// used for printing out the average loss vector
def avgLoss(in: Vector[Int]) = {
in.foldLeft(0, 0, Vector.empty[Double]){ (x, y) =>
val (count, prevSum, avgVector) = (x._1, x._2, x._3)
val (newCount, newSum) = (count + 1, prevSum + y)
(newCount, newSum, avgVector :+ newSum.toDouble / newCount)
}
}
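// For intuition (illustrative values): avgLoss(Vector(2, 4, 6)) == (3, 12, Vector(2.0, 3.0, 4.0))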
logger.info(s"\nTheory:\n${Theory(theory).showWithStats}\nTraining time: $totalTime")
logger.info(s"Mistakes per batch:\n${inps.globals.state.perBatchError}")
logger.info(s"Accumulated mistakes per batch:\n${inps.globals.state.perBatchError.scanLeft(0.0)(_ + _).tail}")
logger.info(s"Average loss vector:\n${avgLoss(inps.globals.state.perBatchError)}")
logger.info(s"Sending the theory to the parent actor")
if (trainingDataOptions != testingDataOptions) { // test set given, eval on that
val testData = testingDataFunction(testingDataOptions)
WoledUtils.evalOnTestSet(testData, theory, inps)
}
context.parent ! (topTheory, totalTime)
} else {
throw new RuntimeException("This should never have happened (repeatFor is now negative?)")
}
} else {
become(processingState)
self ! exmpl
}
case _: FinishedBatch => self ! getNextBatch
case _: TheoryRequestMessage => sender ! topTheory
case "start-over" =>
logger.info(s"Starting a new training iteration (${this.repeatFor - 1} iterations remaining.)")
start
}
def processingState: Receive = {
case e: Example =>
// All the work takes place here
var newTopTheory = topTheory
val useToPredict =
if (newTopTheory.clauses.nonEmpty) {
newTopTheory.clauses.toVector foreach
(rule => if (rule.refinements.isEmpty) rule.generateCandidateRefs(inps.globals))
newTopTheory.clauses.map { topClause =>
val bestRef = topClause.refinements.sortBy(x => -x.weight).head
if (topClause.weight > bestRef.weight) topClause else bestRef
}
} else newTopTheory.clauses
//val error = predictSate(useToPredict, e, inps, initorterm, jep)
//Globals.errorProb = Globals.errorProb :+ error
val (_, startNewRulesTime) = Utils.time {
val startNew =
//if (this.inps.tryMoreRules && (this.targetClass == "terminated" ||this.targetClass == "initiated" )) true
if (this.inps.tryMoreRules && targetClass == "terminated") {
true
} else {
//newTopTheory.growNewRuleTest(e, jep, initorterm, inps.globals)
Theory(useToPredict).growNewRuleTest(e, initorterm, inps.globals)
}
//*/
if (startNew) {
val newRules_ = if (inps.tryMoreRules) {
// Don't use the current theory here to force the system to generate new rules
generateNewRules(Theory(), e, initorterm, inps.globals)
} else {
//generateNewRules(topTheory, e, jep, initorterm, inps.globals)
generateNewRules(Theory(useToPredict), e, initorterm, inps.globals)
}
// Just to be on the safe side...
val newRules = newRules_.toVector.filter(x => x.head.functor == initorterm)
if (newRules.nonEmpty) {
logger.info(s"Generated ${newRules.length} new rules.")
if (topTheory.clauses.nonEmpty) {
//val largestWeight = topTheory.clauses.sortBy(x => -x.mlnWeight).head.mlnWeight
//newRules.foreach(x => x.mlnWeight = largestWeight)
//newRules.foreach(x => x.mlnWeight = 1.0)
}
}
if (inps.compressNewRules) newTopTheory = topTheory.clauses ++ filterTriedRules(topTheory, newRules, logger)
else newTopTheory = topTheory.clauses ++ newRules
}
}
if (newTopTheory.clauses.nonEmpty) {
val generate_refs_timed = Utils.time {
newTopTheory.clauses.toVector foreach
(rule => if (rule.refinements.isEmpty) rule.generateCandidateRefs(inps.globals))
}
val allClauses = newTopTheory.clauses.flatMap { x =>
if (x.body.nonEmpty) List(x) ++ x.refinements
else x.refinements
}
val enumClauses = (1 to allClauses.length).toList
val uuidsToRuleIdsMap = (allClauses.map(_.uuid) zip enumClauses).toMap
val input = new MLNClauseHandingMasterInput(allClauses.toVector, e, inps, initorterm, splitEvery = 3, workers)
//val error = predictSate(useToPredict, e, inps, initorterm, jep)
//Globals.errorProb = Globals.errorProb :+ error
become(waitingState(newTopTheory, uuidsToRuleIdsMap))
master ! input
} else {
topTheory = newTopTheory
become(normalState)
self ! new FinishedBatch
}
case _ => throw new RuntimeException("This shouldn't have happened.") // just to be on the safe side...
}
def waitingState(newTopTheory: Theory, uuidsToRuleIdsMap: Map[String, Int]): Receive = {
case result: MLNClauseHandlingOutput =>
val exmplCount = result.totalExampleCount
Utils.time {
newTopTheory.clauses foreach { clause =>
if (clause.body.nonEmpty) Auxil.updateClauseStats(clause,
uuidsToRuleIdsMap, result.inferredTrue, result.actuallyTrue, result.incorrectlyTerminated,
result.correctlyNotTerminated, result.clausesWithUpdatedWeights, this.initorterm)
clause.refinements.foreach(ref =>
Auxil.updateClauseStats(ref, uuidsToRuleIdsMap, result.inferredTrue,
result.actuallyTrue, result.incorrectlyTerminated, result.correctlyNotTerminated,
result.clausesWithUpdatedWeights, this.initorterm))
clause.seenExmplsNum += exmplCount
clause.refinements.toVector.foreach(y => y.seenExmplsNum += exmplCount)
clause.supportSet.clauses.toVector.foreach(y => y.seenExmplsNum += exmplCount)
}
}
val (expanded, expTimed) = Utils.time { expandRules(newTopTheory, inps, logger) }
if (inps.onlinePruning) topTheory = pruneRules(expanded._1, inps, logger)
else topTheory = expanded._1
become(normalState)
self ! new FinishedBatch
case _ => throw new RuntimeException("This shouldn't have happened.") // just to be on the safe side...
}
def getWorkersPool = {
///*
val cores = Runtime.getRuntime.availableProcessors()
val workerNames = (1 to cores).toList map (i => s"Worker-${this.initorterm}-$i")
workerNames map { name => context.actorOf(Props(new MLNClauseEvalWorker), name = name) } toVector
//*/
//Vector(context.actorOf(Props( new MLNClauseEvalWorker ), name = "worker"))
}
/* Perform MAP inference to detect whether a new rule should be added.
* This is half-finished and is currently not used anywhere. */
def predictSate(useToPredict: List[Clause], e: Example, inps: RunningOptions, targetClass: String) = {
val clauses = useToPredict
val ((groundNetwork, trueGroundingsMap, totalExmplCount, annotationMLN, incorrectlyTerminated, correctlyNotTerminated), groundingTime) = {
val timed = Utils.time{ getGroundTheory(clauses.toVector, e, inps, targetClass) }
(timed._1, timed._2)
}
// Perform inference
val solver = new MAPInference
val (inferredGroundNetwork, mapInferenceTime): (Vector[Literal], Double) = {
val timed = Utils.time { solver.infer(groundNetwork, clauses.toVector) }
(timed._1, timed._2)
}
val inferredTrue = inferredGroundNetwork.filter(x => x.mlnTruthValue)
val actuallyTrue = annotationMLN
val _inferredTrue = inferredTrue.map(x => x.tostringMLN).toSet
val _actuallyTrue = actuallyTrue.map(x => x.tostringMLN).toSet
val tps = _inferredTrue.intersect(_actuallyTrue).size
val fps = _inferredTrue.diff(_actuallyTrue).size
val error = Math.abs(_actuallyTrue.size - (tps + fps))
error
//Math.abs(_inferredTrue.size - _actuallyTrue.size)
//(inferredTrue, actuallyTrue, incorrectlyTerminated, correctlyNotTerminated, clauses, totalExmplCount)
}
}
// File: OLED-master/src/main/scala/utils/ASP.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils
import java.io.File
import app.runutils.{Globals, RunningOptions}
import com.typesafe.scalalogging._
import logic.Examples._
import logic._
import utils.parsers.ASPResultsParser
import scala.sys.process._
object ASP extends ASPResultsParser with LazyLogging {
/**
* Transforms input to an ASP program. The program is written in an output file that is passed to the ASP solver.
* The writeToFile argument is the only non-optional parameter of the method.
*
* @param writeToFile @tparam String path to file where the ASP program is written.
* @param program @tparam List[String] an (optional) set of ground or non-ground rules and/or ground facts.
* @param generateDirectives @tparam List[String] an (optional) list containing declarations for atoms to be generated during the computation
* of answer sets.
* @example of such input:
*
* List("father(X,Y):person(X):person(Y)","grandfather(X,Y):person(X):person(Y)")
*
* Such a list is transformed into the "generate" part of the program:
*
* {father(X,Y):person(X):person(Y), grandfather(X,Y):person(X):person(Y)}.
* @param generateAtLeast @tparam Int an (optional) lower bound for the number of generated atoms to be included in an answer set.
* @param generateAtMost @tparam Int an (optional) upper bound for the number of generated atoms to be included in an answer set.
* @param minimizeStatements @tparam List[String] an (optional) list of atoms whose instances in an answer set should be minimized.
* @example of such input:
*
* List("father(X,Y)","grandfather(X,Y)"))
*
* Such a list is transformed into a minimize statement:
*
* #minimize{father(X,Y),grandfather(X,Y)}.
* @param maximizeStatements @tparam List[String] similar to the above, for maximize directives.
* @param constraints @tparam List[List[String]] a set of integrity constraints. Example:
*
* List(List("father(X,Y)","mother(X,Y)"), List("father(X,Y)","not male(X)"))
*
* Such input is transformed to integrity constraints in the ASP program:
*
* :- father(X,Y), mother(X,Y).
* :- father(X,Y), not male(X).
* @param show @tparam List[String] an (optional) list of atoms that are to be displayed. All other atoms in an answer set are hidden.
* A #hide directive is generated if this list is not empty.
* @example of such input:
*
* List("father(X,Y)","mother(X,Y)") or
*
* List("father/2","mother2")
*
* Such input is transformed into
*
*
* #hide.
* #show father(X,Y).
* #show mother(X,Y).
* @param extra @tparam List[String] any extra knowledge, that is simply written in the ASP file
*/
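/* A hypothetical usage sketch (the atoms and the file path are made up for illustration):
 *
 * toASPprogram(
 *   program = List("person(kate).", "person(nick)."),
 *   generateDirectives = List("father(X,Y):person(X):person(Y)"),
 *   minimizeStatements = List("father(X,Y)"),
 *   show = List("father/2"),
 *   writeToFile = "/tmp/induction.lp")
 *
 * writes the person/1 facts, the choice rule {father(X,Y):person(X):person(Y)}.,
 * the statement #minimize{ father(X,Y)}. and the directives #show. / #show father/2.
 * to /tmp/induction.lp, which can then be passed to the solver via solve().
 */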
/*
def toASPprogram(program: List[String] = Nil,
generateDirectives: List[String] = Nil,
generateAtLeast: Int = 1000000000,
generateAtMost: Int = 1000000000,
minimizeStatements: List[String] = Nil,
maximizeStatements: List[String] = Nil,
constraints: List[List[String]] = Nil,
show: List[String] = Nil,
extra: List[String] = Nil,
writeToFile: String): Any = {
// Create generate-and-test statements
val generates =
genAndTestDirectives(generateDirectives,generateAtLeast,generateAtMost,writeToFile).mkString("\n")
// Create the minimize statements
val minStatement = minimizeStatements match { // This is a single string
case Nil => ""
case _ => "#minimize{"+minimizeStatements.mkString(",")+"}.\n"
}
// Create the maximize statements
val maxStatement = maximizeStatements match { // This is a single string
case Nil => ""
case _ => "#maximize{"+maximizeStatements.mkString(",")+"}.\n"
}
// Create integrity constraints
val constrs = constraints match { // This is a list of strings
case Nil => ""
case _ => (constraints map (x => ":- "+x.mkString(",")+".")).mkString("\n")
}
// Create the show/hide directives:
val (hideDir, showDirs) = show match {
case Nil => ("", "")
case _ => ("#hide.\n", (for (x <- show) yield "#show " + x + ".").mkString("\n") )
}
val all =
List(program.mkString("\n"),generates,minStatement,
maxStatement,constrs,hideDir,showDirs,extra.mkString("\n")).mkString("\n")
val debug =
List(generates,minStatement,
maxStatement,constrs,hideDir,showDirs,extra.mkString("\n")).mkString("\n")
Utils.writeLine(all, writeToFile, "append")
logger.debug("\nAbduction program:\n" + all + "\n")
}
def genAndTestDirectives(gens: List[String],atLeast:Int,atMost: Int,file: String) = {
val genStatems = (gens, atLeast, atMost) match {
case x @ (Nil, _, _) => List()
case x @ (head :: tail, 1000000000, 1000000000) => for (e <- x._1) yield "{" + e + "}."
case x @ (head :: tail, lower, 1000000000) => (head :: tail).map(y => "$lower {" + y + "}.")
case x @ (head :: tail, 1000000000, upper) => (head :: tail).map(y => "0 {" + y + "} $upper.")
case x @ (head :: tail, lower, upper) => (head :: tail).map(y => "$lower {" + y + "} $upper.")
}
//Utils.writeToFile(new java.io.File(file), "append")(p => genStatems foreach (p.println))
genStatems
}
*/
def toASPprogram(
program: List[String] = Nil,
generateDirectives: List[String] = Nil,
generateAtLeast: Int = 1000000000,
generateAtMost: Int = 1000000000,
minimizeStatements: List[String] = Nil,
maximizeStatements: List[String] = Nil,
constraints: List[List[String]] = Nil,
show: List[String] = Nil,
extra: List[String] = Nil,
writeToFile: String): Any = {
Utils.clearFile(writeToFile) // clear here, append everywhere else.
Utils.writeToFile(new java.io.File(writeToFile), "append")(p => program foreach (p.println))
val genStatems = (generateDirectives, generateAtLeast, generateAtMost) match {
case x @ (Nil, _, _) => List()
case x @ (head :: tail, 1000000000, 1000000000) => for (e <- x._1) yield "{" + e + "}."
case x @ (head :: tail, lower, 1000000000) => (head :: tail).map(y => s"$lower {" + y + "}.\n")
case x @ (head :: tail, 1000000000, upper) => (head :: tail).map(y => "0 {" + y + s"} $upper.\n")
case x @ (head :: tail, lower, upper) => (head :: tail).map(y => s"$lower {" + y + s"} $upper.\n")
}
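// Illustrative bounds: for generateDirectives = List("p(X):q(X)"), generateAtLeast = 1
// and generateAtMost = 2, the emitted statement is "1 {p(X):q(X)} 2.", i.e. between one
// and two instances of p/1 may be included in an answer set (the 1000000000 defaults
// above mean "no bound on this side").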
Utils.writeToFile(new java.io.File(writeToFile), "append")(p => genStatems foreach (p.println))
val minStatement = minimizeStatements match { // This is a single string
case Nil => ""
case _ => "#minimize{ " + minimizeStatements.mkString(",") + "}.\n"
}
val maxStatement = maximizeStatements match { // This is a single string
case Nil => ""
case _ => "#maximize{ " + maximizeStatements.mkString(",") + "}.\n"
}
val constrs = constraints match { // This is a list of strings
case Nil => List("")
case _ => for (x <- constraints) yield ":- " + x.mkString(",") + ".\n"
}
Utils.writeLine(minStatement, writeToFile, "append")
Utils.writeLine(maxStatement, writeToFile, "append")
Utils.writeToFile(new java.io.File(writeToFile), "append")(p => constrs foreach (p.println))
val showDirs = show match {
case Nil => ""
case _ => "\n#show.\n" + (show map (x => s"\n#show $x.")).mkString("\n")
}
Utils.writeLine(showDirs, writeToFile, "append")
Utils.writeToFile(new java.io.File(writeToFile), "append")(p => extra foreach (p.println))
Utils.writeToFile(new java.io.File(writeToFile), "append")(p => showDirs foreach (p.println))
val debug = scala.io.Source.fromFile(writeToFile).mkString
logger.debug(s"\nASP Input:\n \n$debug\n")
}
/**
* This generates a helper ASP program to extract the mode declaration atoms (if any) that match
* each atom in an answer set returned by the solver. This helps to process the atoms and populate
* the objects that are constructed from them as their internal representations. In practice this
* program computes theta-subsumption between literals.
*
* @example This is a (slightly adapted) example from the E.coli case study from:
*
* Ray, O. (2009). Nonmonotonic abductive inductive learning. Journal of Applied Logic, 7(3), 329-340.
*
* %% Given Mode declarations:
* -----------------------------------------------
* modeh(happens(use(#sugar),+time)).
* modeh(happens(add(#sugar),+time)).
* modeb(holdsAt(available(#sugar),+time)).
* modeb(not_holdsAt(available(#sugar),+time)).
* -----------------------------------------------
* %% Generate the following program:
* ----------------------------------------------------------------------------------------------------------
* mode(1,happens(use(X),Y)) :- sugar(X),time(Y). %% one atom for each mode, counting them with the 1st arg.
* mode(2,happens(add(X),Y)) :- sugar(X),time(Y).
* mode(3,holdsAt(available(X),Y)) :- sugar(X),time(Y).
* mode(4,not_holdsAt(available(X),Y)) :- sugar(X),time(Y).
*
* modeCounter(1..4).
*
* matchesMode(ModeCounter,Atom,Mode) :-
* mode(ModeCounter,Atom), mode(ModeCounter,Mode), true(Atom), Atom = Mode.
*
* %% Add one such rule for each predicate (mode atom) you want to query. The purpose is to
* %% generate matchesMode/3 instances only for atoms that are included in an
* %% answer set (i.e. true atoms), in order to avoid huge amounts of irrelevant info.
*
* true(happens(use(X),Y)) :- happens(use(X),Y).
* true(happens(add(X),Y)) :- happens(add(X),Y).
* true(holdsAt(available(X),Y)) :- holdsAt(available(X),Y).
* true(holdsAt(not_available(X),Y)) :- holdsAt(not_available(X),Y).
*
* #hide.
* #show matchesMode/3.
* ---------------------------------------------------------------------------------------------------------
*
* An atom 'matchesMode(m,atom,_)' in an answer set of this program is interpreted as a true atom
* that matches with mode atom 'm'.
*
*
*/
def matchModesProgram(queryModePreds: List[Literal]): List[String] = {
val modeDecl: List[String] = for (
x <- queryModePreds;
y <- List.range(1, queryModePreds.length + 1) zip queryModePreds
) yield "mode(" + y._1 + "," + x.tostring + "," + y._2.tostring + ") :- " + x.typePreds.mkString(",") + "."
val modeCount: String = "modeCounter(1.." + queryModePreds.length + ")."
val clause = """matchesMode(ModeCounter,Atom,Mode) :-
mode(ModeCounter,Atom, Mode), true(Atom), Atom = Mode."""
val trues: List[String] = for (x <- queryModePreds) yield "true(" + x.tostring + ")" + " :- " + x.tostring + "."
val program = modeDecl ++ List(modeCount) ++ List(clause) ++ trues ++ List("\n#show matchesMode/3.")
//program.foreach(println)
program
}
/**
* Calls the ASP solver and returns the results.
*
* @param task an indicator for what we want to do with the solver; see the code for details. New tasks may be
* added easily by providing the proper input to the ASP solver.
* @todo Error handling needs fixing. Right now we do not intercept
* the stderr stream, but we flag an error in case of an empty model.
*
* FIX THIS!!!
*/
def solve(
task: String = "",
useAtomsMap: Map[String, Literal] = Map[String, Literal](),
aspInputFile: java.io.File = new File(""),
examples: Map[String, List[String]] = Map(),
fromWeakExmpl: Boolean = false): List[AnswerSet] = {
/*
val buffer = new StringBuffer()
val command = task match {
case Core.ABDUCTION => Seq("python", Core.ASPHandler, s"aspfile=${aspInputFile.getCanonicalPath}")
case Core.GET_QUERIES => Seq("python", Core.ASPHandler, s"aspfile=${aspInputFile.getCanonicalPath}")
case Core.DEDUCTION => Seq("python", Core.ASPHandler, s"aspfile=${aspInputFile.getCanonicalPath}")
case Core.XHAIL => Seq("python", Core.ASPHandler, s"aspfile=${aspInputFile.getCanonicalPath}")
case Core.ILED => Seq("python", Core.ASPHandler, s"aspfile=${aspInputFile.getCanonicalPath}")
case Core.CHECKSAT => Seq("python", Core.ASPHandler, s"aspfile=${aspInputFile.getCanonicalPath}")
// I'm not sure if finding all optimals is necessary here
//case Core.FIND_ALL_REFMS => Seq("python", Core.ASPHandler, s"aspfile=${aspInputFile.getCanonicalPath}", "solveMode=optN")
case Core.FIND_ALL_REFMS => Seq("python", Core.ASPHandler, s"aspfile=${aspInputFile.getCanonicalPath}")
case Core.INFERENCE => Seq("python", Core.ASPHandler, s"aspfile=${aspInputFile.getCanonicalPath}")
//case "getGroundings" => Seq(Core.aspSolverPath + "/./clingo", Core.bkFile, aspInputFile.getCanonicalPath, "0", "--asp09")
//case "useWithKNNClassifier2" => Seq(Core.aspSolverPath + "/./clingo", Core.bkFile, aspInputFile.getCanonicalPath, "0", "--asp09")
//case "generate-aleph-negatives" => Seq(Core.aspSolverPath + "/./clingo", alternativePath, "0", "--asp09")
}
val strCommand = command.mkString(" ")
/*
def formHypothesis(answerSet: List[String], hypCount: Int) = {
answerSet match {
case Nil =>
case _ =>
val rules = getNewRules(answerSet,useAtomsMap)
val rulesWithTypes = rules.clauses map (x => x.withTypePreds())
val f = Utils.getTempFile(prefix = "theory", suffix = ".lp",deleteOnExit = true)
Utils.writeToFile(f, "overwrite")(p =>
rulesWithTypes foreach (x => p.println(x.tostring)))
val (tps, fps, fns, precision, recall, fscore) = {
//val c = new Crossvalidation(examples, rulesWithTypes)
//c.out
}
logger.info(
"\n---------------------------------\n" +
s"Enumerated hypothesis $hypCount:\n" +
"---------------------------------\n" +
(rules map (x => x.tostring)).mkString("\n") +
s"\n\nTPs: $tps\n" + s"FPs: $fps\n" + s"FNs: $fns\n" +
s"Precision: $precision\n" + s"Recall: $recall\n" + s"F-score: $fscore\n" +
"---------------------------------\n"
)
}
}
*/
var hypCount: Int = 0
var lout = new ListBuffer[AnswerSet]
val processLine = (x: String, y: Int) => parseAll(aspResult, x.replaceAll("\\s", "")) match {
case Success(result, _) =>
//formHypothesis(result, y)
hypCount += 1
if (result != List()) lout = new ListBuffer[AnswerSet]() += AnswerSet(result)
case f => None
}
val processLine1 = (x: String) => parseAll(aspResult, x.replaceAll("\\s", "")) match {
case Success(result, _) =>
if (result != List()) {
lout =task match {
case Core.FIND_ALL_REFMS => lout += AnswerSet(result) // keep all solutions
case _ => new ListBuffer[AnswerSet]() += AnswerSet(result) // keep only the last solution
}
logger.debug(s"$task: ${lout.mkString(" ")}")
}
case f => None
}
val dispatch = (x: String) => task match {
case Core.XHAIL => processLine(x, hypCount)
case _ => processLine1(x)
}
task match {
case "Not currently used" =>
val pio = new ProcessIO(_ => (),
stdout => scala.io.Source.fromInputStream(stdout).getLines.foreach(x => dispatch(x)),
stderr => scala.io.Source.fromInputStream(stderr).getLines.foreach(println))
command.run(pio)
lout.toList
case _ => //allTasks
//val out = command lines_! ProcessLogger(buffer append _)
val out = command.lineStream_!
//out.foreach(println)
if(out.head == Core.UNSAT) {
task match {
case Core.CHECKSAT => return List(AnswerSet.UNSAT)
case _ =>
task match {
// we need this in order to remove inconsistent weak support rules
case Core.FIND_ALL_REFMS => return List(AnswerSet.UNSAT)
case (Core.ABDUCTION | Core.ILED) =>
// some times a kernel cannot be created from garbage (weak) data points
// but we don't want a failure in this case. Some holds when generalizing
// a kernel from a weak data point, to gen a new rule, but end up in an
// UNSAT program. We don't want a crash in this case either, we simply want
// to quit learning from the particular example and move on.
if (fromWeakExmpl) {
if (task == Core.ILED) logger.info("Failed to learn something from that...")
return Nil
} else {
logger.error(s"\nTask: $task -- Ended up with an UNSATISFIABLE program")
throw new RuntimeException(s"\nTask: $task -- Ended up with an UNSATISFIABLE program")
}
case _ =>
logger.error(s"\nTask: $task -- Ended up with an UNSATISFIABLE program")
throw new RuntimeException(s"\nTask: $task -- Ended up with an UNSATISFIABLE program")
}
}
}
//out.foreach(x => processLine1(x))
out.foreach(x => dispatch(x))
}
lout.toList
*/
if (Globals.glvalues("with-jep").toBoolean) {
//solveASP(task,aspInputFile.getCanonicalPath,fromWeakExmpl)
solveASPNoJep(task, aspInputFile.getCanonicalPath, fromWeakExmpl)
} else {
solveASPNoJep(task, aspInputFile.getCanonicalPath, fromWeakExmpl)
}
}
/*
def solveMLNGrounding(aspFile: String) = {
val with_atom_undefiend = "-Wno-atom-undefined"
val cores = Runtime.getRuntime.availableProcessors
val aspCores = s"-t$cores"
val mode = ""
val command = Seq("clingo", aspFile, mode, with_atom_undefiend, aspCores)
val result = command.mkString(" ").lineStream_!.toVector
result
}
*/
def solveMLNGrounding(inps: RunningOptions, e: Example,
groundingDirectives: Vector[(Clause, Clause, Clause)],
targetClass: String): Array[String] = {
val q = groundingDirectives.map(a => s"${a._1.tostring}\n${a._2.tostring}\n${a._3.tostring}").mkString("\n")
val cwd = System.getProperty("user.dir") // Current working dir
val aspBKPath =
if (targetClass == "initiatedAt") s"${inps.entryPath}/ASP/ground-initiated.lp"
else s"${inps.entryPath}/ASP/ground-terminated.lp"
val all = (e.annotationASP ++ e.narrativeASP ++ List(s"""$q\n#include "$aspBKPath".""")).mkString("\n")
val f = Utils.getTempFile("ground", ".lp")
Utils.writeLine(all, f.getCanonicalPath, "overwrite")
val cores = Runtime.getRuntime.availableProcessors
//val aspHandlingScript = s"$cwd/asp/grounding.py"
//val command = Seq("python", aspHandlingScript, s"aspfile=${f.getCanonicalPath}", s"cores=$cores")
val command = Seq("clingo", f.getCanonicalPath, "-Wno-atom-undefined", s"-t$cores", "--verbose=0").mkString(" ")
val result = command.lineStream_!
val results = result.toVector
/*
val processLine = (x: String) => parseAll(aspResult, x.replaceAll("\\s", "")) match {
case Success(result, _) => result
case f => None
}
*/
val atoms = results(0)
val status = results(1)
if (status == "UNSATISFIABLE") throw new RuntimeException("UNSATISFIABLE program!")
atoms.split(" ")
}
def solveASPNoJep(task: String, aspFile: String, fromWeakExmpl: Boolean = false): List[AnswerSet] = {
val solveMode =
if (task == Globals.ABDUCTION && Globals.glvalues("iter-deepening").toBoolean) {
s"${Globals.glvalues("iterations")}"
} else {
"all"
}
val with_atom_undefined = "-Wno-atom-undefined"
val cores = Runtime.getRuntime.availableProcessors
val aspCores = s"-t$cores"
val mode = if (List("all", "optN").contains(solveMode)) "0" else ""
//val command = Seq("clingo", aspFile, mode, with_atom_undefiend, aspCores, " > ", outFile.getCanonicalPath)
val command = Seq("/home/nkatz/software/oledhome/clingo/clingo-4.5.4-source/build/release/clingo", aspFile, mode, with_atom_undefiend, aspCores)
val result = command.mkString(" ").lineStream_!
val results = result.toList
val status = {
val statusLine = results.filter(x => x.contains("SATISFIABLE") || x.contains("UNSATISFIABLE") || x.contains("OPTIMUM FOUND"))
if (statusLine.isEmpty) throw new RuntimeException(s"No STATUS returned from Clingo.")
// extract the actual string literal (SATISFIABLE, UNSATISFIABLE or OPTIMUM FOUND)
statusLine.head.replaceAll("\\s", "")
}
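/* For reference, clingo's textual output interleaves answer sets with status lines,
 * e.g. (atom names are illustrative):
 *
 * Answer: 1
 * initiatedAt(meeting(id1,id2),10) holdsAt(meeting(id1,id2),11)
 * SATISFIABLE
 *
 * The status is extracted above; the atom lines are recovered by the aspResult
 * parser below (lines that fail to parse, such as "Answer: 1", are discarded). */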
if (status == Globals.UNSAT) {
task match {
case Globals.CHECKSAT => return List(AnswerSet.UNSAT)
case _ =>
task match {
// we need this in order to remove inconsistent weak support rules
case Globals.FIND_ALL_REFMS => return List(AnswerSet.UNSAT)
// searching alternative abductive explanations with iterative search
case Globals.SEARCH_MODELS => return List(AnswerSet.UNSAT)
case Globals.INFERENCE => return List(AnswerSet.UNSAT)
case Globals.SCORE_RULES => return List(AnswerSet.UNSAT)
case Globals.GROW_NEW_RULE_TEST => return List(AnswerSet.UNSAT)
case (Globals.ABDUCTION | Globals.ILED) =>
// Sometimes a kernel cannot be created from garbage (weak) data points,
// but we don't want a failure in this case. The same holds when generalizing
// a kernel from a weak data point to generate a new rule, but ending up with an
// UNSAT program. We don't want a crash in this case either; we simply want
// to quit learning from the particular example and move on.
if (fromWeakExmpl) {
if (task == Globals.ILED) logger.info("Failed to learn something from that...")
return Nil
} else {
/* Perhaps there's no need to crash just because the solver got stuck with something; insisting on strictly sound learning belongs to a past no one wants to return to. */
logger.error(s"Task: $task -- Abduction failed (UNSATISFIABLE program)")
return Nil
/*
logger.error(s"\nTask: $task -- Ended up with an UNSATISFIABLE program")
val program = Source.fromFile(aspFile).getLines.toList.mkString("\n")
throw new RuntimeException(s"\nTask: $task -- Ended up with an UNSATISFIABLE program:\n$program")
*/
}
case _ =>
logger.error(s"Task: $task -- Abduction failed (UNSATISFIABLE program)")
return Nil
/*
logger.error(s"\nTask: $task -- Ended up with an UNSATISFIABLE program")
val program = Source.fromFile(aspFile).getLines.toList.mkString("\n")
throw new RuntimeException(s"\nTask: $task -- Ended up with an UNSATISFIABLE program:\n$program")
*/
}
}
}
// get the models
val processLine = (x: String) => parseAll(aspResult, x.replaceAll("\\s", "")) match {
case Success(result, _) => result
case f => None
}
val _models = results.map(x => processLine(x)).filter(z => z != None).filter(p => p.asInstanceOf[List[String]].nonEmpty).reverse
//val models = _models filter (x => x.replaceAll("\\s", "") != "")
//=========================================================================
// This is a quick fix to get the result for abduction when
// perfect-fit=false. In this case numerous models are
// returned, and often the empty one is the optimal (smallest).
// But the empty one will be filtered out by the code val models = _models
// and we'll end up doing extra stuff with other models for no real reason
// I'm just adding this as a quick & dirty fix to make sure that nothing
// else breaks.
//=========================================================================
//-------------------------------------------------------------------------
if (_models.isEmpty) return Nil
if (task == Globals.ABDUCTION && _models.head == "") return Nil
//-------------------------------------------------------------------------
//outFile.delete() // it's deleted on exit but it's better to get rid of them as soon as we're done with them.
if (_models.isEmpty) Nil
else _models map (x => AnswerSet(x.asInstanceOf[List[String]]))
}
/**
* Creates and Writes the contents of the ASP file for SAT checking
*
* @param theory
* @param example
* @return the path to the file to be passed to the ASP solver
*/
def check_SAT_Program(theory: Theory, example: Example, globals: Globals): String = {
val e = (example.annotationASP ++ example.narrativeASP).mkString("\n")
val exConstr = getCoverageDirectives(withCWA = Globals.glvalues("cwa"), globals = globals).mkString("\n")
val t = theory.map(x => x.withTypePreds(globals).tostring).mkString("\n")
val f = Utils.getTempFile("sat", ".lp")
Utils.writeToFile(f, "append")(
p => List(e, exConstr, t, s"\n#include " + "\"" + globals.BK_WHOLE_EC + "\".\n") foreach p.println
)
f.getCanonicalPath
}
def iterSearchFindNotCoveredExmpls(theory: Theory, example: Example, globals: Globals) = {
val e = (example.annotationASP ++ example.narrativeASP).mkString("\n")
val varbedExmplPatterns = globals.EXAMPLE_PATTERNS_AS_STRINGS
val constr = varbedExmplPatterns.flatMap(x => List(s"posNotCovered($x) :- example($x), not $x.", s"negsCovered($x):- negExample($x), $x.")).mkString("\n")
val t = theory.map(x => x.withTypePreds(globals).tostring).mkString("\n")
val show = s"\n#show.\n#show posNotCovered/1.\n#show negsCovered/1."
val program = e + t + constr + show
val f = Utils.getTempFile("sat", ".lp", deleteOnExit = true)
Utils.writeLine(program, f.getCanonicalPath, "overwrite")
f.getCanonicalPath
}
/** @todo Refactor these, reduce code **/
def isConsistent_program(theory: Theory, example: Example, globals: Globals): String = {
// If annotation is given here, negatives may be covered by inertia.
// On the other hand, if the annotation is omitted then during inference
// a correct rule will (correctly) entail positives that will be treated
// as negatives (due to the lack of annotation). To overcome the issue,
// the annotation is provided but inertia is defused during inference.
val e = (example.annotationASP ++ example.narrativeASP).mkString("\n")
//val e = (example.narrativeASP).mkString("\n")
val exConstr = getCoverageDirectives(checkConsistencyOnly = true, globals = globals).mkString("\n")
val t = theory.map(x => x.withTypePreds(globals).tostring).mkString("\n")
val f = Utils.getTempFile("isConsistent", ".lp", deleteOnExit = true)
Utils.writeToFile(f, "append")(
p => List(e, exConstr, t, s"\n#include " + "\"" + globals.ILED_NO_INERTIA + "\"\n.") foreach p.println
)
f.getCanonicalPath
}
def isConsistent_program_Marked(theory: Theory, example: Example, globals: Globals): String = {
// If annotation is given here, negatives may be covered by inertia.
// On the other hand, if the annotation is omitted then during inference
// a correct rule will (correctly) entail positives that will be treated
// as negatives (due to the lack of annotation). To overcome the issue,
// the annotation is provided but inertia is defused during inference.
// This method marks each rule in order to track the derivation of
// negative examples from the particular rule.
val e = (example.annotationASP ++ example.narrativeASP).mkString("\n")
//val exConstr = getCoverageDirectives(checkConsistencyOnly = true).mkString("\n")
val markInit = "\ninitiatedAt(F,T) :- marked(I,J,initiatedAt(F,T)),rule(I),supportRule(J).\n"
val markTerm = "\nterminatedAt(F,T) :- marked(I,J,terminatedAt(F,T)),rule(I),supportRule(J).\n"
val show = "\n#show negsCovered/3.\n"
val markThem =
(for (
(c, i) <- theory.clauses zip List.range(0, theory.clauses.length);
(cs, j) <- c.supportSet.clauses zip List.range(0, c.supportSet.clauses.length);
y = cs.withTypePreds(globals);
marked = Clause(Literal(predSymbol = "marked", terms = List(Constant(i.toString), Constant(j.toString), y.head)), body = y.body)
) yield marked.tostring).mkString("\n")
val ruleGen = s"rule(0..${theory.clauses.length}).\n"
val varbedExmplPatterns = globals.EXAMPLE_PATTERNS_AS_STRINGS
val coverageConstr = varbedExmplPatterns.map(x =>
s"\nnegsCovered(I,J,$x):- marked(I,J,$x), not example($x),rule(I),supportRule(J).\n").mkString("\n")
val ssRuleGen = s"supportRule(0..${
theory.clauses.foldLeft(0){
(x, y) =>
val newMax = y.supportSet.clauses.length
if (newMax > x) newMax else x
}
}).\n"
val f = Utils.getTempFile("isConsistent", ".lp", deleteOnExit = true)
Utils.writeToFile(f, "append")(
p => List(e, coverageConstr, markThem, markInit, markTerm, s"\n#include " + "\"" + globals.BK_INITIATED_ONLY_MARKDED + "\".\n", ruleGen, ssRuleGen, show) foreach p.println
)
f.getCanonicalPath
}
/**
   * Performs inference and returns the results (ground instances
   * of example pattern atoms).
   *
   * @param p a Clause or a Theory
   * @param e an example (Herbrand interpretation)
   * @param globals the global parameters of the run
   */
def inference(p: Expression, e: Example, globals: Globals): AnswerSet = {
val f = (p: Expression) => p match {
case x: Clause => x.withTypePreds(globals).tostring
case x: Theory => (x.clauses map (z => z.withTypePreds(globals).tostring)).mkString("\n")
}
val file = Utils.getTempFile("inference", ".lp", deleteOnExit = true)
val aspProgram =
(e.toMapASP("annotation") ++ e.toMapASP("narrative")).mkString("\n") +
"\n" + f(p) + globals.EXAMPLE_PATTERNS.map(x => s"\n#show ${x.tostring}:${x.tostring}.").mkString("\n")
ASP.toASPprogram(program = List(aspProgram),
writeToFile = file.getCanonicalPath)
val covers = ASP.solve("inference", aspInputFile = file)
covers.head
}
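  /*
   * A minimal usage sketch (hypothetical names, assuming a learnt clause `rule`,
   * a data batch `e` and an initialized `globals` object):
   *
   *   val answerSet = inference(rule, e, globals)
   *   answerSet.atoms foreach println // the ground example pattern atoms entailed by `rule`
   */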
/**
*
* @param kernelSet analyzed using use/2 predicates
* @param priorTheory analyzed using use/3 predicates
* @param retained this (optional) program is used for inference as is.
* @param findAllRefs this is a 2-tuple consisting of a single rule R and one
* of R's support set rules. R is analyzed using use3
* predicates. This is used in order to search the particular
* support set rule for all of R's refinements w.r.t. a set of
* examples.
   * @param examples a map with "annotation" and "narrative" keys, holding the respective sets of atoms.
   * @param aspInputFile the file to which the generated ASP program is written.
   * @param learningTerminatedAtOnly true if only terminatedAt rules are being learnt.
   * @return the defeasible versions of the kernel set, the prior theory and the single-rule
   *         program, paired with their use-atom maps.
* @todo Simplify/refactor this
*/
def inductionASPProgram(
kernelSet: Theory = Theory(),
priorTheory: Theory = Theory(),
retained: Theory = Theory(),
findAllRefs: (Clause, Clause) = (Clause.empty, Clause.empty), // used only with Rules.getRefinedProgram.search method
examples: Map[String, List[String]] = Map(),
aspInputFile: java.io.File = new java.io.File(""),
learningTerminatedAtOnly: Boolean = false,
use3WithWholeSupport: Boolean = false,
withSupport: String = "fullSupport", globals: Globals) = {
val (defeasibleKS, use2AtomsMap) = kernelSet.use_2_split(globals)
/*
logger.debug("\nDefeasible Kernel Set:\n" +
defeasibleKS.clauses.map(x => x.tostring).mkString("\n"))
logger.debug("Use atoms -- kernel literals map:\n" +
use2AtomsMap.map(x => x._1 + "->" + x._2.tostring).mkString("\n"))
*/
val (defeasiblePrior, use3AtomsMap, use3generates) =
if (use3WithWholeSupport) priorTheory.use_3_split_all(withSupport = withSupport, globals = globals)
else priorTheory.use_3_spilt_one(withSupport = withSupport, globals = globals)
/*
logger.debug("\nDefeasible prior theory:\n" +
defeasiblePrior.clauses.map(x => x.tostring_debug).mkString("\n"))
logger.debug("Use atoms -- kernel literals map:\n" +
use3AtomsMap.map(x => x._1 + "->" + x._2.tostring).mkString("\n"))
*/
// This is used only to analyse one particular support set rule in
// order to search for all refinements that may be derived from it.
// It is necessary for the search method of Rules.getRefinedProgram
// to work
val (defeasible_Rule, use3AtomsMap_Rule, use3generates_Rule) = findAllRefs._1.use_3_split_one(1, findAllRefs._2, globals = globals)
val varbedExmplPatterns = globals.EXAMPLE_PATTERNS
val coverageConstr = Globals.glvalues("perfect-fit") match {
case "true" => getCoverageDirectives(learningTerminatedAtOnly = learningTerminatedAtOnly, globals = globals)
case "false" => List()
case _ => throw new IllegalArgumentException("Unspecified parameter for perfect-fit")
}
val generateUse2 =
if (use2AtomsMap.nonEmpty) {
(for (
(cl, i) <- kernelSet.clauses zip
List.range(1, kernelSet.clauses.length + 1)
) yield "{use2(" + i + ",0.." + cl.body.length + ")}.").
mkString("\n")
} else { "" }
val generateUse3 = use3generates.mkString("\n")
val program =
defeasibleKS.extend(defeasiblePrior).
extend(defeasible_Rule).clauses.map(x => x.tostring_debug) ++
coverageConstr ++
List(generateUse2, generateUse3, use3generates_Rule) ++
retained.clauses.map(p => p.withTypePreds(globals).tostring)
val f = (x: Literal) => "1," + (x.variables(globals) map (y => y.tostring)).mkString(",")
val ff = varbedExmplPatterns.map(x =>
s"\n${f(x)},posNotCovered(${x.tostring}):example(${x.tostring})," +
s" not ${x.tostring};\n${f(x)},negsCovered(${x.tostring}):${x.tostring}," +
s" not example(${x.tostring})").mkString(";")
val minimize = Globals.glvalues("perfect-fit") match {
case "true" => "#minimize{1,I,J:use2(I,J) ; 1,I,J,K:use3(I,J,K)}."
case "false" => "\n#minimize{\n1,I,J:use2(I,J) ; 1,I,J,K:use3(I,J,K) ;" + ff ++ "\n}."
case _ => throw new RuntimeException("Unspecified parameter for perfect-fit")
}
ASP.toASPprogram(
program =
examples("annotation") ++
examples("narrative") ++
program ++ List(minimize) ++
List(s"\n#include " + "\"" + globals.BK_WHOLE_EC + "\".") ++
List("\n:- use2(I,J), not use2(I,0).\n") ++
List("\n#show use2/2.\n \n#show use3/3.\n"),
//constraints = constraints, show = show,
writeToFile = aspInputFile.getCanonicalPath)
(defeasibleKS, use2AtomsMap, defeasiblePrior, use3AtomsMap, defeasible_Rule, use3AtomsMap_Rule)
}
def getCoverageDirectives(
learningTerminatedAtOnly: Boolean = false,
withCWA: String = Globals.glvalues("cwa"),
checkConsistencyOnly: Boolean = false, globals: Globals): List[String] = {
val varbedExmplPatterns = globals.EXAMPLE_PATTERNS_AS_STRINGS
varbedExmplPatterns.flatMap(x =>
Globals.glvalues("cwa") match {
// CWA on the examples:
case "true" =>
learningTerminatedAtOnly match {
case true => List(s":- example($x), not $x.", s":- $x, not example($x).", s"$x :- example($x).")
case _ =>
if (!checkConsistencyOnly) List(s":- example($x), not $x.", s":- $x, not example($x).")
else List(s":- $x, not example($x).")
}
// No CWA on the examples, agnostic with missing examples, explicit negatives:
case _ =>
//List(s":- example($x), not $x.", s":- $x, negExample($x).", s"$x :- example($x).")
List(s":- example($x), not $x.", s":- $x, negExample($x).")
/*
learningTerminatedAtOnly match {
case true => List(s":- example($x), not $x.", s":- $x, negExample($x).", s"$x :- example($x).")
case _ =>
if(!checkConsistencyOnly) List(s":- example($x), not $x.", s":- $x, not example($x).")
else List(s":- $x, not example($x).")
}
*/
})
}
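  /*
   * A worked example (hypothetical example pattern): with cwa = "true" and the
   * pattern holdsAt(meeting(X,Y),T), the directives generated above are
   *
   *   :- example(holdsAt(meeting(X,Y),T)), not holdsAt(meeting(X,Y),T).
   *   :- holdsAt(meeting(X,Y),T), not example(holdsAt(meeting(X,Y),T)).
   *
   * i.e. hard constraints demanding that all positives are covered and no
   * implicit negatives are.
   */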
}
| 38,019 | 45.536108 | 177 | scala |
OLED | OLED-master/src/main/scala/utils/DataHandler.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils
import com.mongodb.casbah.MongoClient
import com.mongodb.casbah.commons.MongoDBObject
import logic.Examples.Example
/**
* Created by nkatz on 6/26/17.
*/
object DataHandler {
trait InputOptions
class BasicInputOptionsWrapper(
val dbName: String,
val collectionName: String = "examples",
val chunkSize: Int = 1,
val targetConcept: String = "None",
val sortDbByField: String = "None",
val sort: String = "ascending") extends InputOptions
def basicDataFunction(options: BasicInputOptionsWrapper): Iterator[Example] = {
val mc = MongoClient()
val collection = mc(options.dbName)(options.collectionName)
collection.createIndex(MongoDBObject(options.sortDbByField -> 1))
val data = collection.find().sort(MongoDBObject(options.sortDbByField -> 1)).map { x =>
val e = Example(x)
if (options.targetConcept == "None") {
new Example(annot = e.annotation, nar = e.narrative, _time = e.time)
} else {
new Example(annot = e.annotation filter (_.contains(options.targetConcept)), nar = e.narrative, _time = e.time)
}
}
val dataChunked = data.grouped(options.chunkSize)
val dataIterator = dataChunked.map { x =>
x.foldLeft(Example()) { (z, y) =>
new Example(annot = z.annotation ++ y.annotation, nar = z.narrative ++ y.narrative, _time = x.head.time)
}
}
dataIterator
}
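  /*
   * A minimal usage sketch (assuming a local MongoDB with a "caviar" database
   * whose examples carry a "time" field):
   *
   *   val opts = new BasicInputOptionsWrapper("caviar", chunkSize = 10, sortDbByField = "time")
   *   basicDataFunction(opts) foreach (e => println(e.time))
   */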
}
| 2,119 | 32.650794 | 119 | scala |
OLED | OLED-master/src/main/scala/utils/DataUtils.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils
import com.mongodb.casbah.commons.MongoDBObject
import logic.Examples.Example
import logic.Theory
/**
* Created by nkatz on 9/13/16.
*/
/*
 * MOST OF THIS IS USELESS (NEED ITERATORS TO STORE THE DATA)
 */
object DataUtils {
trait Data
trait TrainingSet {
val trainingSet: List[Data] = Nil
val testingSet: List[Data] = Nil
def isEmpty = trainingSet == Nil
}
class DataSet(val trainingSet: List[(Int, Int)], val testingSet: List[(Int, Int)])
/*
   * Find the positive and negative intervals in the database for the particular HLE
*/
object Interval {
def apply(HLE: String, start: Int) = {
new Interval(HLE, start, 0)
}
}
/*
   * This class is used only to represent intervals, which in turn
   * are used to fetch data from a CAVIAR database (note that the step is hard-coded).
   * This is only for experiments with CAVIAR; it's a valid data format to use,
   * i.e. an abstraction for data representation I'm going to stick with.
* */
case class Interval(HLE: String, startPoint: Int, var endPoint: Int) extends Data {
val step = 40
//var endPoint = 0
def length = List.range(startPoint, endPoint + step, step).length
}
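  /*
   * For instance, with the hard-coded step of 40, Interval("meeting", 0, 200)
   * spans the time points 0, 40, 80, 120, 160, 200, so its length is 6.
   */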
/* Companion object for the DataAsIntervals class */
object DataAsIntervals {
def apply() = {
new DataAsIntervals(Nil, Nil)
}
}
class DataAsIntervals(override val trainingSet: List[Interval], override val testingSet: List[Interval]) extends TrainingSet {
override def isEmpty = this.trainingSet.isEmpty
def showTrainingIntervals() = trainingSet.foreach(x => println(x))
def showTestingIntervals() = testingSet.foreach(x => println(x))
}
class DataAsExamples(override val trainingSet: List[Example], override val testingSet: List[Example]) extends TrainingSet
/* Use this to stream training data directly from the db. */
class DataFunction(val function: (String, String, Int, DataAsIntervals) => Iterator[Example]) extends TrainingSet
class ResultsContainer(val tps: Double, val fps: Double, val fns: Double,
val precision: Double, val recall: Double,
val fscore: Double, val theorySize: Double, val time: Double, val theory: Theory)
/* Contains utilities for collecting statistics */
object Stats {
def getExampleStats(exmpls: List[Example]) = {
def append(x: List[String], y: List[String]) = {
x ++ y.filter(p => !x.contains(p))
}
val (ratios, annotSizes, narSizes, totalSize, wholeAnnotation, wholeNarrative) =
exmpls.foldLeft(List[Double](), List[Double](), List[Double](), List[Double](), List[String](), List[String]()) { (s, e) =>
val (rats, annots, nars, total, a, n) = (s._1, s._2, s._3, s._4, s._5, s._6)
val holdsAtoms = e.annotation
val narrativeAtoms = e.narrative
val holdsSize = holdsAtoms.size.toDouble
val narrativeSize = narrativeAtoms.size.toDouble
val t = holdsSize + narrativeSize
val ratio = holdsSize / narrativeSize
//(rats :+ ratio, annots :+ holdsSize, nars :+ narrativeSize, a ++ holdsAtoms, n ++ narrativeAtoms)
(rats :+ ratio, annots :+ holdsSize, nars :+ narrativeSize, total :+ t, a, n)
}
val meanRatio = Utils.mean(ratios)
val meanAnnotSize = Utils.mean(annotSizes)
val meanNarrativeSize = Utils.mean(narSizes)
val totalAnnotSize = List[String]() // wholeAnnotation.distinct.size
val totalNarSize = List[String]() //wholeNarrative.distinct.size
println(s"Mean total example size: ${Utils.mean(totalSize)}" +
s"\nMean annotation size per example: $meanAnnotSize\nMean narrative size per example: $meanNarrativeSize\n" +
s"Mean ratio (annot/nar) per example: $meanRatio\nTotal annotation size: $totalAnnotSize\n" +
s"Total narrative size: $totalNarSize")
(meanRatio, meanAnnotSize, meanNarrativeSize, totalAnnotSize, totalNarSize)
}
}
}
| 4,676 | 35.539063 | 131 | scala |
OLED | OLED-master/src/main/scala/utils/Implicits.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils
import java.io.File
import logic.{Clause, Constant, PriorTheory, Theory}
import utils.DataUtils.{DataAsIntervals, DataSet, Interval}
/**
* Created by nkatz on 4/2/2016.
*/
/*
USE WITH CAUTION. IMPORT THE OBJECT WHERE YOU NEED IT
AND PAY ATTENTION TO THE ORDERING OF THE CONVERSIONS
TO AVOID CONFLICTS THAT MAY COMPILE BUT RESULT IN RUNTIME ERRORS.
ACTUALLY, THE BEST WOULD BE TO HAVE A SEPARATE OBJECT WRAPPING
IMPLICITS FOR EACH PART OF THE CODE THAT NEEDS ONE. WE'LL SEE ABOUT
THAT.
*/
object Implicits {
//implicit def asPriorTheory(x: List[Clause]): PriorTheory = new PriorTheory(retainedRules = Theory(x))
//implicit def asTheory(t: PriorTheory): Theory = t.merge
implicit def asTheory(x: List[Clause]): Theory = Theory(x)
implicit def asPriorTheory(t: Theory): PriorTheory = new PriorTheory(retainedRules = t)
implicit def asTheory(x: Clause): Theory = Theory(List(x))
implicit def asConstant(x: String): Constant = Constant(x)
implicit def asConstant(x: Int): Constant = Constant(x.toString)
implicit def getFilePath(x: java.io.File): String = x.getCanonicalPath
implicit def getFileFromPath(x: String): java.io.File = new File(x)
/*
implicit def asExample(e: Example): Exmpl = new Exmpl(_id = e.time, exampleWithInertia = e)
implicit def toExample(e: Iterator[Example]): Iterator[Exmpl] = e map asExample _
*/
}
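/*
 * A minimal usage sketch: with these conversions in scope, a List[Clause] can be
 * passed wherever a Theory is expected, e.g.
 *
 *   import utils.Implicits._
 *   val theory: Theory = clauses // clauses: List[Clause]; asTheory is applied implicitly
 */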
/**
 * To be used from within the Clause class
*/
object ClauseImplicits {
implicit def asConstant(x: String): Constant = Constant(x)
implicit def asConstant(x: Int): Constant = Constant(x.toString)
}
object TrainingSetImplicits {
// Until I fix this, I'll use this implicit conversion to convert a TrainingSet
// (used by OLED's routines) to a DataSet (used by ILED's routines -- see the code at RunILED.scala)
// The signatures of the two classes are as follows:
//class DataSet(val trainingSet: List[(Int, Int)], val testingSet: List[(Int, Int)])
//class DataAsIntervals(val trainingSet: List[Interval], val testingSet: List[Interval])
implicit def toDataset(t: DataAsIntervals): DataSet = {
val f = (x: List[Interval]) => x map (z => (z.startPoint, z.endPoint))
new DataSet(trainingSet = f(t.trainingSet), testingSet = f(t.testingSet))
}
implicit def toDataset(tl: List[DataAsIntervals]): List[DataSet] = {
tl map (toDataset(_))
}
}
/*
object ExmplImplicits {
implicit def toExample(e: List[Exmpl]): List[Example] = {
e map (x => new Example(annot = x.exmplWithInertia.annotation, nar = x.exmplWithInertia.narrative, _time = x.exmplWithInertia.time))
}
}
*/
| 3,291 | 34.021277 | 136 | scala |
OLED | OLED-master/src/main/scala/utils/LookAheads.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils
import logic.{Clause, Literal}
/**
* Created by nkatz on 11/7/16.
*/
object LookAheads {
/*
* Whenever a new lookahead policy is defined (and declared in a mode declarations file),
* the policy should be implemented in the LookAheadImplementations object and a mapping
* between the policy's name and its implementation should be defined here.
* */
val policyMap = Map("appearsEarlier" -> LookAheadImplementations.appearsEarlier_<-)
class LookAheadSpecification(val lookAheadDefinition: String) {
/*
* A lookahead specification is a declaration of the form:
*
* lookahead( transaction(X,Y,T), before(T,T1), appearsEarlier(2) )
*
* In this definition:
* - transaction(X,Y,T) is the current atom
* - before(T,T1) is the lookahead atom
* - appearsEarlier(2) is the policy atom
*
* The lookahead specification says that when a version of a current atom is to be
* added to the clause, then it must be accompanied by a version of the lookahead atom.
     * Moreover, these two atoms must share (have in common) their 3rd and 1st variable respectively
* (denoted by the same variable T in the respective positions in the atom signatures). Also, the
* policy atom enforces additional constrains on the remaining variables of the lookahead atom.
* For instance the policy atom above (appearsEarlier(2)) states that the second variable in a
* lookahead atom must appear earlier in the clause in which the lookahead atom is about to be added.
* Policies are implemented in the LookAheadImplementations object
*
*/
val parsed = Literal.parse(lookAheadDefinition)
// the current atom
val currentLiteralSignature = parsed.terms.head.asInstanceOf[Literal]
// the lookahead
val lookAheadLiteralSignature = parsed.terms(1).asInstanceOf[Literal]
// the policy atom
val policySignature = parsed.terms(2).asInstanceOf[Literal]
// the linking (shared) variable between the current and the lookahead atom.
// The variable itself is not important, we only need it to extract the positions
// in the actual current and lookahead atoms that we'll encounter during learning,
// which the linking variable should occupy.
val targetLookAheadSharedVariable = {
val vars = currentLiteralSignature.getVars
vars.toSet.intersect(lookAheadLiteralSignature.getVars.toSet)
}
// fail if no shared variable is found
    require(targetLookAheadSharedVariable.nonEmpty, s"No shared variables between current and lookahead atoms in the lookahead specification $lookAheadDefinition")
// index of shared variable in a current atom
val sharedVarIndex_in_CurrentAtom = currentLiteralSignature.getVars.indexOf(targetLookAheadSharedVariable.head)
// index of shared variable in a lookahead atom
val sharedVarIndex_in_LookAheadAtom = lookAheadLiteralSignature.getVars.indexOf(targetLookAheadSharedVariable.head)
// index of linking variable in a lookahead atom
val linkingVar_in_LookAheadAtom = policySignature.terms.head.name.toInt - 1
def policy = policyMap(policySignature.predSymbol)
}
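  /*
   * A minimal usage sketch, using the declaration from the comment above:
   *
   *   val spec = new LookAheadSpecification("lookahead(transaction(X,Y,T),before(T,T1),appearsEarlier(2))")
   *   spec.sharedVarIndex_in_CurrentAtom // 2 (T is the 3rd variable of the current atom)
   *   spec.sharedVarIndex_in_LookAheadAtom // 0 (T is the 1st variable of the lookahead atom)
   *   spec.linkingVar_in_LookAheadAtom // 1 (the 2nd variable, T1, must appear earlier in the clause)
   */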
object LookAheadImplementations {
/*
* All lookahead implementations should be declared here.
*/
/*
* This is an implementation of the "appearsEarlier_<-" lookahead policy. This policy is declared in the
* mode declarations file as follows (predicate and variable names, arities etc are random, just for demonstration):
*
* lookahead( p(X,Y,T), q(T,T1), appearsEarlier(2) )
*
* The intended meaning of this declaration is: Whenever a p/3 literal is added to a clause r, a q/2 literal
     * should also be added. Both these literals are drawn from a bottom clause (bottomClause in the method's signature), while
     * the policy atom carried by the specification argument is the literal appearsEarlier(2).
* The relation between these two literals is that they should share a variable T. Also, the
* remaining variable T1 of q/2 should appear in some literal r' that already appears in clause r.
* This is specified by appearsEarlier(2), which means that the second variable of q/2 should "appearEarlier". The "<-" in the
* name of the policy means that we search clause r for literal r' "from right to left" i.e. from the last body literal
* to the head atom.
*
     * This method returns the lookahead literal whose linking variable appears as close to the end of the clause as possible, i.e.
     * it appears in a literal closer to the end of the clause.
*
*/
val appearsEarlier_<- = (lit: Literal, specification: LookAheadSpecification, clause: Clause, bottomClause: Clause) => {
val currentAtomSignature = specification.currentLiteralSignature
if (lit.predSymbol == currentAtomSignature.predSymbol && lit.arity == currentAtomSignature.arity) {
val sharedVarIndex_in_CurrentAtom = specification.sharedVarIndex_in_CurrentAtom
val sharedVarIndex_in_LookAheadAtom = specification.sharedVarIndex_in_LookAheadAtom
val sharedVar = lit.getVars(sharedVarIndex_in_CurrentAtom)
// for the shared variable find in the bottom clause all literals that match the lookahead atom
// signature and contain the shared variable in the proper position
val candidateLookAheads =
bottomClause.body.filter { p =>
p.predSymbol == specification.lookAheadLiteralSignature.predSymbol &&
p.arity == specification.lookAheadLiteralSignature.arity &&
p.getVars(sharedVarIndex_in_LookAheadAtom).name == sharedVar.name &&
clause.toLiteralList.filter(l => List("fraud", "transaction").contains(l.predSymbol)).exists(s => s.getVars.map(_.name).contains(p.getVars(specification.linkingVar_in_LookAheadAtom).name))
}
val f = (x: logic.Variable) => {
// get max to get the literal closest to the end of the clause
clause.toLiteralList.filter(l => List("fraud", "transaction").contains(l.predSymbol)).map(y => if (y.getVars.map(_.name).contains(x.name)) clause.toLiteralList.indexOf(y) + 1 else 0).max
}
if (candidateLookAheads.nonEmpty) {
candidateLookAheads.map{ q =>
(q, f(q.getVars(specification.linkingVar_in_LookAheadAtom)))
}.sortBy(z => z._2).last._1
} else {
Literal()
}
} else {
Literal()
}
}
def appearsEarlier_<-(lit: Literal, specification: String, clause: Clause, bottomClause: Clause) = {
// This is a total hack, just to make it work. I'll see how to make it generic
// A lookahead link looks like that:
// --------------------------------------------------------------------------------------
// transaction/4 -> { before/2, (4,1) }, { greaterThan/2, (2,1) }, { lessThan/2, (2,1) }
// --------------------------------------------------------------------------------------
// --------------------------------------------------------------------------------------
/*
* I'll use a specific example to see how it plays out. The atoms involved will be before/2, after/2, greaterThan/2, lessThan/2.
* The way these atoms will be used in the example will be indicative of what I want to achieve. When that's done, I'll find a
* way to make it generic, specify lookaheads in the mode declarations, parse them into objects for easy handling and search etc
*
* So, here comes the working example
*
* Assume that r is the clause that we are currently learning and
* α = transaction(Card, A1, Cntry, T1) is the atom that we are about to add to r. Assume also that
* β = transaction(Card, A2, Cntry, T2) is the last atom that appears in r.
* if T1 is not the time variable that appears in head(r):
* STEP 1: Find either a before/2, or after/2 atom in the bottom clause that links T1 and T2.
* STEP 2: Find either a greaterThan/2, lessThan/2 or
* else:
* simply add α to r
*/
var foundLookAheadAtom = Literal()
}
/*
* This policy is similar to "appearsEarlier_<-" but the search for a literal that contains a linking variable is done
* "left to right", i.e. from the head to the last body literal
*/
def appearsEarlier_->(lit: Literal, lookAheadLit: Literal, clause: Clause, searchSpace: Clause) = {
}
/*
* This policy is similar to "appearsEarlier_<-" but the search for a literal that contains a linking variable is done randomly
*/
def appearsEarlier_*(lit: Literal, lookAheadLit: Literal, clause: Clause, searchSpace: Clause) = {
}
}
}
| 9,476 | 45.915842 | 202 | scala |
OLED | OLED-master/src/main/scala/utils/MongoUtils.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils
import app.runutils.Globals
import com.mongodb.BasicDBObject
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.MongoClient
import com.mongodb.casbah.commons.MongoDBObject
import logic.Examples.{Example, ExampleBatch}
import logic.{Constant, Literal}
import logic.Examples.Example
import scala.collection.mutable.ListBuffer
import utils.DataUtils.Interval
/**
* Created by nkatz on 11/8/15.
*/
trait MongoUtils {
def ctmToMongo = {
/*
* Creates a new db from the old CTM Jason dump (the same that exists at estia).
* Gets rid of various redundant stuff from the new database and uses the
* identifiers for narrative and annotation that we use throughout the application
* ("narrative" and "annotation"). The old db uses "pos" and "nar" respectively
*
*/
// connection to the old database
val mongoClient1 = MongoClient()
val collection1 = mongoClient1("ctm-old")("examples")
// connection to the new database
val mongoClient2 = MongoClient()
val collection2 = mongoClient2("CTM")("examples")
collection2.drop() //clear
var accum = List[Example]()
for (x <- collection1.find().sort(MongoDBObject("time" -> 1))) {
val annot = x.asInstanceOf[BasicDBObject].get("pos").asInstanceOf[BasicDBList].toList.map(x => x.toString)
val narrative = x.asInstanceOf[BasicDBObject].get("nar").asInstanceOf[BasicDBList].toList.map(x => x.toString)
val prev = x.asInstanceOf[BasicDBObject].get("innert").asInstanceOf[BasicDBList].toList.map(x => x.toString)
val time = x.asInstanceOf[BasicDBObject].get("example")
accum = accum :+ new Example(annot = annot, nar = narrative, _time = time.toString)
//val annotation = annot ++ prev
}
val _accum = accum.sortBy(x => x.time.toInt) // for some reason some things are unsorted in the original db
accum = _accum
for (x <- accum) {
val entry = MongoDBObject("time" -> x.time.toInt) ++ ("annotation" -> x.annotation) ++ ("narrative" -> x.narrative)
println(entry)
collection2.insert(entry)
}
mongoClient1.close()
mongoClient2.close()
}
/**
* Helper container class for getDBsWithTimes()
*/
case class DB(name: String) {
var times: List[Int] = List()
}
/**
* Helper method returns all DB names and the time points in every DB.
*/
def getDBsWithTimes(): List[DB] = {
val alldbs = new ListBuffer[DB]
val dbs = Utils.getAllDBs()
for (db <- dbs) {
val database = DB(db)
val mongoClient = MongoClient()
val collection = mongoClient(db)("examples")
for (x <- collection.find().sort(MongoDBObject("time" -> 1))) {
val time = x.asInstanceOf[BasicDBObject].get("time").asInstanceOf[Int]
database.times = database.times :+ time
}
database.times = database.times.sorted
alldbs += database
mongoClient.close()
}
alldbs.toList
}
}
class Database(val name: String, val collectionName: String = "examples") {
val mongoClient = MongoClient()
private val _collection = mongoClient(name)(collectionName)
val collection = this._collection
  // Need to create an index to avoid running out of memory during sorting operations for large DBs.
collection.createIndex(MongoDBObject("time" -> 1))
var startTime = 0
val endTime = collection.last.asInstanceOf[BasicDBObject].get("time").toString.toInt
val isEmpty = this.collection.findOne() match {
case Some(x) =>
this.startTime = x.asInstanceOf[BasicDBObject].get("time").toString.toInt
false
case _ => true
}
val nonEmpty = if (this.isEmpty) false else true
val size = this.collection.size
def close() = this.mongoClient.close()
def inspectDB(seeWhat: String = "time") = {
for (x <- this.collection.find().sort(MongoDBObject("time" -> 1))) {
val e = Example(x)
seeWhat match {
case "time" => println(e.time)
case "annotation" => println(e.annotation)
case "narrative" => println(e.narrative)
case "example" => println(e)
}
}
}
/**
* Retrieves a batch of examples from this database
*
* @param startTime start from example with time = startTime
* @param howMany pack howMany examples in one batch
* @return the batch as an Example instance and the time stamp of the last
   *         example in the batch (used as start time for the next batch)
*/
def getBatch1(startTime: Int, howMany: Int, usingWeakExmpls: Boolean = false): (Example, Int) = {
//def batchQuery = this.collection.find("time" $gte startTime).limit(howMany)
val _batch = this.collection.find("time" $gte startTime).sort(MongoDBObject("time" -> 1)).limit(howMany)
val batch = _batch.toList
val endTime = batch.last.asInstanceOf[BasicDBObject].get("time").toString
(Example(batch.toList, usingWeakExmpls), endTime.toInt)
}
def getBatch(startTime: Int, howMany: Int, usingWeakExmpls: Boolean = true): (ExampleBatch, Int) = {
val _batch = this.collection.find("time" $gte startTime).sort(MongoDBObject("time" -> 1)).limit(howMany)
val x = _batch.toList
val batch = if (x.isEmpty) {
// We're out of data, so fetch more data from the beginning of the db
val b = this.collection.find("time" $gte 0).sort(MongoDBObject("time" -> 1)).limit(howMany)
b.toList
} else { x }
//val batch = _batch.toList
val endTime = if (batch.nonEmpty) batch.last.asInstanceOf[BasicDBObject].get("time").toString.toInt else startTime
(ExampleBatch(batch.toList, usingWeakExmpls), endTime.toInt)
}
/* startTime and endTime are respectively starting and ending points for an interval of
* the form (s,e). This method partitions the input interval into batches of a specified
* batch size (indicated by the howMany parameter) and returns a list of these batches.*/
def getBatches(startTime: Int, endTime: Int, step: Int, howMany: Int, usingWeakExmpls: Boolean = true): List[ExampleBatch] = {
val batches = new ListBuffer[ExampleBatch]
var start = startTime
while ((start <= endTime - step) && (start < this.endTime)) {
//println(s"start: $start")
//println(s"end: $endTime")
val _batch =
this.collection.find("time" $gte start - 2 * step $lte endTime + 2 * step).sort(MongoDBObject("time" -> 1)).limit(howMany)
val batch = _batch.toList
batches += ExampleBatch(batch.toList, usingWeakExmpls)
start = batch.last.asInstanceOf[BasicDBObject].get("time").toString.toInt
}
batches.toList
}
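  /*
   * A minimal usage sketch (assuming a CAVIAR-style database with a 40-point step):
   *
   *   val db = new Database("caviar")
   *   val batches = db.getBatches(db.startTime, db.endTime, step = 40, howMany = 10)
   */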
}
/* Stuff for CAVIAR experiments */
object CaviarUtils {
def main(args: Array[String]): Unit = {
val fromDB = args(0)
val db = new Database(fromDB)
println(findIntervals(db, "meeting"))
}
def findIntervals(DB: Database, hle: String) = {
/*
def getTimes = {
var times = List[Int]()
for (x <- DB.collection.find()) {
val e = Example(x)
times = times :+ e.time.toInt
}
times
}
val getFirst = DB.collection.findOne()
*/
def samePair(x: (String, String), y: (String, String)) = y._1 == x._2 && y._2 == x._1
var pairs = List[(String, String)]()
def idPair(e: Example, hle: String) = {}
var previous = Example()
for (x <- DB.collection.find().sort(MongoDBObject("time" -> 1))) {
val e = Example(x)
// set previous to e before exiting the iteration
}
}
/**
*
* This method generates the database with examples as pairs. The schema of the DB is:
* exampleId: an integer id of the example
* time: the time stamp of the example
* inertia: contains the example WITH inertia annotation
* noInertia: contains the example WITHOUT inertia annotation
*
   * The inertia and noInertia fields carry a DBObject with the regular example schema (time, narrative, annotation).
*
   * This uses the CAVIAR_Real_FixedBorders DB and in general it reads from a DB where examples are stored
* separately, each as a different entry.
*/
def createPairsDB: Unit = {
      def mergeExamplesWithInertia(e1: Example, e2: Example) = {
val annotation = e1.annotation ++ e2.annotation
val narrative = e1.narrative ++ e2.narrative
val time = e1.time
new Example(annot = annotation, nar = narrative, _time = time)
}
def mergeExamplesNoInertia(e1: Example, e2: Example) = {
// see the comments in mergeExample function from ILEDNoiseTollerant to see what keepAtom does
val keepAtom = (atom: String, annotation: List[String]) => {
val fluent = Literal.parse(atom).terms.head.tostring // the fluent is the first term
annotation forall (x => !x.contains(fluent))
}
val time = e1.time
val narrative = e1.narrative ++ e2.narrative
val annotation = e1.annotation.filter(x => keepAtom(x, e2.annotation)) ++ e2.annotation
new Example(annot = annotation, nar = narrative, _time = time)
}
def examplesToDBObject(inertia: Example, noInertia: Example, id: String) = {
val first = MongoDBObject("time" -> inertia.time) ++ ("annotation" -> inertia.annotation) ++ ("narrative" -> inertia.narrative)
val second = MongoDBObject("time" -> noInertia.time) ++ ("annotation" -> noInertia.annotation) ++ ("narrative" -> noInertia.narrative)
MongoDBObject("exampleId" -> id.toInt, "time" -> inertia.time.toInt, "noInertia" -> second, "inertia" -> first)
}
val DB = new Database("CAVIAR_Real_FixedBorders", "examples")
val dataIterator = DB.collection.find().sort(MongoDBObject("time" -> 1))
val data = new ListBuffer[Example]
while (dataIterator.hasNext) {
val x = Example(dataIterator.next())
data += x
}
val mongoClient = MongoClient()
mongoClient.dropDatabase("CAVIAR_Real_FixedBorders_AsPairs") // clear in any case
val collection = mongoClient("CAVIAR_Real_FixedBorders_AsPairs")("examples")
data.toList.sliding(2).foldLeft(1){ (z, x) =>
      val withInertia = mergeExamplesWithInertia(x.head, x.tail.head)
val withoutInertia = mergeExamplesNoInertia(x.head, x.tail.head)
val entry = examplesToDBObject(withInertia, withoutInertia, z.toString)
collection.insert(entry)
println(z)
z + 1
}
}
//val dataIterator = DB.collection.find().sort(MongoDBObject("exampleId" -> 1))
def getDataAsChunks(collection: MongoCollection, chunkSize: Int, targetClass: String): Iterator[Example] = {
def mergeExmpl(in: List[Example]) = {
val time = in.head.time
//val id = in.head.id
val merged = in.foldLeft(Example()){ (x, newExmpl) =>
val accum = x
val annotation = accum.annotation ++ newExmpl.annotation.distinct
val narrative = accum.narrative ++ newExmpl.narrative.distinct
new Example(annot = annotation, nar = narrative, _time = time)
}
//new Exmpl(_id = id, exampleWithInertia = merged)
merged
}
val dataIterator = collection.find().sort(MongoDBObject("time" -> 1))
val accum = new ListBuffer[Example]
while (dataIterator.hasNext) {
val newExample = dataIterator.next()
//val e = new Exmpl(newExample)
val e = Example(newExample)
accum += e
}
val chunked = accum.toList.sliding(chunkSize, chunkSize - 1)
chunked map { x => mergeExmpl(x) }
}
/*
If withChunking=true (default behaviour) then the intervals are chunked according to chunkSize.
This is used in order to create the training set. To create the testing set however, intervals
must not be chunked to ensure that inference with inertia works properly and tp/fp/fn are counted
correctly. So for testing set generation withChunking=false.
The withChunking parameter is simply passed to getDataFromInterval method that handles chunking or
the lack thereof.
*/
def getDataFromIntervals(collection: MongoCollection, HLE: String, i: List[Interval], chunkSize: Int, withChunking: Boolean = true): Iterator[Example] = {
val out = i.foldLeft(Iterator[Example]()){ (x, y) =>
val z = getDataFromInterval(collection, HLE, y, chunkSize, withChunking)
x ++ z
    } // simply concatenate the iterators without consuming them
out
}
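  /*
   * A minimal usage sketch (hypothetical interval; chunking is disabled, as one
   * would do when generating a testing set -- see the comment above):
   *
   *   val db = new Database("caviar")
   *   val testingData = getDataFromIntervals(db.collection, "meeting",
   *     List(Interval("meeting", 680, 5240)), chunkSize = 2, withChunking = false)
   */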
def getDataFromInterval(collection: MongoCollection, HLE: String, i: Interval, chunkSize: Int, withChunking: Boolean = true): Iterator[Example] = {
val startTime = i.startPoint
val endTime = i.endPoint
val batch = collection.find("time" $gte startTime $lte endTime).sort(MongoDBObject("time" -> 1))
    val examples = batch.map(x => Example(x)).toList
val HLExmpls = examples map { x =>
val a = x.annotation filter (_.contains(HLE))
new Example(annot = a, nar = x.narrative, _time = x.time)
}
val chunked =
if (HLExmpls.nonEmpty) {
if (withChunking) HLExmpls.sliding(chunkSize, chunkSize - 1) else HLExmpls.sliding(HLExmpls.length) // won't be chunked in the else case
} else {
println(s"""${collection.name} returned no results for the query "DB.collection.find("time" gte $startTime lte $endTime).sort(MongoDBObject("time" -> 1))" """)
Iterator.empty
}
/*
     * We need not worry about removing prior annotation from the examples, since in any case inertia is not used during learning.
     * Even if a pair is passed where there is positive annotation at both time points, the first positive example will be covered by
     * the initialTime axiom, while the second positive will be covered by abduction (no inertia).
*/
val out =
chunked map { x =>
val merged = x.foldLeft(Example()) { (z, y) =>
new Example(annot = z.annotation ++ y.annotation, nar = z.narrative ++ y.narrative, _time = x.head.time)
}
//new Exmpl(_id = merged.time, exampleWithInertia = merged)
merged
}
out
}
/**
   * Uses the hand-crafted rules to perform inference on CAVIAR narrative and
   * produce noise-free annotation for complex events. The noise-free data is
   * stored in a DB to use for experiments.
*
* "/home/nkatz/dev/ILED/datasets/Caviar/meetingHandCrafted.lp" path to meeting
* "/home/nkatz/dev/ILED/datasets/Caviar/movingHandCrafted.lp" path to moving
*
*
* Call this method like that:
*
* iled.utils.CaviarUtils.generateCleanData("meeting","/home/nkatz/dev/ILED/datasets/Caviar/meetingHandCrafted.lp")
*
   * @param HLE the target complex event ("meeting" or "moving")
   */
def generateCleanData(HLE: String, handCraftedRulesPath: String, entryPath: String = "", fromDB: String = ""): Unit = {
val CaviarDB = "CAVIAR_Real_FixedBorders"
val newDB = s"CAVIAR_${HLE}_CLEAN"
val mongoClient = MongoClient()
mongoClient.dropDatabase(newDB)
val collection = mongoClient(newDB)("examples")
val gl = new Globals(entryPath)
val (handCraftedRules, show) = HLE match {
case "meeting" =>
(handCraftedRulesPath, s"\n#show.\n#show holdsAt($HLE(X,Y),T):holdsAt($HLE(X,Y),T).\n")
case "moving" =>
(handCraftedRulesPath, s"\n#show.\n#show holdsAt($HLE(X,Y),T):holdsAt($HLE(X,Y),T).\n")
}
val file = Utils.getTempFile("generate", ".lp")
val db = new Database(CaviarDB, "examples")
db.collection.find().sort(MongoDBObject("time" -> 1)).foldLeft(List[String]()){ (priorAnnotation, newExmpl) =>
val e = Example(newExmpl)
if (e.time == "766600") {
val stop = "stop"
}
val narrative = e.narrativeASP
val in = narrative ++ priorAnnotation.map(x => x + ".") ++ List(s"time(${e.time.toInt + 40}).")
val content = in.mkString("\n") + gl.INCLUDE_BK(gl.BK_WHOLE_EC) + gl.INCLUDE_BK(handCraftedRules) + show
Utils.writeLine(content, file.getCanonicalPath, "overwrite")
val out = ASP.solve(task = Globals.INFERENCE, aspInputFile = file)
val prior =
if (out.nonEmpty) {
out.head.atoms.map(x => (Literal.parse(x).terms(1).tostring, x)).filter(z => z._1 == e.time).map(_._2)
} else {
Nil
}
val next =
if (out.nonEmpty) {
out.head.atoms.map(x => (Literal.parse(x).terms(1).tostring, x)).filter(z => z._1 == (e.time.toInt + 40).toString).map(_._2)
} else {
Nil
}
val entry = MongoDBObject("time" -> e.time.toInt) ++ ("annotation" -> prior) ++ ("narrative" -> e.narrative)
println(entry)
collection.insert(entry)
next
}
}
def copyCAVIAR = {
//val CaviarDB = new Database("CAVIAR_Real_FixedBorders")
val CaviarDB = new Database("caviar")
val idPattern = "id[0-9]+".r
val originalIds = List("id0", "id4", "id5", "id1", "id2", "id3", "id6", "id7", "id8", "id9")
val sort = (ids: List[String]) => ids.sortBy(z => z.split("id")(1).toInt)
val getFirstLastIndex = (ids: List[String]) => {
val s = sort(ids) //.last.split("id")(1).toInt
val first = s.head.split("id")(1).toInt
val last = s.last.split("id")(1).toInt
(first, last)
}
def replaceAll = (s: String, map: Map[String, String]) => {
val ids = idPattern.findAllIn(s)
val toLit = if (!s.contains(".")) Literal.parse(s) else Literal.parse(s.split("\\.")(0))
//ids.foldLeft(s){ (x,id) => x.replaceFirst(id,map(id)) }
ids.foldLeft(toLit){ (x, id) => x.replace(Constant(id), Constant(map(id))) }.tostring
}
var lastIndex = getFirstLastIndex(originalIds)._2
for (count <- 1 to 9) {
lastIndex += 1
val extraIds = (lastIndex to lastIndex + 9).map(index => s"id$index").toList
val map = (sort(originalIds) zip sort(extraIds)).toMap
val indexes = getFirstLastIndex(extraIds)
lastIndex = indexes._2
val newDB = s"CAVIAR_id${indexes._1}_id${indexes._2}"
val mongoClient = MongoClient()
mongoClient.dropDatabase(newDB)
val collection = mongoClient(newDB)("examples")
CaviarDB.collection.find().sort(MongoDBObject("time" -> 1)).foldLeft(()) { (_, newExmpl) =>
val e = Example(newExmpl)
val narrative = e.narrativeASP map (x => replaceAll(x, map))
val annotation = e.annotation map (x => replaceAll(x, map))
val entry = MongoDBObject("time" -> e.time.toInt) ++ ("annotation" -> annotation) ++ ("narrative" -> narrative)
println(entry)
collection.insert(entry)
}
}
}
/*
def mergeCaviarCopies(numOfCopies: Int) = {
val mongoClient = MongoClient()
val allCopies =
(mongoClient.databaseNames.filter(x => x.contains("CAVIAR_id")).toList :+ "caviar").take(numOfCopies)
println(allCopies.size)
val DBs = allCopies.map(name => new Database(name))
// get all times to use them later as queries for merging
val times = DBs.head.collection.find().sort(MongoDBObject("time" -> 1)).foldLeft(List[Int]()) { (t, newExmpl) =>
t :+ Example(newExmpl).time.toInt
}
val newDB = s"CAVIAR-MERGED-COPIES-$numOfCopies"
mongoClient.dropDatabase(newDB)
val collection = mongoClient(newDB)("examples")
times.foldLeft(List[Example]()) { (accum, time) =>
val query = MongoDBObject("time" -> time)
val e = Example.mergeExamples(DBs.map(db => db.collection.findOne(query).get).map(z => Example(z)))
val entry = MongoDBObject("time" -> time) ++ ("annotation" -> e.annotation) ++ ("narrative" -> e.narrative)
println(entry)
collection.insert(entry)
accum :+ e
}
mongoClient.close()
}
*/
}
/*
class Exmpl(e: DBObject = DBObject(), _id: String = "", exampleWithInertia: Example = Example()) {
val time = if (e != DBObject()) e.asInstanceOf[BasicDBObject].get("time").toString else exampleWithInertia.time
val id = if (e != DBObject()) e.asInstanceOf[BasicDBObject].get("exampleId").toString else _id
val exmplWithInertia = if (e != DBObject()) Example(e.asInstanceOf[BasicDBObject].get("inertia").asInstanceOf[BasicDBObject]) else exampleWithInertia
val exmplNoInertia = if (e != DBObject()) Example(e.asInstanceOf[BasicDBObject].get("noInertia").asInstanceOf[BasicDBObject]) else Example()
}
*/
| 20,833 | 39.691406 | 167 | scala |
OLED | OLED-master/src/main/scala/utils/Utils.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils
import java.io.{BufferedWriter, File, FileWriter}
import java.util.UUID
import com.typesafe.scalalogging.LazyLogging
import logic.Examples.{Example, ExamplePair}
import logic.Exceptions.MyParsingException
import parsers.ASPResultsParser
import scala.annotation.tailrec
import scala.math._
import scala.util.Random
import com.mongodb.casbah.Imports._
import scala.collection.mutable.ListBuffer
import scala.io.Source
import logic._
object Utils extends ASPResultsParser with LazyLogging {
/**
* Transforms input to an ASP program. The program is written in an output file that is passed to the ASP solver.
   * writeToFile is the only non-optional parameter of the method.
*
   * @param writeToFile path to the file where the ASP program is written.
   * @param program an (optional) set of ground or non-ground rules and/or ground facts.
   * @param generateDirectives an (optional) list containing declarations for atoms to be generated during the computation
* of answer sets.
* @example of such input:
*
* List("father(X,Y):person(X):person(Y)","grandfather(X,Y):person(X):person(Y)")
*
* Such a list is transformed into the "generate" part of the program:
*
* {father(X,Y):person(X):person(Y), grandfather(X,Y):person(X):person(Y)}.
   * @param generateAtLeast an (optional) lower bound for the number of generated atoms to be included in an answer set.
   * @param generateAtMost an (optional) upper bound for the number of generated atoms to be included in an answer set.
   * @param minimizeStatements an (optional) list of atoms whose instances in an answer set should be minimized.
* @example of such input:
*
* List("father(X,Y)","grandfather(X,Y)"))
*
* Such a list is transformed into a minimize statement:
*
* #minimize{father(X,Y),grandfather(X,Y)}.
   * @param maximizeStatements similar to the above, for maximize directives.
   * @param constraints a set of integrity constraints. Example:
*
* List(List("father(X,Y)","mother(X,Y)"), List("father(X,Y)","not male(X)"))
*
* Such input is transformed to integrity constraints in the ASP program:
*
* :- father(X,Y), mother(X,Y).
* :- father(X,Y), not male(X).
   * @param show an (optional) list of atoms that are to be displayed. All other atoms in an answer set are hidden,
   * since a #show directive is generated for each atom in this list.
* @example of such input:
*
* List("father(X,Y)","mother(X,Y)") or
*
* List("father/2","mother2")
*
* Such input is transformed into
*
*
* #hide.
* #show father(X,Y).
* #show mother(X,Y)
   * @param extra any extra knowledge; it is simply printed in the ASP input file.
*/
def toASPprogram(
program: List[String] = Nil,
generateDirectives: List[String] = Nil,
generateAtLeast: Int = 1000000000,
generateAtMost: Int = 1000000000,
minimizeStatements: List[String] = Nil,
maximizeStatements: List[String] = Nil,
constraints: List[List[String]] = Nil,
show: List[String] = Nil,
extra: List[String] = Nil,
writeToFile: String) = {
Utils.clearFile(writeToFile) // clear here, append everywhere else.
Utils.writeToFile(new java.io.File(writeToFile), "append")(p => program foreach (p.println))
val genStatems = (generateDirectives, generateAtLeast, generateAtMost) match {
case x @ (Nil, _, _) => List()
//case x @ (head :: tail, 1000000000,1000000000) => println(x); x._1.map( y => "{" + y + "}.\n")
case x @ (head :: tail, 1000000000, 1000000000) => for (e <- x._1) yield "{" + e + "}."
case x @ (head :: tail, lower, 1000000000) => (head :: tail).map(y => s"$lower {" + y + "}.\n")
case x @ (head :: tail, 1000000000, upper) => (head :: tail).map(y => "0 {" + y + s"} $upper.\n")
case x @ (head :: tail, lower, upper) => (head :: tail).map(y => s"$lower {" + y + s"} $upper.\n")
}
Utils.writeToFile(new java.io.File(writeToFile), "append")(p => genStatems foreach (p.println))
val minStatement = minimizeStatements match { // This is a single string
case Nil => ""
case _ => "#minimize{ " + minimizeStatements.mkString(",") + "}.\n"
}
val maxStatement = maximizeStatements match { // This is a single string
case Nil => ""
case _ => "#maximize{ " + maximizeStatements.mkString(",") + "}.\n"
}
val constrs = constraints match { // This is a list of strings
case Nil => List("")
case _ => for (x <- constraints) yield ":- " + x.mkString(",") + ".\n"
}
Utils.writeLine(minStatement, writeToFile, "append")
Utils.writeLine(maxStatement, writeToFile, "append")
Utils.writeToFile(new java.io.File(writeToFile), "append")(p => constrs foreach (p.println))
val (hideDir, showDirs) = show match {
case Nil => ("", List(""))
case _ => ("", (for (x <- show) yield "#show " + x + "."))
}
Utils.writeLine(hideDir, writeToFile, "append")
Utils.writeToFile(new java.io.File(writeToFile), "append")(p => extra foreach (p.println))
Utils.writeToFile(new java.io.File(writeToFile), "append")(p => showDirs foreach (p.println))
}
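  /*
   * A minimal usage sketch (hypothetical atoms and output path), combining the
   * kinds of input described in the scaladoc above:
   *
   *   toASPprogram(
   *     program = List("person(kostas).", "person(maria)."),
   *     generateDirectives = List("father(X,Y):person(X):person(Y)"),
   *     constraints = List(List("father(X,X)")),
   *     show = List("father/2"),
   *     writeToFile = "/tmp/program.lp")
   */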
def isSubset(x: Set[Any], y: Set[Any]): Boolean = x subsetOf y
/**
*
* ***************
* File IO Utils
* ***************
*
*/
def clearFile(file: String): Unit = {
val writer = new java.io.PrintWriter(new FileWriter(new java.io.File(file), false))
writer.write("")
writer.close()
}
def createOrClearFile(path: String): String = {
val myFile = new java.io.File(path)
if (!myFile.exists()) {
myFile.createNewFile()
} else {
clearFile(path)
}
path
}
def writeToFile(f: java.io.File, howTowrite: String)(op: java.io.PrintWriter => Unit) {
// Write an iterable to file. Usage:
//writeToFile(new File("example.txt")) { p => data.foreach(p.println) }
val p = howTowrite match {
case "append" => new java.io.PrintWriter(new FileWriter(f, true))
case "overwrite" => new java.io.PrintWriter(new FileWriter(f, false))
case _ => new java.io.PrintWriter(new FileWriter(f, false)) // default is overwrite
}
try { op(p) } finally { p.close() }
}
def readFiletoList(file: String): List[String] = {
Source.fromFile(file).getLines.toList
}
def readFileToString(file: String) = {
Source.fromFile(file).mkString.replaceAll("\\s", "")
}
def getTempFile(prefix: String, suffix: String,
directory: String = "",
deleteOnExit: Boolean = true): File = {
var file: java.io.File = new java.io.File("")
directory match {
case "" => file = java.io.File.createTempFile(s"$prefix-${System.currentTimeMillis()}-${UUID.randomUUID.toString}", suffix)
case _ => file = java.io.File.createTempFile(s"$prefix-${System.currentTimeMillis()}-${UUID.randomUUID.toString}", suffix, new java.io.File(directory))
}
if (deleteOnExit) file.deleteOnExit()
file
}
def writeLine(in: String, file: String, append: String): Unit = {
val w = append match {
case "append" => new BufferedWriter(new FileWriter(file, true))
case "overwrite" => new BufferedWriter(new FileWriter(file, false))
case _ => throw new RuntimeException("Specify append or overwrite")
}
w.write(in)
w.close()
}
def deleteRecursively(file: File): Unit = {
if (file.isDirectory)
file.listFiles.foreach(deleteRecursively)
if (file.exists && !file.delete)
throw new Exception(s"Unable to delete ${file.getAbsolutePath}")
}
def getInnerDirs(dir: String): List[File] = {
val d = new File(dir)
if (d.exists && d.isDirectory) {
d.listFiles.filter(_.isDirectory).toList
} else {
List[File]()
}
}
def getInnerFiles(dir: String): List[File] = {
val d = new File(dir)
if (d.exists && d.isDirectory) {
d.listFiles.filter(_.isFile).toList
} else {
List[File]()
}
}
/*
* -------------------------------------------------------------------------------------------
* Utilities for connecting to mongo and getting examples. Fixes for locking below.
* -------------------------------------------------------------------------------------------
*
* Fix mongo connectivity issues:
*
* -- Manually remove the lockfile: sudo rm /var/lib/mongodb/mongod.lock
*
* -- Run the repair script: sudo -u mongodb mongod -f /etc/mongodb.conf --repair
*
* -- Start your MongoDB server with sudo start mongodb and verify it is running with sudo
*
* status mongodb and by trying to connect to it with mongo test.
*
* --------------------------------------------------------------------------------------------
*/
def getAllDBs(identifier: String = ""): List[String] = {
    // Returns the names of all existing CAVIAR DBs. Names are filtered on the "CAVIAR-" prefix (the identifier parameter is currently unused).
val mongoClient = MongoClient()
val all = mongoClient.databaseNames.filter(x => x.contains("CAVIAR-")).toList.sortBy { x => x.split("-")(1).toInt }
all.foreach(x => mongoClient(x)("examples"))
mongoClient.close()
all
}
def getAllExamples(db: String, collection: String,
alternativeAspFile: String = ""): Map[String, List[String]] = {
    // Get all examples from a DB as ASP-formatted atoms.
    // Returns a map with the DB's annotation and narrative atoms.
val mongoClient = MongoClient()
val col = mongoClient(db)(collection)
//val col = MongoClient()(db)(collection)
val out = examplestoASP("all", "all", col, alternativeAspFile)
mongoClient.close()
out
}
/*
* Get one example
*/
def getOneExample(field: String, fieldValue: Any, collection: MongoCollection): Any = {
examplestoASP(field, fieldValue, collection)
}
/**
* Returns a Structures.ExamplePair object from a pair of two consecutive examples.
* Here field is time, fieldValueStart and fieldValueEnd are integers.
*/
def getExamplePairs(field: String, fieldValueStart: Int,
fieldValueEnd: Int, collection: MongoCollection): Option[ExamplePair] = {
for {
e1 <- collection.findOne(MongoDBObject(field -> fieldValueStart))
e2 <- collection.findOne(MongoDBObject(field -> fieldValueEnd))
} yield ExamplePair(Example(e1), Example(e2))
/*
     * The above does the same as this:
*
* collection.findOne(MongoDBObject(field -> fieldValueStart)).flatMap {
* e1 => collection.findOne(MongoDBObject(field -> fieldValueEnd)) .map { e2 => ExamplePair(Example(e1), Example(e2)) }}
*
* The first part:
*
* collection.findOne(MongoDBObject(field -> fieldValueStart))
*
* executes the query field -> fieldValueStart and returns an Option[DBObject]
*
     * To that, we apply the flatMap function (to the Option returned by the first findOne).
*/
}
def examplestoASP(
field: String,
fieldValue: Any,
collection: MongoCollection,
alternativeAspFile: String = ""): Map[String, List[String]] = {
var annotation = new ListBuffer[String]()
var narrative = new ListBuffer[String]()
field match {
case "all" =>
for (x <- collection.find().sort(MongoDBObject("time" -> 1))) {
annotation = annotation ++ x.asInstanceOf[BasicDBObject].get("annotation").asInstanceOf[BasicDBList].toList.map(x => s"example($x).")
narrative = narrative ++ x.asInstanceOf[BasicDBObject].get("narrative").asInstanceOf[BasicDBList].toList.map(x => s"$x.")
}
case "\\s" => throw new RuntimeException("Which example do you want?")
case _ => fieldValue match {
case "\\s" => throw new RuntimeException("Which example do you want?")
case "all" => throw new RuntimeException("Excecution should not have reached this code")
case _ =>
val query = MongoDBObject(field -> fieldValue)
try {
val target = collection.findOne(query).get
annotation = annotation ++ target.asInstanceOf[BasicDBObject].get("annotation").asInstanceOf[BasicDBList].toList.map(x => s"example($x).");
narrative = narrative ++ target.asInstanceOf[BasicDBObject].get("narrative").asInstanceOf[BasicDBList].toList.map(x => s"$x.");
//write((annotation, narrative))
} catch {
case e: NoSuchElementException =>
println(s"The example with \'field -> value\' : \'$field -> $fieldValue\' does not exist")
//System.exit(-1)
}
}
}
//return if (annotation.length > 0) true else false
return Map("annotation" -> annotation.toList, "narrative" -> narrative.toList)
}
def computeDistancesMany(supervision: List[List[String]], collection: MongoCollection): Unit = {
collection.find().sort(MongoDBObject("time" -> 1)).foreach(row =>
try {
for (x <- supervision) print(Hausdorff.exmplHausdrfDist(x, f(row)) + " ")
println("\n")
} catch {
case e: MyParsingException => f(row).foreach(println); println(e); throw new RuntimeException
})
}
def computeDistances(realExmpl: List[String], collection: MongoCollection): Unit = {
collection.find().sort(MongoDBObject("time" -> 1)).foreach(row =>
try {
println(Hausdorff.exmplHausdrfDist(realExmpl, f(row)))
} catch {
case e: MyParsingException => f(row).foreach(println); println(e); throw new RuntimeException
})
}
/**
* Helper method
*/
def f(row: Any): List[String] = {
val x = row.asInstanceOf[BasicDBObject].get("narrative").asInstanceOf[BasicDBList].toList
val y = row.asInstanceOf[BasicDBObject].get("annotation").asInstanceOf[BasicDBList].toList
val example = x ++ y map { x => x.asInstanceOf[String] }
example
}
/**
* Get an example as an interpretation in a list
*/
def getExample(field: String, fieldValue: Any, collection: MongoCollection): List[String] = {
    val query = MongoDBObject(field -> fieldValue)
    collection.findOne(query) match {
      case Some(target) =>
        val narrative = target.asInstanceOf[BasicDBObject].get("narrative").asInstanceOf[BasicDBList].toList
        val annotation = target.asInstanceOf[BasicDBObject].get("annotation").asInstanceOf[BasicDBList].toList
        (narrative ++ annotation) map { x => x.asInstanceOf[String] }
      case None => List[String]()
    }
}
/* Get a simple string as result, from the field of interest */
def getStringByField(field: String, fieldValue: Any, collection: MongoCollection): Any = {
    val query = MongoDBObject(field -> fieldValue)
    collection.findOne(query) match {
      case Some(x) => x.get(field)
      case _ => None
    }
}
/* Returns a query whose result is an array */
def getArrayByField(field: String, fieldValue: Any, collection: MongoCollection): Any = {
    val query = MongoDBObject(field -> fieldValue)
    collection.findOne(query) match {
      case Some(x) => x.getAs[MongoDBList](field).get
      case None => None
    }
}
/* Returns the first entry. Works for data stored as arrays*/
def getOneArray(collection: MongoCollection): List[String] = {
    collection.findOne() match {
      case Some(x) => x.asInstanceOf[BasicDBList].toList.map(_.asInstanceOf[String])
      case None => List[String]()
    }
}
/**
* Time a function. Usage: simply wrap around a block of code. E.g:
*
* val hd = time { exmplHausdrfDist(labled.asVarbedInterpretation, z._2.toStrList) }
*
*/
def _time[R](block: => R): R = {
val t0 = System.nanoTime()
val result = block // call-by-name
val t1 = System.nanoTime()
println("Time: " + ((t1 - t0) / 1000000000.0) + " sec")
result
}
def time[R](codeBlock: => R): (R, Double) = {
val t0 = System.nanoTime()
val result = codeBlock // call-by-name
val t1 = System.nanoTime()
val totalTime = (t1 - t0) / 1000000000.0
(result, totalTime)
}
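  /**
   * Usage sketch for the tupled variant above (the wrapped block is arbitrary):
   *
   * val (result, seconds) = time { Thread.sleep(100); 42 }
   * // result == 42, seconds is roughly 0.1
   */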
def mywhile(cond: => Boolean, block: => Unit): Unit =
if (cond) {
block
mywhile(cond, block)
}
def lined(msg: String) = s"\n$msg\n${"-" * msg.length}"
def mean(s: List[Double]) = s.foldLeft(0.0)(_ + _) / s.size
def deviation(s: List[Double], mean: Double) = {
val diffs = s map (x => math.abs(x - mean))
this.mean(diffs)
}
def combinations(n: Int, k: Int) = {
if (n >= k) factorial(n) / (factorial(k) * factorial(n - k)) else BigInt(0)
}
def factorial(x: BigInt): BigInt = {
@tailrec
def f(x: BigInt, acc: BigInt): BigInt = {
if (x == 0) acc else f(x - 1, x * acc)
}
f(x, 1)
}
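  /*
   * Worked example for the two combinatorial helpers above (values computed by hand):
   * factorial(5) = 120, so combinations(5, 2) = 120 / (2 * 6) = 10.
   * For n < k, combinations returns BigInt(0) by construction.
   */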
def sampleN(N: Int, sampleFrom: List[Any]) = {
@tailrec
def sampleN(N: Int, sampleFrom: List[Any], sample: List[Any]): List[Any] = {
sample.length match {
case N => sample
case _ =>
val newValue = Random.shuffle(sampleFrom).head
val newSample = if (!sample.contains(newValue)) sample :+ newValue else sample
sampleN(N, sampleFrom, newSample)
}
}
sampleN(N, sampleFrom, List())
}
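  /*
   * Usage sketch (the input list is arbitrary): sampleN(3, (1 to 10).toList) returns
   * 3 distinct elements drawn from the list. Note that the recursion only stops once
   * N distinct values have been collected, so the call does not terminate if N exceeds
   * the number of distinct elements in sampleFrom.
   */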
def checkIfCompressed(theory: PriorTheory) = {
val compressed = LogicUtils.compressTheory(theory.merge.clauses)
if (compressed.length != theory.merge.clauses.length) {
logger.error("You're packing the same rules")
val sames = for (x <- theory.merge.clauses) yield (x, theory.merge.clauses.find(y => y != x && x.thetaSubsumes(y) && y.thetaSubsumes(x)))
val p = sames map {
x =>
x._2 match {
case Some(y) => s"Same pair: ${x._1.tostring} from weak: ${x._1.fromWeakExample}\n${y.tostring} from weak: ${y.fromWeakExample}"
case None => None
}
}
logger.error(s"same rules: $p")
}
}
/* Returns the maximum Hausdorff distance of this clause from a list of clauses */
def similarity(c: Clause, x: List[Clause]) = {
val dists = x.filter(_.body.nonEmpty).foldLeft(List[Double]()){ (accum, newClause) =>
//val sim = Hausdorff.litlistFromlitlist(c.literals,newClause.literals)
val sim = Hausdorff.litlistFromlitlist(c.body, newClause.body)
accum :+ sim
}
//println(dists)
if (dists.nonEmpty) dists.min else -100.0
}
def hoeffding(delta: Double, n: Int, range: Double = 1.0) = {
sqrt(scala.math.pow(range, 2) * scala.math.log(1.0 / delta) / (2 * n))
// For the following, check p.3 of
// Rutkowski, Leszek, et al. "Decision trees for mining data streams based on the McDiarmid's bound."
// IEEE Transactions on Knowledge and Data Engineering 25.6 (2013): 1272-1279.
// (this is McDiarmid’s inequality)
//----------------------------------------------------------
// 6*(2*Math.log(Math.E*2) + Math.log(2*2)) + 2*Math.log(2)
//----------------------------------------------------------
}
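  /*
   * Worked example (hypothetical values): with delta = 0.05, n = 1000 and the default
   * unit range, hoeffding(0.05, 1000) = sqrt(ln(1/0.05) / 2000), which is roughly 0.0387,
   * i.e. with probability at least 1 - delta the observed mean of 1000 observations is
   * within about 0.039 of the true mean.
   */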
}
OLED | OLED-master/src/main/scala/utils/lookaheads/LookAheadImplementations.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils.lookaheads
import logic.{Clause, Literal}
import utils.lookaheads.LookAheadUtils._
/**
* Created by nkatz on 11/30/16.
*/
object LookAheadImplementations {
def lookAhead_<-(litToAdd: Literal, currentClause: Clause, bottomClause: Clause, spec: LookAheadSpecification) = {
val lastLitThatMatches = getLastLiteralThatMatches(litToAdd, currentClause, bottomClause, spec)
val atoms = spec.actualLinkingGroups.flatMap(group => getLinkingAtoms(litToAdd, lastLitThatMatches, group, bottomClause))
atoms
}
private def getLastLiteralThatMatches(litToAdd: Literal, currentClause: Clause, bottomClause: Clause, spec: LookAheadSpecification) = {
/*
if (matches(spec.litToLinkTo,litToAdd)) {
currentClause.head.asLiteral
} else {
currentClause.toLiteralList.reverse.find(p => matches(p, spec.litToLinkTo) ).getOrElse {
throw new RuntimeException(s"Could not find any literal in\n ${currentClause.tostring}\nthat " +
s"matches the schema of\n${spec.litToLinkTo.tostring}. The lookahead schema is:\n${spec.lookAheadSpecification}")
}
}
*/
currentClause.toLiteralList.reverse.
find(p => matches(p, spec.litToLinkTo)).
getOrElse {
throw new RuntimeException(s"Could not find any literal in\n ${currentClause.tostring}\nthat " +
s"matches the schema of\n${spec.litToLinkTo.tostring}. The lookahead schema is:\n${spec.lookAheadSpecification}")
}
}
private def getLinkingAtoms(lit1: Literal, lit2: Literal, l: LinkingPredicatesGroup, bottomClause: Clause) = {
val map = Map(1 -> lit1, 2 -> lit2)
val linkingAtoms = l.predicates.map { x =>
val linkingAtom = x.literal
val actualVars = x.vs.map(z => map(z.appearsInLiteral).getVars(z.positionInLiteral))
val dummyVars = linkingAtom.getVars.toList
val paired = if (dummyVars.length == actualVars.length) dummyVars zip actualVars else throw new RuntimeException(s"dummy and actual vars cannot" +
s" be paired for literal $linkingAtom. Dummy vars are ${dummyVars.map(_.tostring).mkString(" ")} and actual vars are ${actualVars.map(_.tostring).mkString(" ")}")
linkingAtom.replaceAll(paired.toMap)
}
val actualLinkingAtoms = linkingAtoms.filter(x => bottomClause.toStrList.contains(x.tostring))
    // For the fraud application exactly one of the actualLinkingAtoms must be found in the bottom clause,
    // because the atoms in each linking group are mutually contradictory. I think that this is the generic way
    // this should work; in any case, that's how it's currently implemented. There is an exception, however, in
    // the case where we are adding a literal that refers to the same time as the head (then we have no before/2
    // or after/2), so we don't throw the exception below.
/*
if (actualLinkingAtoms.length != 1) throw new RuntimeException("More than one atoms from the same linking group are found in the bottom clause." +
s"The bottom clause is\n${bottomClause.tostring}. Linking atoms found: ${actualLinkingAtoms.map(_.tostring).mkString(" ")}")
*/
actualLinkingAtoms
}
def hasLookAhead(lit: Literal, lookaheadSpecs: List[LookAheadSpecification]) = {
lookaheadSpecs.exists(p => p.litToBeAdded.predSymbol == lit.predSymbol && p.litToBeAdded.arity == lit.arity)
}
def matches(x: Literal, y: Literal) = {
x.predSymbol == y.predSymbol && x.arity == y.arity
}
def selectLookaheadSpec(litToAdd: Literal, c: Clause, bottomClause: Clause, lookaheadSpecs: List[LookAheadSpecification]): LookAheadSpecification = {
val (headLookaheadSpec, bodyLookaheadSpecs) = lookaheadSpecs.foldLeft(List[LookAheadSpecification](), List[LookAheadSpecification]()) { (x, y) =>
val headLink = x._1
val bodyLink = x._2
if (matches(bottomClause.head.asLiteral, y.litToLinkTo)) {
(headLink :+ y, bodyLink)
} else {
if (bottomClause.body.exists(p => matches(p, y.litToLinkTo))) {
(headLink, bodyLink :+ y)
} else {
throw new RuntimeException(s"Lookaheds Error: There is no literal in\n${c.tostring} matching the specified linking atom ${y.litToLinkTo.tostring}")
}
}
}
// just a sanity check
if (headLookaheadSpec.isEmpty) {
throw new RuntimeException("No lookahead spec linking to the head. Add one")
}
if (headLookaheadSpec.length > 1) throw new RuntimeException("Something's wrong, I'm getting multiple head-linking lookaheads")
bodyLookaheadSpecs.find(p => matches(p.litToLinkTo, litToAdd) && c.body.exists(z => matches(z, p.litToLinkTo))).getOrElse(headLookaheadSpec.head)
}
}
OLED | OLED-master/src/main/scala/utils/lookaheads/LookAheadUtils.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils.lookaheads
import logic.Exceptions.MyParsingException
import logic.Literal
import utils.parsers.ClausalLogicParser
import scala.io.Source
/**
* Created by nkatz on 11/30/16.
*/
object LookAheadUtils {
  /*
   *
   * A linking variable is a variable found in a linking predicate (e.g. in a comparison predicate) of a lookahead schema.
   * appearsInLiteral is the number of the literal in which it appears (first or second literal, where the first is the literal
   * that is about to be added to the clause we're currently learning and the second is the literal that already exists
   * in the clause and to which we're linking the new one). So appearsInLiteral is 1 or 2. positionInLiteral is the index of the
   * current linking variable in the variables list of the appearsInLiteral literal.
   *
   * */
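  /*
   * Hypothetical example: in the schema
   *
   * lookahead( p(X,T1) <-> q(Y,T2) : {before(T1,T2) | after(T1,T2)} )
   *
   * the linking variable T1 of before/2 has appearsInLiteral = 1 (it occurs in p/2, the
   * literal about to be added) and positionInLiteral = 1 (second entry in p's variable
   * list [X, T1]), while T2 has appearsInLiteral = 2 and positionInLiteral = 1.
   */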
class LinkingVariable(val variable: logic.Variable, val appearsInLiteral: Int, val positionInLiteral: Int)
class LinkingPredicate(val literal: Literal, val vs: List[LinkingVariable])
class LinkingPredicatesGroup(val predicates: List[LinkingPredicate])
class LookAheadSpecification(val lookAheadSpecification: String) extends LookAheadsParser {
/*
     * An example of a lookahead specification is
* lookahead( transaction(Whatever1,A1,Whatever2,T1) <-> transaction(Whatever3,A2,Whatever4,T2) : {before(T1,T2) | after(T1,T2)}, {greaterThan(A1,A2) | lessThan(A1,A2)} )
*/
private val p = parse(lookAheadSpecification)
private val linkedAtoms = p._1
private val linkingPredGroups = p._2
val litToBeAdded = linkedAtoms._1
val litToLinkTo = linkedAtoms._2
private def isVariableUncommon(v: logic.Variable) = {
!(litToBeAdded.getVars.contains(v) && litToLinkTo.getVars.contains(v))
}
private def variableExists(v: logic.Variable) = {
litToBeAdded.getVars.contains(v) || litToLinkTo.getVars.contains(v)
}
/*
* l is a literal from some LinkingPredicatesGroup. This method returns a tuple
* with l in the first coordinate and a list of the variables that appear in l
* in the form of LinkingVariable objects
*/
private def getLinkingVars(l: Literal) = {
val t = l.getVars.toList.map { v =>
require(isVariableUncommon(v), s"Variable with name ${v.name} is common both in litToBeAdded and litToLinkTo")
require(variableExists(v), s"Variable with name ${v.name} does not appear in neither one of litToBeAdded or litToLinkTo")
val appearsInLiteral = if (litToBeAdded.getVars.contains(v)) 1 else 2
val litItAppearsIn = if (litToBeAdded.getVars.contains(v)) litToBeAdded else litToLinkTo
val positionInLiteral = litItAppearsIn.getVars.indexOf(v)
new LinkingVariable(v, appearsInLiteral, positionInLiteral)
}
(l, t)
}
// That's the only thing that matters from this class. Everything that happens in this class aims at getting this list
val actualLinkingGroups =
linkingPredGroups.map(group => group.map(literal => getLinkingVars(literal))).
map(z => z.map(p => new LinkingPredicate(p._1, p._2))).map(a => new LinkingPredicatesGroup(a))
}
class LookAheadsParser extends ClausalLogicParser {
def linkPredsGroup: Parser[List[Literal]] = "{" ~> repsep(literal, "|") <~ "}"
def linkPredsGroups: Parser[List[List[Literal]]] = repsep(linkPredsGroup, ",")
def linkedLiterals: Parser[(Literal, Literal)] = literal ~ "<->" ~ literal ^^ { case x ~ "<->" ~ y => (x, y) }
def specificationParser: Parser[((Literal, Literal), List[List[Literal]])] =
"lookahead" ~ "(" ~ linkedLiterals ~ ":" ~ linkPredsGroups ~ ")" ^^ { case "lookahead" ~ "(" ~ x ~ ":" ~ y ~ ")" => (x, y) }
private def _parse(expression: String): Option[((Literal, Literal), List[List[Literal]])] = {
parseAll(specificationParser, expression) match {
case Success(result, _) => Some(result)
case f => None
}
}
private def getParseResult(x: Option[((Literal, Literal), List[List[Literal]])]): ((Literal, Literal), List[List[Literal]]) = x match {
case Some(y) => y
case _ => throw new MyParsingException(x.toString)
}
def parse(expression: String): ((Literal, Literal), List[List[Literal]]) = {
getParseResult(_parse(expression))
}
}
object Run extends App {
val LOOK_AHEADS_TEST = {
val f = Source.fromFile("/home/nkatz/dev/ILED/datasets/Fraud/modes").getLines.toList.filter(line => line.startsWith("lookahead"))
if (f.nonEmpty) f.map(x => new LookAheadSpecification(x)) else Nil
}
val stop = "stop"
val p = new LookAheadsParser
p.parse("lookahead( transaction(Whatever,A1,Whatever,T1) <-> transaction(Whatever,A2,Whatever,T2) : {before(T1,T2) | after(T1,T2)}, {greaterThan(A1,A2) | lessThan(A1,A2)} )")
}
}
OLED | OLED-master/src/main/scala/utils/parsers/ASPResultsParser.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils.parsers
class ASPResultsParser extends ClausalLogicParser {
def aspResult: Parser[List[String]] = repsep(literal, "") ^^ { case x => for (y <- x) yield y.tostring }
def parseASP(parser: Parser[Any], expression: String): Option[Any] = {
parseAll(parser, expression) match {
case Success(result, _) => Some(result)
case Failure(msg, _) =>
println("FAILURE: " + msg); None
case Error(msg, _) => println("ERROR: " + msg); None
}
}
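  /*
   * Usage sketch (the answer-set string is hypothetical):
   *
   * val p = new ASPResultsParser
   * val atoms = p.getResult(p.parseASP(p.aspResult, "holdsAt(meeting(id1,id2),10) holdsAt(meeting(id1,id2),20)"))
   * // atoms is the list of the parsed atoms, rendered back to strings via tostring
   */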
def parsed(x: Option[Any]): Boolean = x match {
case Some(y) => true
case _ => false
}
def getResult(x: Option[Any]): Any = x match {
case Some(y) => y
case _ => false
}
}
OLED | OLED-master/src/main/scala/utils/parsers/ClausalLogicParser.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils.parsers
import logic.Exceptions._
import logic._
import scala.util.parsing.combinator.JavaTokenParsers
object Test1 extends App {
val p = new ClausalLogicParser
val x = p.parse(p.literal, "initiatedAt(meeting(a(p,test(c,X,59,e(xmymz))),X,45),T)").getOrElse(throw new RuntimeException)
println(x.tostring)
}
class ClausalLogicParser extends JavaTokenParsers {
def lowerCaseIdent: Parser[String] = """[a-z][a-zA-Z0-9_]*""".r
def upperCaseIdent: Parser[String] = """[A-Z][a-zA-Z0-9_]*""".r
def anyWord: Parser[String] = """[A-Za-z0-9_,()+-\\#]*""".r ^^ { x => x }
def anyWord1: Parser[String] = """[A-Za-z0-9_()+-\\#]*""".r ^^ { x => x } // no ','
def quoted: Parser[String] = "\"" ~ anyWord ~ "\"" ^^ { case "\"" ~ x ~ "\"" => "\"" + x + "\"" } | "\'" ~ anyWord ~ "\'" ^^ { case "\'" ~ x ~ "\'" => "\'" + x + "\'" }
def naf: Parser[String] = "not " ~ rep("\\s+") ^^ { _ => "not" }
def iff: Parser[String] = rep("\\s+") ~ ":-" ~ rep("\\s+") ^^ { _ => ":-" }
def number: Parser[String] = floatingPointNumber
def quotedNumber: Parser[String] = "\"" ~ floatingPointNumber ~ "\"" ^^ { case "\"" ~ x ~ "\"" => "\"" + x + "\"" }
def variable: Parser[Expression] = upperCaseIdent ^^ { x => Variable(x) }
def constant: Parser[Expression] = (lowerCaseIdent | quoted) ^^ { x => Constant(x) } | (number | quotedNumber) ^^ { x => Constant(x) }
def term: Parser[Expression] = literal | variable | constant
def innerTerms: Parser[List[Expression]] = "(" ~> repsep(term, ",") <~ ")"
def literal: Parser[Literal] = (
naf ~ lowerCaseIdent ~ innerTerms ^^ { case naf ~ functor ~ inner => Literal(predSymbol = functor, terms = inner, isNAF = true) }
| lowerCaseIdent ~ innerTerms ^^ { case functor ~ inner => Literal(predSymbol = functor, terms = inner) })
def atom: Parser[PosLiteral] = lowerCaseIdent ~ innerTerms ^^ { case functor ~ inner => PosLiteral(functor = functor, terms = inner) }
def clauseHead: Parser[PosLiteral] = atom
def clauseBody: Parser[List[Literal]] = repsep(literal, ",")
def clause: Parser[Clause] = clauseHead ~ iff ~ clauseBody ^^ { case head ~ iff ~ body => Clause(head, body) }
def parseOutput(parser: Parser[Expression], expression: String): Expression = {
getParseResult(parse(parser, expression))
}
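  /*
   * Usage sketch (the clause string is hypothetical; see also Test1 above):
   *
   * val p = new ClausalLogicParser
   * val c = p.parseOutput(p.clause, "initiatedAt(meeting(X,Y),T) :- happensAt(walking(X),T),happensAt(walking(Y),T)")
   * // c is an Expression; cast it to Clause to access the head and body.
   */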
def parse(parser: Parser[Expression], expression: String): Option[Expression] = {
parseAll(parser, expression) match {
case Success(result, _) => Some(result)
case f => None
}
}
def getParseResult(x: Option[Expression]): Expression = x match {
case Some(y) => y
case _ => throw new MyParsingException(x.toString)
}
}
OLED | OLED-master/src/main/scala/utils/parsers/ModesParser.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils.parsers
import com.typesafe.scalalogging.LazyLogging
import logic.Exceptions._
import logic.Modes._
import logic._
import scala.util.parsing.combinator.JavaTokenParsers
final class ModesParser extends JavaTokenParsers with LazyLogging {
def lowerCaseIdent: Parser[String] = """[a-z][a-zA-Z0-9_]*""".r
def upperCaseIdent: Parser[String] = """[A-Z][a-zA-Z0-9_]*""".r
def num: Parser[String] = """[0-9]*""".r
def innerPositionTerms: Parser[List[String]] = "(" ~> repsep(num, ",") <~ ")"
def naf: Parser[String] = "not " ~ rep("\\s+") ^^ { _ => "not" }
def mh: Parser[String] = "modeh" ^^ { x => x }
def mb: Parser[String] = "modeb" ^^ { x => x }
def ep: Parser[String] = "examplePattern" ^^ { x => x }
def ip: Parser[String] = "inputPredicate" ^^ { x => x }
def cp: Parser[String] = "comparisonPredicate" ^^ { x => x }
def posplmrk: Parser[PlmrkPos] = "+" ~ lowerCaseIdent ^^ { case "+" ~ x => PlmrkPos(x) }
def negplmrk: Parser[PlmrkNeg] = "-" ~ lowerCaseIdent ^^ { case "-" ~ x => PlmrkNeg(x) }
def constplmrk: Parser[PlmrkConst] = "#" ~ lowerCaseIdent ^^ { case "#" ~ x => PlmrkConst(x) }
def placemarker: Parser[Expression] = (posplmrk | negplmrk | constplmrk) ^^ { x => x }
def inner: Parser[List[Expression]] = "(" ~> repsep(modeAtom | placemarker, ",") <~ ")"
//def modeAtom: Parser[ModeAtom] = lowerCaseIdent ~ inner ^^ { case x ~ y => new ModeAtom(x.toString, y) }
def modeAtom: Parser[ModeAtom] =
(naf ~ lowerCaseIdent ~ inner ^^ { case not ~ x ~ y => ModeAtom(functor = x.toString, args = y, isNAF = true) }
| lowerCaseIdent ~ inner ^^ { case x ~ y => ModeAtom(functor = x.toString, args = y) })
def comparisonTermPositionIdentifier: Parser[List[Int]] = "comparison_term_position" ~ innerPositionTerms ^^ {
case "comparison_term_position" ~ innerPositionTerms => innerPositionTerms.map(_.toInt)
}
def modeh: Parser[ModeAtom] = mh ~ "(" ~ modeAtom ~ (")" | ").") ^^ { case mh ~ "(" ~ m ~ (")" | ").") => m }
def modeb: Parser[ModeAtom] = mb ~ "(" ~ modeAtom ~ (")" | ").") ^^ { case mb ~ "(" ~ m ~ (")" | ").") => m }
def mode: Parser[ModeAtom] = modeh | modeb
def exmplPattern: Parser[ModeAtom] = ep ~ "(" ~ modeAtom ~ (")" | ").") ^^ { case ep ~ "(" ~ m ~ (")" | ").") => m }
def inputPred: Parser[ModeAtom] = ip ~ "(" ~ modeAtom ~ (")" | ").") ^^ { case ep ~ "(" ~ m ~ (")" | ").") => m }
def compPred: Parser[ModeAtom] =
cp ~ "(" ~ modeAtom ~ "," ~ ("lessThan" | "greaterThan") ~ "," ~ comparisonTermPositionIdentifier ~ (")" | ").") ^^ {
case cp ~ "(" ~ m ~ "," ~ compTerm ~ "," ~ comparisonTermPositionIdentifier ~ (")" | ").") =>
m.compRelation = compTerm
m.comparisonTermPosition = comparisonTermPositionIdentifier
m
}
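  /*
   * Declarations accepted by the rules above (the predicate and type names are
   * hypothetical, chosen only to match the grammar):
   *
   * modeh(initiatedAt(meeting(+person,+person),+time)).
   * modeb(happensAt(walking(+person),+time)).
   * comparisonPredicate(before(+time,+time), lessThan, comparison_term_position(2)).
   */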
def parseModes(parser: Parser[ModeAtom], expression: String): Option[ModeAtom] = {
parseAll(parser, expression) match {
case Success(x, _) => Some(x)
case Failure(msg, _) =>
logger.error("FAILURE: " + msg)
logger.error("while parsing " + expression)
None
case Error(msg, _) => println("ERROR: " + msg); None
//case _ => None
}
}
def getParseResult(x: Option[ModeAtom]): ModeAtom = x match {
case Some(y) => y
case _ => throw new MyParsingException
}
}
OLED | OLED-master/src/main/scala/utils/parsers/PB2LogicParser.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils.parsers
import com.typesafe.scalalogging.LazyLogging
import logic.{Constant, Expression, Literal, Variable}
import org.parboiled2._
import scala.util.{Failure, Success, Try}
object PB2LogicParser extends LazyLogging {
/*
*
* TODO
*
* Fix whitespace!!
* Currently parsing fails even with the slightest
* whitespace in the logical expressions.
*
* */
def parseClause(c: String, debug: Boolean = false): Expression = {
val parser = new PB2LogicParser(c)
val result = parser.Clause.run()
getParserResult(result, parser, debug)
}
def parseAtom(a: String, debug: Boolean = false): Expression = {
val parser = new PB2LogicParser(a)
val result = parser.Atom.run()
getParserResult(result, parser, debug)
}
private def getParserResult(result: Try[Expression], parser: PB2LogicParser, debug: Boolean = false) = {
val out = result match {
case Success(x) =>
if (debug) {
logger.info("\n" + x.tostring)
Some(x)
} else Some(x)
case Failure(e: ParseError) =>
logger.error(parser.formatError(e)); None
case Failure(e: Throwable) => throw e
}
out match {
case Some(x) => x
case _ => throw new RuntimeException
}
}
}
final class PB2LogicParser(val input: ParserInput) extends Parser {
case class ExpressionList(elems: List[Expression])
def Clause = rule {
Atom ~ " :- " ~ BodyLiterals ~ optional(".") ~ EOI ~> ((x, y) =>
logic.Clause(head = x, body = y.elems.map(_.asInstanceOf[Literal])))
}
def Atom = rule {
Funct ~ InnerTerms ~ optional(".") ~> ((x, y) => Literal(predSymbol = x, terms = y.elems)) |
"not " ~ Funct ~ InnerTerms ~ optional(".") ~> ((x, y) => Literal(predSymbol = x, terms = y.elems, isNAF = true))
}
private def Term: Rule1[Expression] = rule { Atom | Const | Var }
private def BodyLiterals = rule { oneOrMore(Atom).separatedBy(",") ~> (x => ExpressionList(x.toList)) }
private def InnerTerms = rule { "(" ~ oneOrMore(Term).separatedBy(",") ~ ")" ~> (x => ExpressionList(x.toList)) }
private def Funct = rule { capture(LowerCaseString) ~> ((x: String) => x) }
private def Var = rule { capture(UpperCaseString) ~> ((x: String) => Variable(x)) }
private def Const = rule {
(capture(LowerCaseString) ~> ((x: String) => Constant(x))) |
(capture(Integer) ~> (x => Constant(x))) |
(capture(MinusInteger) ~> (x => Constant(x))) |
(capture(optional('"') ~ TK_WhatEver ~ optional('"')) ~> (x => Constant(x))) |
(capture(optional('"') ~ LowerCaseString ~ optional('"')) ~> ((x: String) => Constant(x))) |
(capture('"' ~ UpperCaseString ~ '"') ~> ((x: String) => Constant(x)))
}
/*
private def Const = rule {
(capture(LowerCaseString) ~> ((x: String) => Constant(x))) |
(capture(Integer) ~> (x => Constant(x))) |
(capture(MinusInteger) ~> (x => Constant(x))) |
(capture(optional('"') ~ LowerCaseString ~ optional('"')) ~> ((x: String) => Constant(x))) |
(capture('"' ~ UpperCaseString ~ '"') ~> ((x: String) => Constant(x)))
}
*/
private def LowerCaseString = rule { CharPredicate.LowerAlpha ~ zeroOrMore(CharPredicate.AlphaNum | "_") }
private def Integer = rule { oneOrMore(CharPredicate.Digit) }
private def TK_WhatEver = rule { Integer ~ "_" ~ Integer }
// This is needed in use/2 atoms with rule ids, e.g. use(-23421, 0)
private def MinusInteger = rule { "-" ~ oneOrMore(CharPredicate.Digit) }
private def UpperCaseString = rule { CharPredicate.UpperAlpha ~ zeroOrMore(CharPredicate.AlphaNum | "_") }
}
object TestRunner extends App {
val t = PB2LogicParser.parseAtom("fluentGrnd(holdsAt(reFuel(\"7330_124060\"),1498863690))", debug = true)
// - in
PB2LogicParser.parseAtom("use(-34534534,6)", debug = true)
PB2LogicParser.parseAtom("initiatedAt(meeting(X0,X1,45),1,Z,Petryb,a(p(2,3,z(23,g,f,ert(sdjfskj,Xkjsh))),1),oo(12,23,E))", debug = true)
// with a final "."
PB2LogicParser.parseAtom("initiatedAt(meeting(X0,X1,45),1,Z,Petryb,a(p(2,3,z(23,g,f,ert(sdjfskj,Xkjsh))),1),oo(12,23,E)).", debug = true)
  // The Atom parser succeeds on the first match, e.g. here it parses the (garbage) expression since it matches the head.
  // I'll have to do something with EOI to disallow that.
PB2LogicParser.parseAtom("initiatedAt(meeting(X0,X1,45),1,Z,Petryb,a(p(2,3,z(23,g,f,ert(sdjfskj,Xkjsh))),1),oo(12,23,E)). :- sdfsdfsfsdfsdf", debug = true)
// negation
PB2LogicParser.parseAtom("not initiatedAt(meeting(X0,X1,45),1,Z,Petryb,a(p(2,3,z(23,g,f,ert(sdjfskj,Xkjsh))),1),oo(12,23,E))", debug = true)
// negation with final "."
PB2LogicParser.parseAtom("not initiatedAt(meeting(X0,X1,45),1,Z,Petryb,a(p(2,3,z(23,g,f,ert(sdjfskj,Xkjsh))),1),oo(12,23,E)).", debug = true)
// clause
PB2LogicParser.parseClause("initiatedAt(meeting(X0,X1,45),1,Z,Petryb,a(p(2,3,z(23,g,f,ert(sdjfskj,Xkjsh))),1),oo(12,23,E)) :- happensAt(walking(X,34,p(a(s,2),Z)),T,Or),close(1,2,3,4,Yt)", debug = true)
// clause with final "."
PB2LogicParser.parseClause("initiatedAt(meeting(X0,X1,45),1,Z,Petryb,a(p(2,3,z(23,g,f,ert(sdjfskj,Xkjsh))),1),oo(12,23,E)) :- happensAt(walking(X,34,p(a(s,2),Z)),T,Or),close(1,2,3,4,Yt)", debug = true)
// clause with NAF in the body
PB2LogicParser.parseClause("initiatedAt(meeting(X0,X1,45),1,Z,Petryb,a(p(2,3,z(23,g,f,ert(sdjfskj,Xkjsh))),1),oo(12,23,E)) :- not happensAt(walking(X,34,p(a(s,2),Z)),T,Or),close(1,2,3,4,Yt),not happens(a(X,R,T,z(a,23,4)))", debug = true)
val mlnTest = PB2LogicParser.parseAtom("initiatedAt(meeting(a(k,l,m),b,45),c,a,d)", debug = true)
val a = Literal.toMLNFlat(mlnTest.asInstanceOf[Literal])
println(a.tostring)
// This does not work for variabilized literals (throws an exception).
val mlnTest1 = PB2LogicParser.parseAtom("initiatedAt(c,A,d)", debug = true)
val b = Literal.toMLNFlat(mlnTest1.asInstanceOf[Literal])
println(b.tostring)
}
OLED | OLED-master/src/main/scala/utils/plotting/Draft.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils.plotting
/*import scalatikz.graphics.Compiler.LUALATEX
import scalatikz.graphics.pgf.Figure
import scalatikz.graphics.pgf.enums.Color.{BLACK, BLUE, GREEN, ORANGE, RED}
import scalatikz.graphics.pgf.enums.LegendPos
import scalatikz.graphics.pgf.enums.LegendPos.{NORTH_EAST, NORTH_WEST}
import scalatikz.graphics.pgf.enums.Mark.{ASTERISK, CIRCLE, PLUS, TRIANGLE, X}
import utils.plotting.TPLPExpertsPlots.makeSparse*/
import scalatikz.pgf.enums.Color.{BLACK, BLUE, RED}
import scalatikz.pgf.plots.Figure
import scalatikz.pgf.plots.enums.LegendPos.{NORTH_EAST, NORTH_WEST}
import scalatikz.pgf.Compiler.LUA_LATEX
import scala.io.Source
object Draft extends App {
plot("/home/nkatz/Desktop/PEA-NEW-RESULTS/test", "/home/nkatz/Desktop/PEA-NEW-RESULTS")
//plotTime("/home/nkatz/Desktop/PEA-NEW-RESULTS/time", "/home/nkatz/Desktop/PEA-NEW-RESULTS")
/*Figure("secondary_axis")
.plot(lineColor = RED)((-5 to 5) -> ((x: Double) => 3 * x))
.havingXLabel("$x$")
.havingYLabel("$3x$")
.secondaryAxis { x => x
.scatter(markStrokeColor = BLUE, markFillColor = BLUE)((-5 to 5) -> ((x: Double) => x * x))
.havingYLabel("$x^2$")
}
.saveAsPDF("/home/nkatz/Desktop/PEA-NEW-RESULTS")*/
def plot(dataPath: String, savePath: String) = {
val data = Source.fromFile(dataPath).getLines.filter(x => !x.isEmpty && !x.startsWith("%")) //.split(",")
//val OLED = data.next().split(",").map(_.toDouble).toVector
val OLED_Experts = data.next().split(",").map(_.toDouble).toVector
val OLED_MLN = data.next().split(",").map(_.toDouble).toVector
Figure("meeting-prequential-mistakes")
//.plot(color = BLACK, marker = X, markStrokeColor = BLACK)(makeSparse(handCrafted))
//.plot(lineColor = BLACK)(OLED)
.plot(lineColor = RED)(OLED_MLN)
.plot(lineColor = BLUE)(OLED_Experts)
.havingLegends("\\footnotesize \\textsf{OLED}", "\\footnotesize \\textsf{WOLED}", "\\footnotesize \\textsf{Experts}")
//"\\footnotesize \\textsf{OLED}",
/*plot(color = BLUE, marker = TRIANGLE, markStrokeColor = BLUE)(makeSparse(handCraftedExperts)).
plot(color = GREEN ! 70 ! BLACK, marker = CIRCLE, markStrokeColor = GREEN ! 70 ! BLACK)(makeSparse(OLED)).
plot(color = ORANGE, marker = PLUS, markStrokeColor = ORANGE)(makeSparse(OLED_MLN)).
plot(color = RED, marker = ASTERISK, markStrokeColor = RED)(makeSparse(OLED_Experts))
.havingLegends("\\footnotesize \\textsf{HandCrafted}", "\\footnotesize \\textsf{HandCrafted-EXP}", "\\footnotesize \\textsf{OLED}",
"\\footnotesize \\textsf{OLED-MLN}", "\\footnotesize \\textsf{OLED-EXP}")*/
.havingLegendPos(NORTH_WEST)
.havingXLabel("\\footnotesize Mini-batches (size 100)")
.havingYLabel("\\footnotesize \\textbf{Average Loss}").
//havingTitle("\\emph{Meeting}").
havingTitle("\\emph{Meeting},ybar").
//havingAxisXLabels(Seq("0","5K","10K","15K","20K","25K")).
//show(compiler = LUALATEX)
saveAsPDF(savePath, compiler = LUA_LATEX)
//saveAsTeX(savePath)
}
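  /*
   * The data file read by plot above is assumed to contain one comma-separated series
   * per non-empty, non-comment line, in the order the series are consumed, e.g.
   * (hypothetical values):
   *
   * % OLED-Experts
   * 0.15,0.12,0.10,0.08
   * % OLED-MLN
   * 0.18,0.14,0.11,0.09
   */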
def plotTime(dataPath: String, savePath: String) = {
val data = Source.fromFile(dataPath).getLines.filter(x => !x.isEmpty && !x.startsWith("%")) //.split(",")
val OLED = Vector(1.0, 100.0, 500.0) zip data.next().split(",").map(_.toDouble).toVector
val OLED_MLN = Vector(1.0, 100.0, 500.0) zip data.next().split(",").map(_.toDouble).toVector
val OLED_Experts = Vector(1.0, 100.0, 500.0) zip data.next().split(",").map(_.toDouble).toVector
Figure("times-meeting")
//.plot(color = BLACK, marker = X, markStrokeColor = BLACK)(makeSparse(handCrafted))
.plot(lineColor = BLACK)(OLED)
.plot(lineColor = RED)(OLED_MLN)
.plot(lineColor = BLUE)(OLED_Experts)
.havingLegends("\\footnotesize \\textsf{OLED}", "\\footnotesize \\textsf{OLED-MLN}", "\\footnotesize \\textsf{OLED-EXP}")
//"\\footnotesize \\textsf{OLED}",
/*plot(color = BLUE, marker = TRIANGLE, markStrokeColor = BLUE)(makeSparse(handCraftedExperts)).
plot(color = GREEN ! 70 ! BLACK, marker = CIRCLE, markStrokeColor = GREEN ! 70 ! BLACK)(makeSparse(OLED)).
plot(color = ORANGE, marker = PLUS, markStrokeColor = ORANGE)(makeSparse(OLED_MLN)).
plot(color = RED, marker = ASTERISK, markStrokeColor = RED)(makeSparse(OLED_Experts))
.havingLegends("\\footnotesize \\textsf{HandCrafted}", "\\footnotesize \\textsf{HandCrafted-EXP}", "\\footnotesize \\textsf{OLED}",
"\\footnotesize \\textsf{OLED-MLN}", "\\footnotesize \\textsf{OLED-EXP}")*/
.havingLegendPos(NORTH_WEST)
.havingXLabel("\\footnotesize Mini-batche size")
.havingYLabel("\\footnotesize Avg. CPU time per batch (sec)").
//havingTitle("\\emph{Meeting}").
havingTitle("\\emph{Moving}").
havingAxisXLabels(Seq("1", "100", "500")).
//havingAxisXLabels(Seq("0","5K","10K","15K","20K","25K")).
//show(compiler = LUALATEX)
saveAsPDF(savePath, compiler = LUA_LATEX)
//saveAsTeX(savePath)
}
}
OLED | OLED-master/src/main/scala/utils/plotting/LPARPlots.scala | package utils.plotting
import scalatikz.pgf.enums.Color.{BLACK, BLUE, RED}
import scalatikz.pgf.plots.Figure
import scalatikz.pgf.plots.enums.FontSize.VERY_LARGE
import scalatikz.pgf.plots.enums.LegendPos.{NORTH_EAST, NORTH_WEST}
import scalatikz.pgf.plots.enums.Mark.{TRIANGLE, X}
/**
* Created by nkatz at 27/2/20
*/
object LPARPlots extends App {
//plotCrossValBothCEs("/home/nkatz/Dropbox/PapersAll/LPAR-2020/woled-asp")
//plotTrainingTimes("/home/nkatz/Dropbox/PapersAll/LPAR-2020/woled-asp")
//plotTheorySizes("/home/nkatz/Dropbox/PapersAll/LPAR-2020/woled-asp")
//plotMapInferenceMeet("/home/nkatz/Dropbox/PapersAll/KR-2020/KR20_authors_kit_v1.2/paper")
//plotMapInferenceMove("/home/nkatz/Dropbox/PapersAll/KR-2020/KR20_authors_kit_v1.2/paper")
//plotMapInferenceRendezVous("/home/nkatz/Dropbox/PapersAll/KR-2020/KR20_authors_kit_v1.2/paper")
//plotMapInferenceDangerousDriving("/home/nkatz/Dropbox/PapersAll/KR-2020/KR20_authors_kit_v1.2/paper")
//plotMapInferenceDangerousDriving("/home/nkatz/Dropbox/PapersAll/ACCEPTED/KR-2020/scalatikz-graphs")
//plotMapInferenceNonEconomicDriving("/home/nkatz/Dropbox/PapersAll/ACCEPTED/KR-2020/scalatikz-graphs")
//plotMapInferenceRefuelOpportunity("/home/nkatz/Dropbox/PapersAll/ACCEPTED/KR-2020/scalatikz-graphs")
//plotMapInferenceRendezVous("/home/nkatz/Dropbox/PapersAll/ACCEPTED/KR-2020/scalatikz-graphs")
//plotMapInferencePilotOps("/home/nkatz/Dropbox/PapersAll/ACCEPTED/KR-2020/scalatikz-graphs")
plotCrossValRendevousPilotOps("/home/nkatz/Dropbox/PapersAll/ACCEPTED/KR-2020/scalatikz-graphs")
def plotMapInferenceDangerousDriving(savePath: String) = {
val groundingSolvingASP = Vector(0.041, 0.072, 0.098, 0.568)
val groundingSolvingMLN = Vector(0.043, 0.183, 2.324, 10.234)
Figure("map-scalability-dangerous-driving")
.plot(lineColor = RED, marker = X, markStrokeColor = RED)(groundingSolvingASP).
plot(lineColor = BLUE, marker = TRIANGLE, markStrokeColor = BLUE)(groundingSolvingMLN)
.havingLegends("\\large{WOLED-ASP}", "\\large{WOLED-MLN}")
.havingLegendPos(NORTH_WEST)
.havingXLabel("\\Large{Avg. #atoms in ground program}")
.havingYLabel("\\Large{Grounding + solving (sec)}")
.havingTitle("\\emph{\\large{Dangerous Driving}}")
//havingTitle("\\emph{Meeting},ybar").
.havingAxisXLabels(Seq("1.8K", "2.9K", "12K", "16K")).
havingFontSize(VERY_LARGE).
saveAsPDF(savePath)
//.show()
}
def plotMapInferenceNonEconomicDriving(savePath: String) = {
val groundingSolvingASP = Vector(0.024, 0.032, 0.076, 0.468)
val groundingSolvingMLN = Vector(0.086, 0.283, 3.675, 12.922)
Figure("map-scalability-non-economic-driving")
.plot(lineColor = RED, marker = X, markStrokeColor = RED)(groundingSolvingASP).
plot(lineColor = BLUE, marker = TRIANGLE, markStrokeColor = BLUE)(groundingSolvingMLN)
.havingLegends("\\large{WOLED-ASP}", "\\large{WOLED-MLN}")
.havingLegendPos(NORTH_WEST)
.havingXLabel("\\Large{Avg. #atoms in ground program}")
.havingYLabel("\\Large{Grounding + solving (sec)}")
.havingTitle("\\emph{\\large{Non-Economic Driving}}")
//havingTitle("\\emph{Meeting},ybar").
.havingAxisXLabels(Seq("2K", "3.3K", "14K", "17K")).
havingFontSize(VERY_LARGE).
saveAsPDF(savePath)
//.show()
}
def plotMapInferenceRefuelOpportunity(savePath: String) = {
val groundingSolvingASP = Vector(0.022, 0.031, 0.06, 0.426)
val groundingSolvingMLN = Vector(0.112, 0.344, 4.234, 11.423)
Figure("map-scalability-refuel")
.plot(lineColor = RED, marker = X, markStrokeColor = RED)(groundingSolvingASP).
plot(lineColor = BLUE, marker = TRIANGLE, markStrokeColor = BLUE)(groundingSolvingMLN)
.havingLegends("\\large{WOLED-ASP}", "\\large{WOLED-MLN}")
.havingLegendPos(NORTH_WEST)
.havingXLabel("\\Large{Avg. #atoms in ground program}")
.havingYLabel("\\Large{Grounding + solving (sec)}")
.havingTitle("\\emph{\\large{Re-Fuel Opportunity}}")
//havingTitle("\\emph{Meeting},ybar").
.havingAxisXLabels(Seq("2K", "3K", "13K", "16.3K")).
havingFontSize(VERY_LARGE).
saveAsPDF(savePath)
//.show()
}
def plotMapInferenceRendezVous(savePath: String) = {
val groundingSolvingASP = Vector(0.041, 0.072, 0.289, 0.734)
val groundingSolvingMLN = Vector(0.043, 0.183, 3.824, 19.234)
Figure("map-scalability-rendezvous")
.plot(lineColor = RED, marker = X, markStrokeColor = RED)(groundingSolvingASP).
plot(lineColor = BLUE, marker = TRIANGLE, markStrokeColor = BLUE)(groundingSolvingMLN)
.havingLegends("\\large{WOLED-ASP}", "\\large{WOLED-MLN}")
.havingLegendPos(NORTH_WEST)
.havingXLabel("\\Large{Avg. #atoms in ground program}")
.havingYLabel("\\Large{Grounding + solving (sec)}")
.havingTitle("\\emph{\\large{Vessel RendezVous}}")
//havingTitle("\\emph{Meeting},ybar").
.havingAxisXLabels(Seq("3K", "5K", "20K", "30K")).
havingFontSize(VERY_LARGE).
saveAsPDF(savePath)
//.show()
}
def plotMapInferencePilotOps(savePath: String) = {
val groundingSolvingASP = Vector(0.045, 0.068, 0.178, 0.456)
val groundingSolvingMLN = Vector(0.042, 0.243, 4.624, 22.228)
Figure("map-scalability-pilotops")
.plot(lineColor = RED, marker = X, markStrokeColor = RED)(groundingSolvingASP).
plot(lineColor = BLUE, marker = TRIANGLE, markStrokeColor = BLUE)(groundingSolvingMLN)
.havingLegends("\\large{WOLED-ASP}", "\\large{WOLED-MLN}")
.havingLegendPos(NORTH_WEST)
.havingXLabel("\\Large{Avg. #atoms in ground program}")
.havingYLabel("\\Large{Grounding + solving (sec)}")
.havingTitle("\\emph{\\large{Pilot Ops}}")
//havingTitle("\\emph{Meeting},ybar").
.havingAxisXLabels(Seq("3K", "5K", "20K", "30K")).
havingFontSize(VERY_LARGE).
saveAsPDF(savePath)
//.show()
}
def plotCrossValRendevousPilotOps(savePath: String) = {
val fscoresRendezVous = Vector(0.837, 0.724) //, 0.782
val fscoresPilotOps = Vector (0.848, 0.725) //, 0.704
Figure("cross-val").
bar(barColor = BLUE ! 50 ! BLACK, barWidth = 0.1)(fscoresRendezVous). //
bar(barColor = RED ! 50 ! BLACK, barWidth = 0.1)(fscoresPilotOps). //, barWidth = 0.2
havingYLabel("\\textbf{$F_1$-score (test set)}").havingYLimits(0.5, 1.0).
havingAxisXLabels(Seq(
"\\textsf{WOLED-ASP}",
"\\textsf{WOLED-MLN}",
//"\\textsf{\\scriptsize OLED}"
))//.rotateXTicks(20)
.havingTitle("\\emph{},ybar")
.havingLegends("\\emph{RendezVous}", "\\emph{PilotOps}")
.havingLegendPos(NORTH_EAST)
.saveAsPDF(savePath)
}
def plotMapInferenceMove(savePath: String) = {
val groundingSolvingASP = Vector(0.032, 0.068, 0.187, 0.634)
val groundingSolvingMLN = Vector(0.029, 0.073, 2.023, 11.025)
Figure("map-scalability-move")
.plot(lineColor = RED, marker = X, markStrokeColor = RED)(groundingSolvingASP).
plot(lineColor = BLUE, marker = TRIANGLE, markStrokeColor = BLUE)(groundingSolvingMLN)
.havingLegends("\\large{WOLED-ASP}", "\\large{WOLED-MLN}")
.havingLegendPos(NORTH_WEST)
.havingXLabel("\\Large{Avg. #atoms in ground program}")
.havingYLabel("\\Large{Grounding + solving (sec)}")
//.havingTitle("\\emph{\\large{Moving}}")
//havingTitle("\\emph{Meeting},ybar").
.havingAxisXLabels(Seq("1.3K", "2.6K", "10K", "19.2K")).
havingFontSize(VERY_LARGE).
saveAsPDF(savePath)
//.show()
}
def plotMapInferenceMeet(savePath: String) = {
val groundingSolvingASP = Vector(0.029, 0.043, 0.147, 0.257)
val groundingSolvingMLN = Vector(0.028, 0.062, 1.65, 9.26)
Figure("map-scalability-meet")
.plot(lineColor = RED, marker = X, markStrokeColor = RED)(groundingSolvingASP)
.plot(lineColor = BLUE, marker = TRIANGLE, markStrokeColor = BLUE)(groundingSolvingMLN)
.havingLegends("\\large{WOLED-ASP}", "\\large{WOLED-MLN}")
.havingLegendPos(NORTH_WEST)
.havingXLabel("\\Large{Avg. #atoms in ground program}")
.havingYLabel("\\Large{Grounding + solving (sec)}")
//.havingTitle("\\emph{\\large{Meeting}}")
//havingTitle("\\emph{Meeting},ybar").
.havingAxisXLabels(Seq("1.1K", "2.2K", "9K", "15K"))
.havingFontSize(VERY_LARGE)
.saveAsPDF(savePath)
//.show()
}
def plotCrossValBothCEs(savePath: String) = {
val fscoresMeeting = Vector(0.887, 0.841, 0.782, 0.801, 0.735, 0.762)
val fscoresMoving = Vector (0.856, 0.802, 0.704, 0.688, 0.624, 0.644)
Figure("cross-val-both-CEs").bar(barColor = BLUE ! 50 ! BLACK, barWidth = 0.2)(fscoresMoving).
bar(barColor = RED ! 50 ! BLACK, barWidth = 0.2)(fscoresMeeting)
.havingYLabel("\\textbf{Average $F_1$-score (test set)}").havingYLimits(0.5, 1.0).
havingAxisXLabels(Seq(
"\\textsf{\\scriptsize WOLED-ASP}",
"\\textsf{\\scriptsize WOLED-MLN}",
"\\textsf{\\scriptsize OLED}",
"\\textsf{\\scriptsize XHAIL}",
"\\textsf{\\scriptsize HandCrafted}",
"\\textsf{\\scriptsize HandCrafted-W}"
)).rotateXTicks(20)
.havingTitle("\\emph{},ybar").havingLegends("\\emph{Moving}", "\\emph{Meeting}").havingLegendPos(NORTH_EAST)
.saveAsPDF(savePath)
}
def plotTrainingTimes(savePath: String) = {
val fscoresMeeting = Vector(1.14, 4.823, 0.918, 0.812) //26.234
val fscoresMoving = Vector (1.223, 4.989, 1.014, 0.842) //38.645
Figure("cross-val-training-times").bar(barColor = BLUE ! 50 ! BLACK, barWidth = 0.2)(fscoresMoving).
bar(barColor = RED ! 50 ! BLACK, barWidth = 0.2)(fscoresMeeting)
.havingYLabel("\\textbf{Average Training times (min)}").//.havingYLimits(0.5, 1.0).
havingAxisXLabels(Seq(
"\\textsf{\\scriptsize WOLED-ASP}",
"\\textsf{\\scriptsize WOLED-MLN}",
"\\textsf{\\scriptsize OLED}",
"\\textsf{\\scriptsize HandCrafted-W}"
)).rotateXTicks(20)
.havingTitle("\\emph{},ybar").havingLegends("\\emph{Moving}", "\\emph{Meeting}").havingLegendPos(NORTH_EAST)
.saveAsPDF(savePath)
}
def plotTheorySizes(savePath: String) = {
val fscoresMeeting = Vector(52, 62, 50, 25) //26.234
val fscoresMoving = Vector (58, 69, 53, 22) //38.645
Figure("cross-val-theory-size").bar(barColor = BLUE ! 50 ! BLACK, barWidth = 0.2)(fscoresMoving).
bar(barColor = RED ! 50 ! BLACK, barWidth = 0.2)(fscoresMeeting)
.havingYLabel("\\textbf{Average Theory Size}").//.havingYLimits(0.5, 1.0).
havingAxisXLabels(Seq(
"\\textsf{\\scriptsize WOLED-ASP}",
"\\textsf{\\scriptsize WOLED-MLN}",
"\\textsf{\\scriptsize OLED}",
"\\textsf{\\scriptsize XHAIL}"
)).rotateXTicks(20)
.havingTitle("\\emph{},ybar").havingLegends("\\emph{Moving}", "\\emph{Meeting}").havingLegendPos(NORTH_EAST)
.saveAsPDF(savePath)
}
}
OLED | OLED-master/src/main/scala/utils/plotting/PlotTest2.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils.plotting
/*import scalatikz.graphics.pgf.Figure
import scalatikz.graphics.pgf.enums.LegendPos*/
import scalatikz.pgf.plots.Figure
import scalatikz.pgf.plots.enums.LegendPos
import scala.io.Source
import scala.math._
object PlotTest2 extends App {
//plotMeeting1pass("/home/nkatz/meeting-1-pass-new", "/home/nkatz/Desktop")
//plotMeeting2passes("/home/nkatz/Desktop/oled-winnow-results/meeting-2-passes", "/home/nkatz/Desktop/oled-winnow-results")
//plotMeeting1passLogScale("/home/nkatz/Desktop/oled-winnow-results/meeting-1-pass-new", "/home/nkatz/Desktop/oled-winnow-results")
//plotMeeting2passLogScale("/home/nkatz/Desktop/oled-winnow-results/meeting-2-passes-new", "/home/nkatz/Desktop/oled-winnow-results")
//plotMaritime("/home/nkatz/Desktop/oled-winnow-results/maritime/brest/rendezvous/speedup", "/home/nkatz/Desktop/oled-winnow-results/maritime/brest/rendezvous")
plotTK_prequential("/home/nkatz/Dropbox/Track-&-Know/Athens-Plenary-6-2019")
plotTK_holdout("/home/nkatz/Dropbox/Track-&-Know/Athens-Plenary-6-2019")
def plotTK_prequential(savePath: String) = {
//val oled = Vector(355,823,1744,2300,2640,2724,2738,2738,2738,2738)
//val oled_exp = Vector(54,158,348,348,562,562,624,624,624,624)
val oled = Vector(355, 1744, 2640, 2738, 2738)
val oled_exp = Vector(54, 348, 562, 624, 624)
Figure("dangerousDrivingPrequential").plot(oled).plot(oled_exp).havingLegendPos(LegendPos.NORTH_WEST).
havingLegends("\\scriptsize OLED", "\\scriptsize OLED-EXP").
havingXLabel("Time").
havingYLabel("Accumulated Number of Mistakes") //.saveAsPDF(savePath)
.havingAxisXLabels(Seq("10K", "20K", "30K", "40K", "50K")).havingTitle("DangerousDriving (Prequential Evaluation)").saveAsPDF(savePath)
}
def plotTK_holdout(savePath: String) = {
//val oled = Vector(355,823,1744,2300,2640,2724,2738,2738,2738,2738)
//val oled_exp = Vector(54,158,348,348,562,562,624,624,624,624)
val oled = Vector(0.48, 0.786, 0.923, 0.968, 0.968)
val oled_exp = Vector(0.42, 0.825, 0.820, 0.971, 0.978)
Figure("dangerousDrivingHoldout").plot(oled).plot(oled_exp).havingLegendPos(LegendPos.NORTH_WEST).
havingLegends("\\scriptsize OLED", "\\scriptsize OLED-EXP").
havingXLabel("Time").
havingYLabel("$F_1$-score on test-Set") //.saveAsPDF(savePath)
.havingAxisXLabels(Seq("10K", "20K", "30K", "40K", "50K")).havingTitle("DangerousDriving (Holdout Evaluation)").saveAsPDF(savePath)
}
def plotMaritime(dataPath: String, savePath: String) = {
val data = Source.fromFile(dataPath).getLines.filter(x => !x.isEmpty && !x.startsWith("%")) //.split(",")
//val cores = data.next().split(",").map(_.toDouble).toVector
val syncTime = Vector(1.0, 2.0, 4.0, 8.0, 16.0) zip data.next().split(",").map(_.toDouble).toVector
val asyncTime = Vector(1.0, 2.0, 4.0, 8.0, 16.0) zip data.next().split(",").map(_.toDouble).toVector
//val syncTime = Vector(2.0,4.0,8.0,16.0) zip data.next().split(",").map(_.toDouble).toVector
//val asyncTime = Vector(2.0,4.0,8.0,16.0) zip data.next().split(",").map(_.toDouble).toVector
/*
Figure("rendezvous-time").plot(syncTime).plot(asyncTime).havingLegendPos(LegendPos.NORTH_EAST).havingLegends("sync","async").
havingXLabel("Number of cores").havingYLabel("Training time (hours)")
.havingAxisXLabels(Seq("1","2","4","8","16"))
.saveAsTeX(savePath)
*/
/*
Figure("rendezvous-f1").plot(syncTime).plot(asyncTime).havingLegendPos(LegendPos.NORTH_EAST).
havingLimits(0, 16, 0.7, 1.0).havingLegends("sync","async").
havingXLabel("Number of cores").havingYLabel("$F_1$ score").havingAxisXLabels(Seq("1","2","4","8","16"))
.saveAsTeX(savePath)
*/
///*
Figure("rendezvous-msgs").plot(syncTime).plot(asyncTime).havingLegendPos(LegendPos.NORTH_EAST).
havingLegends("sync", "async").
havingXLabel("Number of cores").
havingYLabel("Number of messages").
havingAxisXLabels(Seq("2", "4", "8", "16")).saveAsPDF(savePath)
//*/
/*
Figure("loitering-time").bar(syncTime).plot(asyncTime).havingLegendPos(LegendPos.NORTH_EAST).
havingLegends("sync","async").
havingXLabel("Number of cores").havingYLabel("Training time (hours)").havingTitle("Loitering").saveAsPDF(savePath)
*/
/*
Figure("loitering-f1").plot(syncTime).plot(asyncTime).havingLegendPos(LegendPos.NORTH_EAST).
havingLimits(0, 16, 0.7, 1.0).havingLegends("sync","async").
havingXLabel("Number of cores").havingYLabel("$F_1$ score").havingTitle("Loitering").saveAsPDF(savePath)
*/
/*
Figure("loitering-msgs").plot(syncTime).plot(asyncTime).havingLegendPos(LegendPos.NORTH_EAST).
havingLegends("sync","async").
havingXLabel("Number of cores").havingYLabel("Number of messages").havingTitle("Loitering").saveAsPDF(savePath)
*/
}
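  /*
   * The speedup file read by plotMaritime is assumed to hold two comma-separated lines
   * of measurements (sync first, then async), one value per core count in {1,2,4,8,16},
   * e.g. (hypothetical values):
   *
   * % sync
   * 10.2,6.1,3.4,1.9,1.2
   * % async
   * 9.8,5.2,2.9,1.6,0.9
   */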
/*
def plotMeeting1pass(dataPath: String, savePath: String) = {
val data = Source.fromFile(dataPath).getLines.filter( x => !x.isEmpty && !x.startsWith("%"))//.split(",")
val winnow = data.next().split(",").map(_.toDouble).toVector
val _095 = data.next().split(",").map(_.toDouble).toVector
val _09 = data.next().split(",").map(_.toDouble).toVector
val _08 = data.next().split(",").map(_.toDouble).toVector
val _07 = data.next().split(",").map(_.toDouble).toVector
val _06 = data.next().split(",").map(_.toDouble).toVector
val noConstraints = data.next().split(",").map(_.toDouble).toVector
val handCrafted = data.next().split(",").map(_.toDouble).toVector
Figure("meeting-1-pass").plot(winnow).plot(_095).plot(_09).plot(_08).plot(_07).plot(_06).plot(noConstraints).plot(handCrafted)
//.plot(domain -> sin _)
//.plot(lineStyle = DASHED)(domain -> cos _)
.havingLegends("Experts", "$OLED_{score \\geq 0.95}$", "$OLED_{score \\geq 0.9}$",
"$OLED_{score \\geq 0.9}$", "$OLED_{score \\geq 0.7}$", "$OLED_{score \\geq 0.6}$", "$OLED_{all-rules}$", "Hand-crafted")
//.havingLegendPos(SOUTH_WEST)
.havingXLabel("Data batches (size 50)")
.havingYLabel("Accumulated \\ Error$").
//.havingTitle("Meeting \\ 1-pass").
saveAsPDF(savePath)
//.show()
}
*/
  def log2(x: Double) = {
    // log10(0) is -Infinity; map infinities to 0.0 so the accumulated-error curves stay plottable
    if (log10(x).isInfinity) 0.0 else log10(x) / log10(2.0)
  }
  def toLog(x: Double) = {
    if (log2(x).isPosInfinity) 0.0 else log2(x)
  }
/*
def plotMeeting1passLogScale(dataPath: String, savePath: String) = {
val data = Source.fromFile(dataPath).getLines.filter( x => !x.isEmpty && !x.startsWith("%"))//.split(",")
val winnow = data.next().split(",").map(_.toDouble).toVector.map(x => toLog(x))
val _095 = data.next().split(",").map(_.toDouble).toVector.map(x => toLog(x))
val _09 = data.next().split(",").map(_.toDouble).toVector.map(x => toLog(x))
val _08 = data.next().split(",").map(_.toDouble).toVector.map(x => toLog(x))
val _07 = data.next().split(",").map(_.toDouble).toVector.map(x => toLog(x))
val _06 = data.next().split(",").map(_.toDouble).toVector.map(x => toLog(x))
val noConstraints = data.next().split(",").map(_.toDouble).toVector.map(x => toLog(x))
val handCrafted = data.next().split(",").map(_.toDouble).toVector.map(x => toLog(x))
Figure("meeting-1-pass-log-scale-new").plot(winnow).plot(_095).plot(_09).plot(_08).plot(_07).plot(_06).plot(noConstraints).plot(handCrafted)
//.plot(domain -> sin _)
//.plot(lineStyle = DASHED)(domain -> cos _)
.havingLegends("winnow", "score $\\geq 0.95$", "score $\\geq 0.9$", "score $\\geq 0.8$", "score $\\geq 0.7$", "score $\\geq 0.6$", "all rules", "hand-crafted")
//.havingLegendPos(SOUTH_WEST)
.havingXLabel("Data batches (size 50)")
.havingYLabel("Accumulated \\ Error (log-scale)$")
.havingTitle("Meeting \\ 1-pass").
saveAsPDF(savePath)
//.show()
}
*/
def plotResults(
savePath: String,
name: String, trueLabels: Vector[Double], wInit: Vector[Double], wNoInit: Vector[Double],
wTerm: Vector[Double], wNoTerm: Vector[Double],
predictiInt: Vector[Double], predictiTerm: Vector[Double],
inert: Vector[Double], holds: Vector[Double]) = {
//Figure(name).plot(wInit).saveAsTeX(savePath)
/*
Figure(name).plot(trueLabels.map(toLog(_))).
plot(wInit.map(toLog(_))).plot(wNoInit.map(toLog(_))).
plot(wTerm.map(toLog(_))).plot(wNoTerm.map(toLog(_))).
plot(predictiInt.map(toLog(_))).plot(predictiTerm.map(toLog(_))).
plot(inert.map(toLog(_))).plot(holds.map(toLog(_))).
havingLegends("True labels","$W_I^+$","$W_I^-$","$W_T^+$","$W_T^-$","$W_I$","$W_T$","$W_{inert}$","$W_{holds}$").
havingXLabel("Time").havingYLabel("Weights \\ (log-scale)").havingTitle("Meeting \\ 1-pass").
saveAsPDF(savePath)//show()//.saveAsPDF(savePath)
*/
/*
Figure(name).plot(trueLabels).
plot(wInit.map(toLog(_))).plot(wNoInit.map(toLog(_))).
plot(wTerm.map(toLog(_))).plot(wNoTerm.map(toLog(_))).
//plot(predictiInt.map(toLog(_))).plot(predictiTerm.map(toLog(_))).
plot(inert.map(toLog(_))).plot(holds.map(toLog(_))).
//havingLegends("True labels","$W_I^+$","$W_I^-$","$W_T^+$","$W_T^-$","$W_I$","$W_T$","$W_{inert}$","$W_{holds}$").
havingLegends("True labels","$W_I^+$","$W_I^-$","$W_T^+$","$W_T^-$","$W_{inert}$","$W_{holds}$").
//havingLegends("True labels","$W_I$","$W_T$","$W_{inert}$","$W_{holds}$").
havingXLabel("Time").havingYLabel("Weights \\ (log-scale)").
havingTitle("").
saveAsPDF(savePath)//show()//.saveAsPDF(savePath)
*/
/*
Figure(name).plot(trueLabels).
plot(wInit.map(toLog(_))).plot(wNoInit.map(toLog(_))).
//plot(predictiInt.map(toLog(_))).plot(predictiTerm.map(toLog(_))).
plot(inert.map(toLog(_))).plot(holds.map(toLog(_))).
//havingLegends("True labels","$W_I^+$","$W_I^-$","$W_T^+$","$W_T^-$","$W_I$","$W_T$","$W_{inert}$","$W_{holds}$").
havingLegends("True labels","$W_I^+$","$W_I^-$","$W_{inert}$","$W_{holds}$").
//havingLegends("True labels","$W_I$","$W_T$","$W_{inert}$","$W_{holds}$").
havingXLabel("Time").havingYLabel("Weights \\ (log-scale)").
havingTitle("").
saveAsPDF(savePath)//show()//.saveAsPDF(savePath)
*/
}
/*
def plotMeeting2passLogScale(dataPath: String, savePath: String) = {
val data = Source.fromFile(dataPath).getLines.filter( x => !x.isEmpty && !x.startsWith("%"))//.split(",")
val winnow = data.next().split(",").map(_.toDouble).toVector.map(x => toLog(x))
val _095 = data.next().split(",").map(_.toDouble).toVector.map(x => toLog(x))
val _09 = data.next().split(",").map(_.toDouble).toVector.map(x => toLog(x))
val _08 = data.next().split(",").map(_.toDouble).toVector.map(x => toLog(x))
val _07 = data.next().split(",").map(_.toDouble).toVector.map(x => toLog(x))
val _06 = data.next().split(",").map(_.toDouble).toVector.map(x => toLog(x))
Figure("meeting-2-passes-log-scale-new").plot(winnow).plot(_095).plot(_09).plot(_08).plot(_07).plot(_06)
//.plot(domain -> sin _)
//.plot(lineStyle = DASHED)(domain -> cos _)
.havingLegends("winnow", "score $\\geq 0.95$", "score $\\geq 0.9$", "score $\\geq 0.8$", "score $\\geq 0.7$", "score $\\geq 0.6$", "all rules", "hand-crafted")
//.havingLegendPos(SOUTH_WEST)
.havingXLabel("Data batches (size 50)")
.havingYLabel("Accumulated \\ Error (log-scale)$")
.havingTitle("Meeting \\ 1-pass").
saveAsPDF(savePath)
//.show()
}
*/
/*
def plotMeeting2passes(dataPath: String, savePath: String) = {
val data = Source.fromFile(dataPath).getLines.filter( x => !x.isEmpty && !x.startsWith("%"))//.split(",")
val winnow = data.next().split(",").map(_.toDouble).toVector
val _095 = data.next().split(",").map(_.toDouble).toVector
val _09 = data.next().split(",").map(_.toDouble).toVector
val _08 = data.next().split(",").map(_.toDouble).toVector
val _07 = data.next().split(",").map(_.toDouble).toVector
val _06 = data.next().split(",").map(_.toDouble).toVector
//val noConstraints = data.next().split(",").map(_.toDouble).toVector
//val handCrafted = data.next().split(",").map(_.toDouble).toVector
Figure("meeting-2-passes").plot(winnow).plot(_095).plot(_09).plot(_08).plot(_07).plot(_06)//.plot(noConstraints).plot(handCrafted)
//.plot(domain -> sin _)
//.plot(lineStyle = DASHED)(domain -> cos _)
.havingLegends("winnow", "score $\\geq 0.95$", "score $\\geq 0.9$", "score $\\geq 0.8$", "score $\\geq 0.7$", "score $\\geq 0.6$")//, , "all rules", "hand-crafted")
//.havingLegendPos(SOUTH_WEST)
.havingXLabel("Data batches (size 50)")
.havingYLabel("Accumulated \\ Error$")
.havingTitle("Meeting \\ 2-passes").
saveAsPDF(savePath)
//.show()
}
*/
}
| 13,968 | 43.346032 | 170 | scala |
OLED | OLED-master/src/main/scala/utils/plotting/TPLPExpertsPlots.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils.plotting
import scalatikz.pgf.enums.Color.{BLACK, BLUE, GREEN, ORANGE, RED, YELLOW}
import scalatikz.pgf.plots.Figure
import scalatikz.pgf.plots.enums.LegendPos
import scalatikz.pgf.plots.enums.LegendPos.{NORTH_WEST, SOUTH_EAST}
import scalatikz.pgf.plots.enums.Mark.{ASTERISK, CIRCLE, PLUS, TRIANGLE, X}
import scala.io.Source
/*import scalatikz.graphics.pgf.Figure
import scalatikz.graphics.pgf.enums.Color.{BLACK, BLUE, GREEN, ORANGE, RED, YELLOW}
import scalatikz.graphics.pgf.enums.LegendPos
import scalatikz.graphics.pgf.enums.Mark._
import scalatikz.graphics.pgf.enums.LegendPos.{NORTH_WEST, SOUTH_EAST, SOUTH_WEST}
import scalatikz.graphics.pgf.enums.LineStyle.{DASHED, DENSELY_DOTTED, DOTTED, LOOSELY_DASHED, SOLID}
import scalatikz.graphics.pgf.enums.Mark.DOT*/
object TPLPExpertsPlots extends App {
//plotMeetingMistakesInertiaNoInertia("/home/nkatz/Desktop/TPLP-2019-results/meeting-inertia-experiments-mistakes", "/home/nkatz/Desktop/TPLP-2019-results")
//plotMovingMistakesInertiaNoInertia("/home/nkatz/Desktop/TPLP-2019-results/moving-inertia-experiments-mistakes", "/home/nkatz/Desktop/TPLP-2019-results")
//plotPrequentialTimesTogether("/home/nkatz/Desktop/TPLP-2019-results")
//plotMeetingStreaming("/home/nkatz/Desktop/TPLP-2019-results/meeting-streaming", "/home/nkatz/Desktop/TPLP-2019-results")
//plotMovingStreaming("/home/nkatz/Desktop/TPLP-2019-results/moving-streaming", "/home/nkatz/Desktop/TPLP-2019-results")
plotCrossValBothCEs("/home/nkatz/Desktop/TPLP-2019-results")
//plotMovingF1Scores("/home/nkatz/Desktop/TPLP-2019-results/moving-prequential-comparison-PrequentialF1Score", "/home/nkatz/Desktop/TPLP-2019-results")
//plotMovingMistakes("/home/nkatz/Desktop/TPLP-2019-results/moving-prequential-comparison-MistakeNum", "/home/nkatz/Desktop/TPLP-2019-results")
plotMeetingMistakes("/home/nkatz/Desktop/TPLP-2019-results/meeting-prequential-comparison-MistakeNum", "/home/nkatz/Desktop/TPLP-2019-results")
//plotMeetingF1Scores("/home/nkatz/Desktop/TPLP-2019-results/meeting-prequential-comparison-PrequentialF1Score", "/home/nkatz/Desktop/TPLP-2019-results")
//plotLimitedFeedbackMeeting("/home/nkatz/Desktop/TPLP-2019-results")
//plotLimitedFeedbackMoving("/home/nkatz/Desktop/TPLP-2019-results")
//plotLimitedFeedbackMovingBarChart("/home/nkatz/Desktop/TPLP-2019-results")
//plotLimitedFeedbackMeetingBarChart("/home/nkatz/Desktop/TPLP-2019-results")
//plotPrequentialTimeMeeting("/home/nkatz/Desktop/TPLP-2019-results")
//plotPrequentialTimeMoving("/home/nkatz/Desktop/TPLP-2019-results")
//plotRulesNumMeeting("/home/nkatz/Desktop/TPLP-2019-results")
//plotRulesNumMoving("/home/nkatz/Desktop/TPLP-2019-results")
//plotMeetingRulesNum("/home/nkatz/Desktop/TPLP-2019-results/meeting-rules-number", "/home/nkatz/Desktop/TPLP-2019-results")
//plotMovingCrossVal("/home/nkatz/Desktop/TPLP-2019-results")
//plotMeetingCrossVal("/home/nkatz/Desktop/TPLP-2019-results")
def plotCrossValBothCEs(savePath: String) = {
val fscoresMeeting = Vector(0.762, 0.863, 0.861, 0.822, 0.843, 0.889, 0.906)
val fscoresMoving = Vector(0.751, 0.890, 0.841, 0.802, 0.789, 0.857, 0.847)
Figure("cross-val-both-CEs").bar(barColor = BLUE ! 50 ! BLACK, barWidth = 0.2)(fscoresMoving).
bar(barColor = RED ! 50 ! BLACK, barWidth = 0.2)(fscoresMeeting)
.havingYLabel("\\textbf{Average $F_1$-score (test set)}").havingYLimits(0.5, 1.0).
havingAxisXLabels(Seq("\\textsf{\\scriptsize HC}",
"\\textsf{\\scriptsize HC-MM}",
"\\textsf{\\scriptsize XHAIL}",
"\\textsf{\\scriptsize HC-EXP}",
"\\textsf{\\scriptsize OLED}",
"\\textsf{\\scriptsize OLED-MLN}",
"\\textsf{\\scriptsize OLED-EXP}")).rotateXTicks(20)
.havingTitle("\\emph{},ybar").havingLegends("\\emph{Moving}", "\\emph{Meeting}").havingLegendPos(NORTH_WEST)
.saveAsPDF(savePath)
}
def plotMeetingCrossVal(savePath: String) = {
val fscores = Vector(0.762, 0.863, 0.861, 0.822, 0.843, 0.889, 0.906)
Figure("meeting-cross-val")
//.stem(color = BLUE!50!BLACK, marker = CIRCLE)(fscores)
.bar(barColor = BLUE ! 50 ! BLACK, barWidth = 0.2)(fscores)
.havingYLabel("\\textbf{Average $F_1$-score (test set)}").havingYLimits(0.5, 1.0).
havingAxisXLabels(Seq("\\textsf{\\scriptsize HC}",
"\\textsf{\\scriptsize HC-MM}",
"\\textsf{\\scriptsize XHAIL}",
"\\textsf{\\scriptsize HC-EXP}",
"\\textsf{\\scriptsize OLED}",
"\\textsf{\\scriptsize OLED-MLN}",
"\\textsf{\\scriptsize OLED-EXP}")).rotateXTicks(20)
.havingTitle("\\emph{Meeting}")
.saveAsPDF(savePath)
}
def plotMovingCrossVal(savePath: String) = {
val fscores = Vector(0.751, 0.890, 0.841, 0.802, 0.789, 0.857, 0.847)
Figure("moving-cross-val")
//.stem(color = BLUE!50!BLACK, marker = CIRCLE)(fscores)
.bar(barColor = BLUE ! 50 ! BLACK, barWidth = 0.2)(fscores)
.havingYLabel("\\textbf{Average $F_1$-score (test set)}").havingYLimits(0.5, 1.0).
havingAxisXLabels(Seq("\\textsf{\\scriptsize HC}",
"\\textsf{\\scriptsize HC-MM}",
"\\textsf{\\scriptsize XHAIL}",
"\\textsf{\\scriptsize HC-EXP}",
"\\textsf{\\scriptsize OLED}",
"\\textsf{\\scriptsize OLED-MLN}",
"\\textsf{\\scriptsize OLED-EXP}")).rotateXTicks(20)
.havingTitle("\\emph{Moving}")
.saveAsPDF(savePath)
}
def plotRulesNumMeeting(savePath: String) = {
val times = Vector(88.0, 118.0, 79.0)
val _times = Vector(9.0, 19.0, 15.0)
Figure("meeting-prequential-rules-num").
bar(barColor = BLUE ! 50 ! BLACK, barWidth = 0.2)(times).bar(barColor = RED ! 50 ! BLACK, barWidth = 0.2)(_times).
havingYLabel("\\textbf{Number of Rules}").
havingAxisXLabels(Seq("\\textsf{\\footnotesize OLED}", "\\textsf{\\footnotesize OLED-MLN}",
"\\textsf{\\footnotesize OLED-EXP}")).
havingTitle("\\emph{Meeting},ybar").havingLegends("Average", "Useful").havingLegendPos(NORTH_WEST).
saveAsPDF(savePath)
}
def plotRulesNumMoving(savePath: String) = {
val times = Vector(75.0, 92.0, 65.0)
val _times = Vector(10.0, 18.0, 14.0)
Figure("moving-prequential-rules-num").
bar(barColor = BLUE ! 50 ! BLACK, barWidth = 0.2)(times).bar(barColor = RED ! 50 ! BLACK, barWidth = 0.2)(_times).
havingYLabel("\\textbf{Number of Rules}").
havingAxisXLabels(Seq("\\textsf{\\footnotesize OLED}", "\\textsf{\\footnotesize OLED-MLN}",
"\\textsf{\\footnotesize OLED-EXP}")).
havingTitle("\\emph{Moving},ybar").havingLegends("Average", "Useful").havingLegendPos(NORTH_WEST).
saveAsPDF(savePath)
}
def plotPrequentialTimesTogether(savePath: String) = {
val timesMeeting = Vector(12.0, 43.0, 104.0, 58.0)
val timesMoving = Vector(14.0, 48.0, 118.0, 62.0)
Figure("prequential-times-both-CEs").
bar(barColor = YELLOW ! 50 ! BLACK, barWidth = 0.3)(timesMoving).bar(barColor = GREEN ! 50 ! BLACK, barWidth = 0.3)(timesMeeting).
havingYLabel("\\textbf{Time (sec)}").
havingAxisXLabels(Seq("\\textsf{\\footnotesize HC-EXP}",
"\\textsf{\\footnotesize OLED}", "\\textsf{\\footnotesize OLED-MLN}",
"\\textsf{\\footnotesize OLED-EXP}")).
havingTitle("\\emph{},ybar").havingLegends("\\emph{Moving}", "\\emph{Meeting}").havingLegendPos(NORTH_WEST).
saveAsPDF(savePath)
}
def plotPrequentialTimeMeeting(savePath: String) = {
val times = Vector(12.0, 43.0, 118.0, 62.0)
Figure("meeting-prequential-time").
bar(barColor = BLUE ! 50 ! BLACK, barWidth = 0.3)(times).
havingYLabel("\\textbf{Time (sec)}").
havingAxisXLabels(Seq("\\textsf{\\footnotesize HandCrafted-EXP}",
"\\textsf{\\footnotesize OLED}", "\\textsf{\\footnotesize OLED-MLN}",
"\\textsf{\\footnotesize OLED-EXP}")).
havingTitle("\\emph{Meeting}").
saveAsPDF(savePath)
}
def plotPrequentialTimeMoving(savePath: String) = {
val times = Vector(14.0, 48.0, 104.0, 58.0)
Figure("moving-prequential-time").
bar(barColor = BLUE ! 50 ! BLACK, barWidth = 0.3)(times).
havingYLabel("\\textbf{Time (sec)}").
havingAxisXLabels(Seq("\\textsf{\\footnotesize HandCrafted-EXP}",
"\\textsf{\\footnotesize OLED}", "\\textsf{\\footnotesize OLED-MLN}",
"\\textsf{\\footnotesize OLED-EXP}")).
havingTitle("\\emph{Moving}").
saveAsPDF(savePath)
}
def plotLimitedFeedbackMovingBarChart(savePath: String) = {
val feedbackProbs = Vector(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)
val fscores = Vector(0.834, 0.843, 0.896, 0.934, 0.948, 0.963, 0.966, 0.968, 0.968, 0.968)
val t = feedbackProbs zip fscores
Figure("moving-limited-feedback")
.stem(lineColor = BLUE ! 50 ! BLACK, marker = CIRCLE)(t)
.havingXLabel("\\textbf{Feedback probability}").
havingYLabel("\\textbf{Prequential $F_1$-score (final)}").havingYLimits(0.5, 1.0).
havingAxisXLabels(Seq("0.1", "0.2", "0.3", "0.4", "0.5", "0.6", "0.7", "0.8", "0.9", "1.0")).
havingTitle("\\emph{Moving}")
.saveAsPDF(savePath)
}
def plotLimitedFeedbackMeetingBarChart(savePath: String) = {
val feedbackProbs = Vector(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)
val fscores = Vector(0.822, 0.845, 0.883, 0.905, 0.948, 0.963, 0.966, 0.968, 0.968, 0.968)
val t = feedbackProbs zip fscores
Figure("meeting-limited-feedback")
.stem(lineColor = BLUE ! 50 ! BLACK, marker = CIRCLE)(t)
//.bar(t)
.havingXLabel("\\textbf{Feedback probability}").
havingYLabel("\\textbf{Prequential $F_1$-score (final)}").havingYLimits(0.5, 1.0).
havingAxisXLabels(Seq("0.1", "0.2", "0.3", "0.4", "0.5", "0.6", "0.7", "0.8", "0.9", "1.0")).
havingTitle("\\emph{Meeting}")
.saveAsPDF(savePath)
}
def plotLimitedFeedbackMoving(savePath: String) = {
val feedbackProbs = Vector(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)
val fscores = Vector(0.834, 0.843, 0.896, 0.934, 0.948, 0.963, 0.966, 0.968, 0.968, 0.968)
val t = feedbackProbs zip fscores
Figure("moving-limited-feedback").plot(t).havingLegendPos(LegendPos.NORTH_EAST).
//havingLegends("sync","async").
havingXLabel("Feedback probability").
havingYLabel("Prequential $F_1$-score (final)").havingYLimits(0.5, 1.0).
havingAxisXLabels(Seq("0.1", "0.2", "0.3", "0.4", "0.5", "0.6", "0.7", "0.8", "0.9", "1.0")).
havingTitle("\\emph{Moving}").
saveAsPDF(savePath)
}
def plotLimitedFeedbackMeeting(savePath: String) = {
val feedbackProbs = Vector(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)
val fscores = Vector(0.822, 0.845, 0.883, 0.905, 0.948, 0.963, 0.966, 0.968, 0.968, 0.968)
val t = feedbackProbs zip fscores
Figure("meeting-limited-feedback").plot(t).havingLegendPos(LegendPos.NORTH_EAST).
//havingLegends("sync","async").
havingXLabel("Feedback probability").
havingYLabel("Prequential $F_1$-score (final)").havingYLimits(0.5, 1.0).
havingAxisXLabels(Seq("0.1", "0.2", "0.3", "0.4", "0.5", "0.6", "0.7", "0.8", "0.9", "1.0")).
havingTitle("\\emph{Meeting}").saveAsPDF(savePath)
}
def plotMeetingStreaming(dataPath: String, savePath: String) = {
// skip every n elements from vector
def skip[A](l: Vector[A], n: Int) =
l.zipWithIndex.collect { case (e, i) if ((i + 1) % n) == 0 => e } // (i+1) because zipWithIndex is 0-based
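    // e.g. skip(Vector(a, b, c, d, e, f), 2) keeps every 2nd element, i.e. Vector(b, d, f),
    // thinning a dense series before plotting (its call is currently commented out below).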
val data = Source.fromFile(dataPath).getLines.filter(x => !x.isEmpty && !x.startsWith("%")) //.split(",")
val k1 = data.next().split(",").map(_.toDouble).toVector
//val k1 = skip(k, 50)
//val k1 = k.grouped(10).toVector.map(x => x.sum.toDouble/x.length)
Figure("meeting-streaming")
.plot(lineColor = RED)(makeSparse(k1))
//havingLegends("\\footnotesize \\textsf{OLED-EXP-inertia}", "\\footnotesize \\textsf{OLED-EXP-no-inertia}")
//.havingLegendPos(NORTH_WEST)
.havingXLabel("\\textbf{Time} $\\mathbf{(\\times 50)}$")
.havingYLabel("\\textbf{Mistakes}").
havingTitle("\\emph{Meeting}").
//havingTitle("\\emph{Meeting},ybar").
//havingAxisXLabels(Seq("0","5K","10K","15K","20K","25K")).
saveAsPDF(savePath)
}
def plotMovingStreaming(dataPath: String, savePath: String) = {
// skip every n elements from vector
def skip[A](l: Vector[A], n: Int) =
l.zipWithIndex.collect { case (e, i) if ((i + 1) % n) == 0 => e } // (i+1) because zipWithIndex is 0-based
val data = Source.fromFile(dataPath).getLines.filter(x => !x.isEmpty && !x.startsWith("%")) //.split(",")
val k1 = data.next().split(",").map(_.toDouble).toVector
//val k1 = skip(k, 50)
//val k1 = k.grouped(10).toVector.map(x => x.sum.toDouble/x.length)
Figure("moving-streaming")
.plot(lineColor = RED)(makeSparse(k1))
//havingLegends("\\footnotesize \\textsf{OLED-EXP-inertia}", "\\footnotesize \\textsf{OLED-EXP-no-inertia}")
//.havingLegendPos(NORTH_WEST)
.havingXLabel("\\textbf{Time} $\\mathbf{(\\times 50)}$")
.havingYLabel("\\textbf{Mistakes}").
havingTitle("\\emph{Moving}").
//havingTitle("\\emph{Meeting},ybar").
//havingAxisXLabels(Seq("0","5K","10K","15K","20K","25K")).
saveAsPDF(savePath)
}
def plotMeetingMistakesInertiaNoInertia(dataPath: String, savePath: String) = {
val data = Source.fromFile(dataPath).getLines.filter(x => !x.isEmpty && !x.startsWith("%")) //.split(",")
val inertia = data.next().split(",").map(_.toDouble).toVector
val noInertia = data.next().split(",").map(_.toDouble).toVector
Figure("meeting-inertia-no-inertia-mistakes")
.plot(lineColor = RED, marker = X, markStrokeColor = RED)(makeSparse(inertia)).
plot(lineColor = BLUE, marker = TRIANGLE, markStrokeColor = BLUE)(makeSparse(noInertia))
.havingLegends("\\footnotesize \\textsf{OLED-EXP-inertia}", "\\footnotesize \\textsf{OLED-EXP-no-inertia}")
.havingLegendPos(NORTH_WEST)
.havingXLabel("\\textbf{Time} $\\mathbf{(\\times 50)}$")
.havingYLabel("\\textbf{Acummulated Mistakes}").
havingTitle("\\emph{Meeting}").
//havingTitle("\\emph{Meeting},ybar").
//havingAxisXLabels(Seq("0","5K","10K","15K","20K","25K")).
saveAsPDF(savePath)
//.show()
}
def plotMovingMistakesInertiaNoInertia(dataPath: String, savePath: String) = {
val data = Source.fromFile(dataPath).getLines.filter(x => !x.isEmpty && !x.startsWith("%")) //.split(",")
val inertia = data.next().split(",").map(_.toDouble).toVector
val noInertia = data.next().split(",").map(_.toDouble).toVector
Figure("moving-inertia-no-inertia-mistakes")
.plot(lineColor = RED, marker = X, markStrokeColor = RED)(makeSparse(inertia)).
plot(lineColor = BLUE, marker = TRIANGLE, markStrokeColor = BLUE)(makeSparse(noInertia))
.havingLegends("\\footnotesize \\textsf{OLED-EXP-inertia}", "\\footnotesize \\textsf{OLED-EXP-no-inertia}")
.havingLegendPos(NORTH_WEST)
.havingXLabel("\\textbf{Time} $\\mathbf{(\\times 50)}$")
.havingYLabel("\\textbf{Acummulated Mistakes}").
havingTitle("\\emph{Moving}").
//havingTitle("\\emph{Meeting},ybar").
//havingAxisXLabels(Seq("0","5K","10K","15K","20K","25K")).
saveAsPDF(savePath)
//.show()
}
def plotMeetingMistakes(dataPath: String, savePath: String) = {
val data = Source.fromFile(dataPath).getLines.filter(x => !x.isEmpty && !x.startsWith("%")) //.split(",")
val handCrafted = data.next().split(",").map(_.toDouble).toVector
val handCraftedExperts = data.next().split(",").map(_.toDouble).toVector
val OLED = data.next().split(",").map(_.toDouble).toVector
val OLED_MLN = data.next().split(",").map(_.toDouble).toVector
val OLED_Experts = data.next().split(",").map(_.toDouble).toVector
Figure("meeting-prequential-mistakes")
.plot(lineColor = BLACK, marker = X, markStrokeColor = BLACK)(makeSparse(handCrafted)).
plot(lineColor = BLUE, marker = TRIANGLE, markStrokeColor = BLUE)(makeSparse(handCraftedExperts)).
plot(lineColor = GREEN ! 70 ! BLACK, marker = CIRCLE, markStrokeColor = GREEN ! 70 ! BLACK)(makeSparse(OLED)).
plot(lineColor = ORANGE, marker = PLUS, markStrokeColor = ORANGE)(makeSparse(OLED_MLN)).
plot(lineColor = RED, marker = ASTERISK, markStrokeColor = RED)(makeSparse(OLED_Experts))
.havingLegends("\\footnotesize \\textsf{HandCrafted}", "\\footnotesize \\textsf{HandCrafted-EXP}", "\\footnotesize \\textsf{OLED}",
"\\footnotesize \\textsf{OLED-MLN}", "\\footnotesize \\textsf{OLED-EXP}")
.havingLegendPos(NORTH_WEST)
.havingXLabel("\\textbf{Time} $\\mathbf{(\\times 50)}$")
.havingYLabel("\\textbf{Acummulated Mistakes}").
havingTitle("\\emph{Meeting}").
//havingTitle("\\emph{Meeting},ybar").
//havingAxisXLabels(Seq("0","5K","10K","15K","20K","25K")).
saveAsPDF(savePath)
//.show()
}
def plotMeetingF1Scores(dataPath: String, savePath: String) = {
val data = Source.fromFile(dataPath).getLines.filter(x => !x.isEmpty && !x.startsWith("%")) //.split(",")
val handCrafted = data.next().split(",").map(_.toDouble).toVector
val handCraftedExperts = data.next().split(",").map(_.toDouble).toVector
val OLED = data.next().split(",").map(_.toDouble).toVector
val OLED_MLN = data.next().split(",").map(_.toDouble).toVector
val OLED_Experts = data.next().split(",").map(_.toDouble).toVector
Figure("meeting-prequential-fscore")
.plot(lineColor = BLACK, marker = X, markStrokeColor = BLACK)(makeSparse(handCrafted)).
plot(lineColor = BLUE, marker = TRIANGLE, markStrokeColor = BLUE)(makeSparse(handCraftedExperts)).
plot(lineColor = GREEN ! 70 ! BLACK, marker = CIRCLE, markStrokeColor = GREEN ! 70 ! BLACK)(makeSparse(OLED)).
plot(lineColor = ORANGE, marker = PLUS, markStrokeColor = ORANGE)(makeSparse(OLED_MLN)).
plot(lineColor = RED, marker = ASTERISK, markStrokeColor = RED)(makeSparse(OLED_Experts))
.havingLegends("\\footnotesize \\textsf{HandCrafted}", "\\footnotesize \\textsf{HandCrafted-EXP}", "\\footnotesize \\textsf{OLED}",
"\\footnotesize \\textsf{OLED-MLN}", "\\footnotesize \\textsf{OLED-EXP}")
.havingLegendPos(SOUTH_EAST)
.havingXLabel("\\textbf{Time} $\\mathbf{(\\times 50)}$")
.havingYLabel("\\textbf{Prequential $F_1$-score}").
havingTitle("\\emph{Meeting}").
//havingAxisXLabels(Seq("0","5K","10K","15K","20K","25K")).
saveAsPDF(savePath)
//.show()
}
def plotMovingF1Scores(dataPath: String, savePath: String) = {
val data = Source.fromFile(dataPath).getLines.filter(x => !x.isEmpty && !x.startsWith("%")) //.split(",")
val handCrafted = data.next().split(",").map(_.toDouble).toVector
val handCraftedExperts = data.next().split(",").map(_.toDouble).toVector
val OLED = data.next().split(",").map(_.toDouble).toVector
val OLED_MLN = data.next().split(",").map(_.toDouble).toVector
val OLED_Experts = data.next().split(",").map(_.toDouble).toVector
Figure("moving-prequential-fscore")
.plot(lineColor = BLACK, marker = X, markStrokeColor = BLACK)(makeSparse(handCrafted)).
plot(lineColor = BLUE, marker = TRIANGLE, markStrokeColor = BLUE)(makeSparse(handCraftedExperts)).
plot(lineColor = GREEN ! 70 ! BLACK, marker = CIRCLE, markStrokeColor = GREEN ! 70 ! BLACK)(makeSparse(OLED)).
plot(lineColor = ORANGE, marker = PLUS, markStrokeColor = ORANGE)(makeSparse(OLED_MLN)).
plot(lineColor = RED, marker = ASTERISK, markStrokeColor = RED)(makeSparse(OLED_Experts))
.havingLegends("\\footnotesize \\textsf{HandCrafted}", "\\footnotesize \\textsf{HandCrafted-EXP}", "\\footnotesize \\textsf{OLED}",
"\\footnotesize \\textsf{OLED-MLN}", "\\footnotesize \\textsf{OLED-EXP}")
.havingLegendPos(SOUTH_EAST)
.havingXLabel("\\textbf{Time} $\\mathbf{(\\times 50)}$")
.havingYLabel("\\textbf{Prequential $F_1$-score}").
havingTitle("\\emph{Moving}").
//havingAxisXLabels(Seq("0","5K","10K","15K","20K","25K")).
saveAsPDF(savePath)
//.show()
}
def plotMovingMistakes(dataPath: String, savePath: String) = {
val data = Source.fromFile(dataPath).getLines.filter(x => !x.isEmpty && !x.startsWith("%")) //.split(",")
val handCrafted = data.next().split(",").map(_.toDouble).toVector
val handCraftedExperts = data.next().split(",").map(_.toDouble).toVector
val OLED = data.next().split(",").map(_.toDouble).toVector
val OLED_MLN = data.next().split(",").map(_.toDouble).toVector
val OLED_Experts = data.next().split(",").map(_.toDouble).toVector
Figure("moving-prequential-mistakes")
.plot(lineColor = BLACK, marker = X, markStrokeColor = BLACK)(makeSparse(handCrafted)).
plot(lineColor = BLUE, marker = TRIANGLE, markStrokeColor = BLUE)(makeSparse(handCraftedExperts)).
plot(lineColor = GREEN ! 70 ! BLACK, marker = CIRCLE, markStrokeColor = GREEN ! 70 ! BLACK)(makeSparse(OLED)).
plot(lineColor = ORANGE, marker = PLUS, markStrokeColor = ORANGE)(makeSparse(OLED_MLN)).
plot(lineColor = RED, marker = ASTERISK, markStrokeColor = RED)(makeSparse(OLED_Experts))
.havingLegends("\\footnotesize \\textsf{HandCrafted}", "\\footnotesize \\textsf{HandCrafted-EXP}", "\\footnotesize \\textsf{OLED}",
"\\footnotesize \\textsf{OLED-MLN}", "\\footnotesize \\textsf{OLED-EXP}")
.havingLegendPos(NORTH_WEST)
.havingXLabel("\\textbf{Time} $\\mathbf{(\\times 50)}$")
.havingYLabel("\\textbf{Acummulated Mistakes}").
havingTitle("\\emph{Moving}").
saveAsPDF(savePath)
//.show()
}
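  /** Compresses a series for plotting: keeps the first point, every point at which
    * the value changes, and the last point, as (index, value) pairs.
    * e.g. makeSparse(Vector(1.0, 1.0, 2.0, 2.0, 3.0)) == Vector((0.0,1.0), (2.0,2.0), (4.0,3.0)).
    */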
def makeSparse(input: Vector[Double]): Vector[(Double, Double)] = {
val l = input.length
input.zipWithIndex.foldLeft(Vector.empty[(Double, Double)]) {
case (output, (x, i)) =>
if (output.isEmpty) output :+ (i.toDouble, x)
else if (i == l - 1) output :+ (i.toDouble, x)
else if (output.last._2 != x) output :+ (i.toDouble, x)
else output
}
}
}
| 22,942 | 52.232019 | 158 | scala |
OLED | OLED-master/src/main/scala/woled/ASPActor.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package woled
/*TODO*/
class ASPActor {
}
| 741 | 28.68 | 72 | scala |
OLED | OLED-master/src/main/scala/woled/Learner.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package woled
import akka.actor.{Actor, ActorRef, Props}
import app.runutils.IOHandling.InputSource
import app.runutils.{Globals, RunningOptions}
import logic.Examples.Example
import logic.{Clause, Literal, Theory}
import oled.functions.SingleCoreOLEDFunctions
import oled.weightlearn.parallel.IO.FinishedBatch
import oled.weightlearn.parallel.WeightedTheoryLearner
import org.slf4j.LoggerFactory
import utils.ASP
import scala.io.Source
import scala.util.matching.Regex
class Learner[T <: app.runutils.IOHandling.InputSource](inps: RunningOptions, trainingDataOptions: T,
testingDataOptions: T, trainingDataFunction: T => Iterator[Example],
testingDataFunction: T => Iterator[Example],
targetClass: String) extends WeightedTheoryLearner(inps, trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction, targetClass) {
import context.become
private val logger = LoggerFactory.getLogger(self.path.name)
private val state = inps.globals.state
private var inertiaAtoms = Vector.empty[Literal]
var batchCount = 0
var withHandCrafted = false
// Use a hand-crafted theory for debugging
/*def matches(p: Regex, str: String) = p.pattern.matcher(str).matches
val source = Source.fromFile("/home/nkatz/dev/BKExamples/BK-various-taks/WeightLearning/Caviar/fragment/meeting/ASP/asp-rules-test")
val list = source.getLines.filter(line => !matches( """""".r, line) && !line.startsWith("%"))
val rulesList = list.map(x => Clause.parse(x)).toList
source.close
inps.globals.state.updateRules(rulesList, "add", inps)
withHandCrafted = true*/
def inferenceState: Receive = { ??? }
override def processingState: Receive = {
case batch: Example =>
logger.info(s"\n\n\n *** BATCH $batchCount *** ")
      if (batchCount == 2) {
        val stop = "stop" // no-op; kept as a convenient anchor for a debugger breakpoint
      }
      // Get the data in MLN-compatible format by handling the numerical stuff
      // (thresholds etc.) with Clingo and by deriving the atoms expected by the mode declarations.
val program = {
val nar = batch.narrative.map(_ + ".").mkString("\n")
val include = s"""#include "${inps.globals.BK_WHOLE_EC}"."""
val show = inps.globals.bodyAtomSignatures.map(x => s"#show ${x.tostring}.").mkString("\n")
Vector(nar, include, show)
}
// This transforms the actual data into an MLN-compatible form.
val f = woled.Utils.dumpToFile(program)
val t = ASP.solve(task = Globals.INFERENCE, aspInputFile = f)
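      // At least one answer set is assumed here; t.head below would throw otherwise.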
val answer = t.head.atoms
val e = new Example(annot = batch.annotation, nar = answer, _time = batch.time)
var rules = List.empty[Clause]
var inferredState = Map.empty[String, Boolean]
var tpCounts = 0
var fpCounts = 0
var fnCounts = 0
var totalGroundings = 0
var inertiaAtoms = Vector.empty[Literal]
// MAP inference and getting true groundings with Clingo (see below should be performed in parallel)
      //this.inertiaAtoms = Vector.empty[Literal] // Use this to defuse inertia
/*=============== WOLED ================*/
if (inps.weightLean) { // set --weight-learning=true to run WOLED, =false to run OLED
// We perform inference with the top rules only. The main reason is that there is a problem with conversion to CNF
// when many clauses are involved. It hangs, often eats up all the memory etc.
rules = state.getAllRules(inps.globals, "top")
//rules = state.getBestRules(inps.globals)
println("MAP inference...")
inferredState = WoledUtils.getInferredState(Theory.compressTheory(rules), e, this.inertiaAtoms, "MAP", inps)
// Parallelizing this is trivial (to speed things up in case of many rules/large batches).
        // Simply split the rules across multiple workers; the grounding/counting tasks executed are completely rule-independent.
println("Scoring...")
val (_tpCounts, _fpCounts, _fnCounts, _totalGroundings, _inertiaAtoms) = Scoring.scoreAndUpdateWeights(e, inferredState, state.getAllRules(inps.globals, "all").toVector, inps, logger)
tpCounts = _tpCounts
fpCounts = _fpCounts
fnCounts = _fnCounts
totalGroundings = _totalGroundings
inertiaAtoms = _inertiaAtoms
/*=============== OLED ================*/
} else {
rules = state.getBestRules(inps.globals, "score").filter(x => x.score >= 0.7)
val (_tpCounts, _fpCounts, _fnCounts, _, _, _) = oled.functions.SingleCoreOLEDFunctions.eval(Theory(rules), e, inps)
tpCounts = _tpCounts
fpCounts = _fpCounts
fnCounts = _fnCounts
val initTheory = Theory(state.initiationRules)
val termTheory = Theory(state.terminationRules)
if (initTheory.clauses.nonEmpty) initTheory.scoreRules(e, inps.globals)
if (termTheory.clauses.nonEmpty) termTheory.scoreRules(e, inps.globals)
}
this.inertiaAtoms = inertiaAtoms
      this.inertiaAtoms = Vector.empty[Literal] // Use this to defuse inertia
state.perBatchError = state.perBatchError :+ (fpCounts + fnCounts)
logger.info(s"\n${state.perBatchError}")
logger.info(s"\nFPs: $fpCounts, FNs: $fnCounts")
if (!withHandCrafted) {
state.totalGroundings += totalGroundings
state.updateGroundingsCounts(totalGroundings)
        // Generate new rules (via abduction, bottom clause construction etc.). This should be removed...
println("Generating new rules...")
var newInit = List.empty[Clause]
var newTerm = List.empty[Clause]
if (fpCounts != 0 || fnCounts != 0) {
val topInit = state.initiationRules
val topTerm = state.terminationRules
val growNewInit = Theory(topInit).growNewRuleTest(e, "initiatedAt", inps.globals)
val growNewTerm = Theory(topTerm).growNewRuleTest(e, "terminatedAt", inps.globals)
newInit = if (growNewInit) oled.functions.SingleCoreOLEDFunctions.generateNewRules(Theory(topInit), e, "initiatedAt", inps.globals) else Nil
newTerm = if (growNewTerm) oled.functions.SingleCoreOLEDFunctions.generateNewRules(Theory(topTerm), e, "terminatedAt", inps.globals) else Nil
state.updateRules(newInit ++ newTerm, "add", inps)
}
// Generate a few more rules randomly from mistakes. Initiation rules from the FNs and termination from the FPs.
/*if (fpCounts != 0 || fnCounts != 0) {
val (a, b) = Scoring.generateNewRules(fps, fns, batch, inps, logger)
state.updateRules(a.toList ++ b.toList, "add", inps)
}*/
val newRules = newInit ++ newTerm
// score the new rules and update their weights
Scoring.scoreAndUpdateWeights(e, inferredState, newRules.toVector, inps, logger)
/* Rules' expansion. */
// We only need the top rules for expansion here.
val init = inps.globals.state.initiationRules
val term = inps.globals.state.terminationRules
val expandedTheory = SingleCoreOLEDFunctions.expandRules(Theory(init ++ term), inps, logger)
inps.globals.state.updateRules(expandedTheory._1.clauses, "replace", inps)
inps.globals.state.pruneRules(inps.pruneThreshold)
// Do a MAP inference and scoring step again, to update weights for the newly generated rules.
// This is necessary for large batch sizes. This should be fixed so as to perform this step only
// for new BC-generated rules, by grounding those (only) and computing differences from current MAP state.
// UPDATE: This degrades performance. It needs to be done right.
/*rules = state.getAllRules(inps.globals, "top")
inferredState = WoledUtils.getInferredState(Theory.compressTheory(rules), e, this.inertiaAtoms, "MAP", inps)
Scoring.scoreAndUpdateWeights(e, inferredState, state.getAllRules(inps.globals, "all").toVector, inps, logger)*/
//println(Theory(state.getAllRules(inps.globals, "top")).showWithStats)
}
batchCount += 1
become(normalState)
self ! new FinishedBatch
}
}
| 8,763 | 43.714286 | 191 | scala |
OLED | OLED-master/src/main/scala/woled/MAPActor.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package woled
import akka.actor.Actor
import app.runutils.RunningOptions
import logic.Examples.Example
import org.slf4j.LoggerFactory
/*TODO*/
class MAPActor(inps: RunningOptions) extends Actor {
private val logger = LoggerFactory.getLogger(self.path.name)
def receive = {
case exmpl: Example =>
}
}
| 1,013 | 26.405405 | 72 | scala |
OLED | OLED-master/src/main/scala/woled/MAPCorrect.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package woled
import java.io.File
import logic.{Clause, Literal}
import lomrf.logic.compile.{NormalForm, PredicateCompletion, PredicateCompletionMode}
import lomrf.logic.parser.KBParser
import lomrf.logic.{AtomSignature, Constant, EvidenceAtom, FunctionMapping}
import lomrf.mln.grounding.MRFBuilder
import lomrf.mln.inference.ILP
import lomrf.mln.learning.structure.ClauseConstructor
import lomrf.mln.model.AtomIdentityFunctionOps._
import lomrf.mln.model.{Evidence, KB, MLN}
import scala.io.Source
object MAPCorrect extends App {
val queryAtoms = Set(
AtomSignature("HoldsAt", 2),
AtomSignature("InitiatedAt", 2),
AtomSignature("TerminatedAt", 2)
)
//val mlnBKFile = "/home/nkatz/dev/BKExamples/BK-various-taks/WeightLearning/Caviar/fragment/meeting/MLN/MAPInferenceBK.mln"
val mlnBKFile = "/home/nkatz/dev/WOLED-DEBUG/bk-and-rules"
val (kb, constants) = KB.fromFile(mlnBKFile)
val formulas = kb.formulas
val parser = new KBParser(kb.predicateSchema.map { case (x, y) => x -> y.toVector }, kb.functionSchema)
//val evidenceFile = new File("/home/nkatz/dev/BKExamples/BK-various-taks/WeightLearning/Caviar/fragment/meeting/MLN/23-Meet_Crowd.id0_id2.db")
val evidenceFile = new File("/home/nkatz/dev/WOLED-DEBUG/evidence")
val evidence = Evidence.fromFiles(kb, constants, queryAtoms, Seq(evidenceFile), false, false)
//========================================================================================
//val b = EvidenceBuilder(kb.predicateSchema, queryAtoms, Set.empty, constants)
///
//b.functions += new FunctionMapping("A", "foo", Vector("B", "C"))
//b.evidence += EvidenceAtom.asTrue("HappensAt", Vector(Constant("A")))
///
//val e = b.result()
//========================================================================================
val source = Source.fromFile("/home/nkatz/dev/BKExamples/BK-various-taks/WeightLearning/Caviar/fragment/meeting/ASP/asp-rules-test")
val list = source.getLines
val rulesList = list.map(x => Clause.parse(x)).toList
source.close
val testRule = rulesList.head
val head = testRule.head.asLiteral
val body = testRule.body
val ruleLiteralstoMLN = (head :: body).map(Literal.toMLNClauseLiteral)
val result = infer(rulesList)
val trueHoldsInit = result.filter{ case (k, v) => (k.startsWith("Init") || k.startsWith("Holds")) && v }
println(s"Initiation & Holds atoms inferred as true:\n${trueHoldsInit.mkString("\n")}")
def infer(rules: List[Clause]): Map[String, Boolean] = {
/*val definiteClauses = rules.map { rule =>
val head = Literal.toMLNClauseLiteral(rule.head.asLiteral).tostring_mln
val body = rule.body.map(Literal.toMLNClauseLiteral(_).tostring_mln).mkString(" ^ ")
parser.parseDefiniteClause(s"1 $head :- $body")
}*/
val definiteClauses = kb.definiteClauses
definiteClauses.map(_.toText).foreach(println)
val resultedFormulas = PredicateCompletion(formulas, definiteClauses.toSet, PredicateCompletionMode.Decomposed)(kb.predicateSchema, kb.functionSchema, constants)
//val cnf = NormalForm.compileCNF(resultedFormulas)(constants).toVector
println("Coverting to CNF...")
val cnf = NormalForm.compileFastCNF(resultedFormulas)(constants).toVector
cnf.filter(x => !x.isHard).map(x => x.toText()).foreach(println)
val mln = MLN(kb.schema, evidence, queryAtoms, cnf)
val builder = new MRFBuilder(mln, createDependencyMap = true)
val mrf = builder.buildNetwork
val solver = ILP(mrf)
solver.infer
var result = Map.empty[String, Boolean]
val queryStartID = mln.space.queryStartID
val queryEndID = mln.space.queryEndID
val iterator = mrf.atoms.iterator
while (iterator.hasNext) {
iterator.advance()
val atomID = iterator.key
if (atomID >= queryStartID && atomID <= queryEndID) {
val groundAtom = iterator.value
val state = if (groundAtom.getState) true else false
result += atomID.decodeAtom(mln).get -> state
}
}
result
}
}
| 4,715 | 36.428571 | 165 | scala |
OLED | OLED-master/src/main/scala/woled/Pruning.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package woled
import logic.{Clause, Theory}
/**
* Created by nkatz at 13/10/19
*/
object Pruning {
def naivePruningStrategy(rules: Vector[Clause], threshold: Double, logger: org.slf4j.Logger) = {
val (drop, keep) = rules.partition(rule => rule.body.size >= 3 && rule.precision < threshold)
logger.info(s"\nDropped rules:\n${Theory(drop.toList).showWithStats}")
keep
}
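  /* The strategies below are unimplemented stubs. As a minimal sketch only (an
     assumption about the intended design, not part of the original code), a
     worst-first strategy could reuse the `score` field that rules already carry: */
  def worstFirstSketch(rules: Vector[Clause], k: Int): Vector[Clause] =
    rules.sortBy(_.score).drop(k) // drop the k lowest-scoring rules, keep the rest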
def oldestFirst() = {
}
def worstFirst() = {
}
/* Combination of the two */
def oldestWorst() = {
}
}
| 1,211 | 24.25 | 98 | scala |
OLED | OLED-master/src/main/scala/woled/Runner.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package woled
import akka.actor.{ActorSystem, Props}
import app.runutils.CMDArgs
import com.typesafe.scalalogging.LazyLogging
import experiments.caviar.{FullDatasetHoldOut, MeetingTrainTestSets}
import experiments.caviar.FullDatasetHoldOut.MongoDataOptions
import logic.Examples.Example
import oled.mwua.Runner.logger
import oled.single_core.Dispatcher
/**
* Created by nkatz at 6/10/19
*/
/* This is very similar to the experts runner. Need to factor things out and clean up, so as to end up with a single App. */
object Runner extends LazyLogging {
def main(args: Array[String]) = {
val argsok = CMDArgs.argsOk(args)
if (!argsok._1) {
logger.error(argsok._2); System.exit(-1)
} else {
val runningOptions = CMDArgs.getOLEDInputArgs(args)
val train1 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-2-meeting-moving", "caviar-video-5",
"caviar-video-6", "caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-4", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
val train2 =
Vector("caviar-video-21-meeting-moving", "caviar-video-7", "caviar-video-28-meeting", "caviar-video-25", "caviar-video-30",
"caviar-video-11", "caviar-video-6", "caviar-video-14-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-20-meeting-moving", "caviar-video-13-meeting", "caviar-video-19-meeting-moving",
"caviar-video-12-moving", "caviar-video-1-meeting-moving", "caviar-video-9", "caviar-video-16", "caviar-video-23-moving",
"caviar-video-29", "caviar-video-5", "caviar-video-22-meeting-moving", "caviar-video-18", "caviar-video-4",
"caviar-video-24-meeting-moving", "caviar-video-8", "caviar-video-10", "caviar-video-2-meeting-moving",
"caviar-video-15", "caviar-video-3", "caviar-video-17")
// Single-pass run on the entire dataset
val trainingDataOptions =
new MongoDataOptions(dbNames = train1, //trainShuffled ,//dataset._1,
chunkSize = runningOptions.chunkSize, targetConcept = runningOptions.targetHLE, sortDbByField = "time", what = "training")
val testingDataOptions = trainingDataOptions
val trainingDataFunction: MongoDataOptions => Iterator[Example] = FullDatasetHoldOut.getMongoData
val testingDataFunction: MongoDataOptions => Iterator[Example] = FullDatasetHoldOut.getMongoData
val system = ActorSystem("HoeffdingLearningSystem")
val startMsg = "start"
system.actorOf(Props(new Dispatcher(runningOptions, trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction)), name = "Learner") ! startMsg
// Eval on test set in the end:
/*val caviarNum = args.find(x => x.startsWith("caviar-num")).get.split("=")(1)
val trainSet = Map(1 -> MeetingTrainTestSets.meeting1, 2 -> MeetingTrainTestSets.meeting2, 3 -> MeetingTrainTestSets.meeting3,
4 -> MeetingTrainTestSets.meeting4, 5 -> MeetingTrainTestSets.meeting5, 6 -> MeetingTrainTestSets.meeting6,
7 -> MeetingTrainTestSets.meeting7, 8 -> MeetingTrainTestSets.meeting8, 9 -> MeetingTrainTestSets.meeting9,
10 -> MeetingTrainTestSets.meeting10)
val dataset = trainSet(caviarNum.toInt)
val trainingDataOptions =
new MongoDataOptions(dbNames = dataset._1,//trainShuffled, //
chunkSize = runningOptions.chunkSize, targetConcept = runningOptions.targetHLE, sortDbByField = "time", what = "training")
val testingDataOptions =
new MongoDataOptions(dbNames = dataset._2,
chunkSize = runningOptions.chunkSize, targetConcept = runningOptions.targetHLE, sortDbByField = "time", what = "testing")
val trainingDataFunction: MongoDataOptions => Iterator[Example] = FullDatasetHoldOut.getMongoData
val testingDataFunction: MongoDataOptions => Iterator[Example] = FullDatasetHoldOut.getMongoData
val system = ActorSystem("HoeffdingLearningSystem")
val startMsg = "start"
system.actorOf(Props(new Dispatcher(runningOptions, trainingDataOptions, testingDataOptions,
trainingDataFunction, testingDataFunction) ), name = "Learner") ! startMsg*/
}
}
}
| 5,475 | 46.206897 | 172 | scala |
OLED | OLED-master/src/main/scala/woled/Scoring.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package woled
import app.runutils.{Globals, RunningOptions}
import logic.{Clause, Literal}
import logic.Examples.Example
import utils.ASP
import xhail.Xhail
import scala.util.Random
/**
* Created by nkatz on 11/10/19.
*/
object Scoring {
/*val BK =
"""
|%tps(X) :- X = #count {F,T: annotation(holdsAt(F,T)), inferred(holdsAt(F,T), true)}.
|%fps(X) :- X = #count {F,T: not annotation(holdsAt(F,T)), inferred(holdsAt(F,T), true)}.
|%fns(X) :- X = #count {F,T: annotation(holdsAt(F,T)), inferred(holdsAt(F,T), false)}.
|
|coverage_counts(TPs, FPs, FNs) :-
| TPs = #count {F,T: annotation(holdsAt(F,T)), inferred(holdsAt(F,T), true)},
| FPs = #count {F,T: not annotation(holdsAt(F,T)), inferred(holdsAt(F,T), true)},
| FNs = #count {F,T: annotation(holdsAt(F,T)), inferred(holdsAt(F,T), false)}.
|
|actually_initiated_correct(F, T, RuleId) :- fires(initiatedAt(F, T), RuleId), annotation(holdsAt(F, Te)), next(T, Te).
|actually_initiated_incorrect(F, T, RuleId) :- fires(initiatedAt(F, T), RuleId), not annotation(holdsAt(F, Te)), next(T, Te).
|inferred_initiated_correct(F, T, RuleId) :- actually_initiated_correct(F, T, RuleId), inferred(initiatedAt(F, T), true).
|inferred_initiated_incorrect(F, T, RuleId) :- actually_initiated_incorrect(F, T, RuleId), inferred(initiatedAt(F, T), true).
|
|actual_init_tps(RuleId, X) :- initiated_rule_id(RuleId), X = #count {F,T: actually_initiated_correct(F, T, RuleId)}.
|actual_init_fps(RuleId, X) :- initiated_rule_id(RuleId), X = #count {F,T: actually_initiated_incorrect(F, T, RuleId)}.
|inferred_init_tps(RuleId, X) :- initiated_rule_id(RuleId), X = #count {F,T: inferred_initiated_correct(F, T , RuleId)}.
|inferred_init_fps(RuleId, X) :- initiated_rule_id(RuleId), X = #count {F,T: inferred_initiated_incorrect(F, T, RuleId)}.
|
|result_init(RuleId, ActualTPs, ActualFPs, InferredTPs, InferredFPs, Mistakes) :-
| initiated_rule_id(RuleId),
| actual_init_tps(RuleId, ActualTPs),
| actual_init_fps(RuleId, ActualFPs),
| inferred_init_tps(RuleId, InferredTPs),
| inferred_init_fps(RuleId, InferredFPs),
| Mistakes = InferredTPs + InferredFPs - ActualTPs.
|
|actually_terminated_correct(F, T, RuleId) :- fires(terminatedAt(F, T), RuleId), annotation(holdsAt(F, T)), not annotation(holdsAt(F,Te)), next(T, Te).
|actually_not_terminated_correct(F, T, RuleId) :- terminated_rule_id(RuleId), not fires(terminatedAt(F, T), RuleId), annotation(holdsAt(F, Te)), next(T, Te).
|actually_terminated_incorrect(F, T , RuleId) :- fires(terminatedAt(F, T), RuleId), annotation(holdsAt(F, Te)), next(T, Te).
|
|inferred_terminated_correct(F, T, RuleId) :- actually_terminated_correct(F, T, RuleId), inferred(terminatedAt(F, T), true).
|inferred_not_terminated_correct(F, T, RuleId) :- actually_not_terminated_correct(F, T, RuleId), inferred(terminatedAt(F, T), false).
|inferred_terminated_incorrect(F, T , RuleId) :- actually_terminated_incorrect(F, T , RuleId), inferred(terminatedAt(F, T), true).
|
|actual_term_tps_1(RuleId, X) :- terminated_rule_id(RuleId), X = #count {F,T: actually_terminated_correct(F, T, RuleId)}.
|actual_term_tps_2(RuleId, X) :- terminated_rule_id(RuleId), X = #count {F,T: actually_not_terminated_correct(F, T, RuleId)}.
|actual_term_fps(RuleId, X) :- terminated_rule_id(RuleId), X = #count {F,T: actually_terminated_incorrect(F, T, RuleId)}.
|inferred_term_tps_1(RuleId, X) :- terminated_rule_id(RuleId), X = #count {F,T: inferred_terminated_correct(F, T, RuleId)}.
|inferred_term_tps_2(RuleId, X) :- terminated_rule_id(RuleId), X = #count {F,T: inferred_not_terminated_correct(F, T, RuleId)}.
|inferred_term_fps(RuleId, X) :- terminated_rule_id(RuleId), X = #count {F,T: inferred_terminated_incorrect(F, T, RuleId)}.
|
|result_term(RuleId, ActualTPs, ActualFPs, InferredTPs, InferredFPs, Mistakes) :-
| terminated_rule_id(RuleId),
| actual_term_tps_1(RuleId, ActualTPs1),
| actual_term_tps_2(RuleId, ActualTPs2),
| ActualTPs = ActualTPs1 + ActualTPs2,
| actual_term_fps(RuleId, ActualFPs),
| inferred_term_tps_1(RuleId, InferredTPs1),
| inferred_term_tps_2(RuleId, InferredTPs2),
| InferredTPs = InferredTPs1 + InferredTPs2,
| inferred_term_fps(RuleId, InferredFPs),
| Mistakes = InferredTPs + InferredFPs - ActualTPs.
|
|#show.
|#show coverage_counts/3.
|#show result_init/6.
|#show result_term/6.
|#show total_groundings/1.
|
|""".stripMargin*/
/*val BK =
"""
|%tps(X) :- X = #count {F,T: annotation(holdsAt(F,T)), inferred(holdsAt(F,T), true)}.
|%fps(X) :- X = #count {F,T: not annotation(holdsAt(F,T)), inferred(holdsAt(F,T), true)}.
|%fns(X) :- X = #count {F,T: annotation(holdsAt(F,T)), inferred(holdsAt(F,T), false)}.
|
|coverage_counts(TPs, FPs, FNs) :-
| TPs = #count {F,T: annotation(holdsAt(F,T)), inferred(holdsAt(F,T), true)},
| FPs = #count {F,T: not annotation(holdsAt(F,T)), inferred(holdsAt(F,T), true)},
| FNs = #count {F,T: annotation(holdsAt(F,T)), inferred(holdsAt(F,T), false)}.
|
|actually_initiated_correct(F, T, RuleId) :- fires(initiatedAt(F, T), RuleId), annotation(holdsAt(F, Te)), next(T, Te).
|actually_initiated_incorrect(F, T, RuleId) :- fires(initiatedAt(F, T), RuleId), not annotation(holdsAt(F, Te)), next(T, Te).
|inferred_initiated_correct(F, T, RuleId) :-
| actually_initiated_correct(F, T, RuleId),
| inferred(initiatedAt(F, T), true),
| inferred(holdsAt(F, T), true).
| %inferred(holdsAt(F, Te), true), % These generate much worse results.
| %next(T,Te).
|inferred_initiated_incorrect(F, T, RuleId) :-
| actually_initiated_incorrect(F, T, RuleId),
| inferred(initiatedAt(F, T), true),
| inferred(holdsAt(F, T), false).
| %inferred(holdsAt(F, Te), false), % These generate much worse results.
| %next(T,Te).
|
|actual_init_tps(RuleId, X) :- initiated_rule_id(RuleId), X = #count {F,T: actually_initiated_correct(F, T, RuleId)}.
|actual_init_fps(RuleId, X) :- initiated_rule_id(RuleId), X = #count {F,T: actually_initiated_incorrect(F, T, RuleId)}.
|inferred_init_tps(RuleId, X) :- initiated_rule_id(RuleId), X = #count {F,T: inferred_initiated_correct(F, T , RuleId)}.
|inferred_init_fps(RuleId, X) :- initiated_rule_id(RuleId), X = #count {F,T: inferred_initiated_incorrect(F, T, RuleId)}.
|
|result_init(RuleId, ActualTPs, ActualFPs, InferredTPs, InferredFPs, Mistakes) :-
| initiated_rule_id(RuleId),
| actual_init_tps(RuleId, ActualTPs),
| actual_init_fps(RuleId, ActualFPs),
| inferred_init_tps(RuleId, InferredTPs),
| inferred_init_fps(RuleId, InferredFPs),
| Mistakes = InferredTPs + InferredFPs - ActualTPs.
|
|% actually_terminated_correct(F, T, RuleId) :- fires(terminatedAt(F, T), RuleId), annotation(holdsAt(F, T)), not annotation(holdsAt(F,Te)), next(T, Te).
|actually_terminated_correct(F, T, RuleId) :- fires(terminatedAt(F, T), RuleId), not annotation(holdsAt(F,Te)), next(T, Te).
|actually_not_terminated_correct(F, T, RuleId) :- terminated_rule_id(RuleId), not fires(terminatedAt(F, T), RuleId), annotation(holdsAt(F, Te)), next(T, Te).
|actually_terminated_incorrect(F, T , RuleId) :- fires(terminatedAt(F, T), RuleId), annotation(holdsAt(F, Te)), next(T, Te).
|
|inferred_terminated_correct(F, T, RuleId) :- actually_terminated_correct(F, T, RuleId), inferred(terminatedAt(F, T), true).
|inferred_not_terminated_correct(F, T, RuleId) :- actually_not_terminated_correct(F, T, RuleId), inferred(terminatedAt(F, T), false).
|inferred_terminated_incorrect(F, T , RuleId) :- actually_terminated_incorrect(F, T , RuleId), inferred(terminatedAt(F, T), true).
|
|%% These do not seem to work. They generate crazy results.
|%*
|inferred_terminated_correct(F, T, RuleId) :-
| actually_terminated_correct(F, T, RuleId),
| inferred(terminatedAt(F, T), true),
| inferred(holdsAtAt(F, T), true),
| inferred(holdsAtAt(F, Te), false),
| next(T, Te).
|inferred_not_terminated_correct(F, T, RuleId) :-
| actually_not_terminated_correct(F, T, RuleId),
| inferred(terminatedAt(F, T), false),
| inferred(holdsAt(F, Te), true),
| next(T, Te).
|inferred_terminated_incorrect(F, T , RuleId) :-
| actually_terminated_incorrect(F, T , RuleId),
| inferred(terminatedAt(F, T), true),
| inferred(holdsAt(F, Te), true),
| next(T, Te).
|*%
|
|actual_term_tps_1(RuleId, X) :- terminated_rule_id(RuleId), X = #count {F,T: actually_terminated_correct(F, T, RuleId)}.
|actual_term_tps_2(RuleId, X) :- terminated_rule_id(RuleId), X = #count {F,T: actually_not_terminated_correct(F, T, RuleId)}.
|actual_term_fps(RuleId, X) :- terminated_rule_id(RuleId), X = #count {F,T: actually_terminated_incorrect(F, T, RuleId)}.
|inferred_term_tps_1(RuleId, X) :- terminated_rule_id(RuleId), X = #count {F,T: inferred_terminated_correct(F, T, RuleId)}.
|inferred_term_tps_2(RuleId, X) :- terminated_rule_id(RuleId), X = #count {F,T: inferred_not_terminated_correct(F, T, RuleId)}.
|inferred_term_fps(RuleId, X) :- terminated_rule_id(RuleId), X = #count {F,T: inferred_terminated_incorrect(F, T, RuleId)}.
|
|result_term(RuleId, ActualTPs, ActualFPs, InferredTPs, InferredFPs, Mistakes) :-
| terminated_rule_id(RuleId),
| actual_term_tps_1(RuleId, ActualTPs1),
| % actual_term_tps_2(RuleId, ActualTPs2),
| % ActualTPs = ActualTPs1 + ActualTPs2,
| ActualTPs = ActualTPs1,
| actual_term_fps(RuleId, ActualFPs),
| inferred_term_tps_1(RuleId, InferredTPs1),
| %inferred_term_tps_2(RuleId, InferredTPs2),
| %InferredTPs = InferredTPs1 + InferredTPs2,
| InferredTPs = InferredTPs1,
| inferred_term_fps(RuleId, InferredFPs),
| Mistakes = InferredTPs + InferredFPs - ActualTPs.
|
|#show.
|#show coverage_counts/3.
|#show result_init/6.
|#show result_term/6.
|#show total_groundings/1.
|
|""".stripMargin*/
val BK =
"""
|%tps(X) :- X = #count {F,T: annotation(holdsAt(F,T)), inferred(holdsAt(F,T), true)}.
|%fps(X) :- X = #count {F,T: not annotation(holdsAt(F,T)), inferred(holdsAt(F,T), true)}.
|%fns(X) :- X = #count {F,T: annotation(holdsAt(F,T)), inferred(holdsAt(F,T), false)}.
|
|coverage_counts(TPs, FPs, FNs) :-
| TPs = #count {F,T: annotation(holdsAt(F,T)), inferred(holdsAt(F,T), true)},
| FPs = #count {F,T: not annotation(holdsAt(F,T)), inferred(holdsAt(F,T), true)},
| FNs = #count {F,T: annotation(holdsAt(F,T)), not startTime(T), inferred(holdsAt(F,T), false)}.
|
|actual_initiated_true_grounding(F, T, RuleId) :-
| fluent(F), % This is necessary for correct scoring
| fires(initiatedAt(F, T), RuleId),
| annotation(holdsAt(F, Te)),
| next(T, Te).
|
|%actual_initiated_true_grounding(F, T, RuleId) :-
|% fluent(F), % This is necessary for correct scoring
|% fires(initiatedAt(F, T), RuleId),
|% annotation(holdsAt(F, T)),
|% endTime(T).
|
|actual_initiated_false_grounding(F, T, RuleId) :-
| fluent(F), % This is necessary for correct scoring
| fires(initiatedAt(F, T), RuleId),
| not annotation(holdsAt(F, Te)), next(T, Te).
|
|inferred_initiated_true_grounding(F, T, RuleId) :-
| fluent(F), % This is necessary for correct scoring
| initiated_rule_id(RuleId),
| fires(initiatedAt(F, T), RuleId),
| inferred(initiatedAt(F, T), true).
|
|result_init(RuleId, ActualTrueGroundings, ActualFalseGroundings, InferredTrueGroundings, Mistakes) :-
| initiated_rule_id(RuleId),
| ActualTrueGroundings = #count {F,T: actual_initiated_true_grounding(F, T, RuleId)},
| InferredTrueGroundings = #count {F,T: inferred_initiated_true_grounding(F, T , RuleId)},
| ActualFalseGroundings = #count {F,T: actual_initiated_false_grounding(F, T, RuleId)},
| Mistakes = InferredTrueGroundings - ActualTrueGroundings.
|
|actually_terminated_true_grounding(F, T, RuleId) :-
| fluent(F), % This is necessary for correct scoring
| fires(terminatedAt(F, T), RuleId),
| not annotation(holdsAt(F,Te)), next(T, Te).
|
|actually_terminated_true_grounding(F, T, RuleId) :- % This is necessary for correct scoring...
| fluent(F), % This is necessary for correct scoring
| fires(terminatedAt(F, T), RuleId),
| endTime(T),
| not annotation(holdsAt(F,T)).
|
|actually_terminated_false_grounding(F, T, RuleId) :-
| fluent(F), % This is necessary for correct scoring
| fires(terminatedAt(F, T), RuleId),
| annotation(holdsAt(F,Te)), next(T, Te).
|
|inferred_terminated_true_grounding(F, T, RuleId) :-
| fluent(F), % This is necessary for correct scoring
| terminated_rule_id(RuleId),
| fires(terminatedAt(F, T), RuleId),
| inferred(terminatedAt(F, T), true).
|
|
|actual_term_tps(RuleId, X) :- terminated_rule_id(RuleId), X = #count {F,T: actually_terminated_true_grounding(F, T, RuleId)}.
|inferred_term_tps(RuleId, X) :- terminated_rule_id(RuleId), X = #count {F,T: inferred_terminated_true_grounding(F, T, RuleId)}.
|
|result_term(RuleId, ActualTrueGroundings, ActualFalseGroundings, InferredTrueGroundings, Mistakes) :-
| terminated_rule_id(RuleId),
| ActualTrueGroundings = #count {F,T: actually_terminated_true_grounding(F, T, RuleId)},
| InferredTrueGroundings = #count {F,T: inferred_terminated_true_grounding(F, T, RuleId)},
| ActualFalseGroundings = #count {F,T: actually_terminated_false_grounding(F, T, RuleId)},
| Mistakes = InferredTrueGroundings - ActualTrueGroundings.
|
|
|inertia(holdsAt(F,T)) :- inferred(holdsAt(F, T), true), endTime(T).
|
|
|#show.
|#show coverage_counts/3.
|#show result_init/5.
|#show result_term/5.
|#show total_groundings/1.
|#show inertia/1.
|
|
|""".stripMargin
def scoreAndUpdateWeights(
data: Example,
inferredState: Map[String, Boolean],
rules: Vector[Clause],
inps: RunningOptions,
logger: org.slf4j.Logger) = {
val bk = BK
val zipped = rules zip (1 to rules.length)
val ruleIdsMap = zipped.map(x => x._2 -> x._1).toMap
val ruleIdPreds = {
ruleIdsMap.map{ case (id, rule) => if (rule.head.functor == "initiatedAt") s"initiated_rule_id($id)." else s"terminated_rule_id($id)." }
} mkString (" ")
val metaRules = ruleIdsMap.foldLeft(Vector[String]()) { (accum, r) =>
val (ruleId, rule) = (r._1, r._2)
val typeAtoms = rule.toLiteralList.flatMap(x => x.getTypePredicates(inps.globals)).distinct.map(x => Literal.parse(x))
val metaRule = s"fires(${rule.head.tostring}, $ruleId) :- ${(rule.body ++ typeAtoms).map(_.tostring).mkString(",")}."
accum :+ metaRule
}
val totalExmplsCount = {
val targetPred = inps.globals.EXAMPLE_PATTERNS.head
val tpstr = targetPred.tostring
val vars = targetPred.getVars.map(x => x.name).mkString(",")
val typePreds = targetPred.getTypePredicates(inps.globals).mkString(",")
val groundingsRule = s"grounding($tpstr) :- $typePreds, X0!=X1."
val groundingsCountRule = s"total_groundings(X) :- X = #count {Y: grounding(Y)}."
//s"total_groundings(X) :- X = #count {$vars: $tpstr: $typePreds} .\n"
s"$groundingsRule\n$groundingsCountRule"
}
val endTime = (data.narrative ++ data.annotation).map(x => Literal.parse(x)).map(x => x.terms.last.tostring.toInt).sorted.last
val observationAtoms = (data.narrative :+ s"endTime($endTime)").map(_ + ".")
val annotationAtoms = data.annotation.map(x => s"annotation($x).")
val inferredAtoms = inferredState.map{ case (k, v) => s"inferred($k,$v)." }
val include = s"""#include "${inps.globals.BK_WHOLE_EC}"."""
val metaProgram = {
Vector("% Annotation Atoms:\n", annotationAtoms.mkString(" "),
"\n% Inferred Atoms:\n", inferredAtoms.mkString(" "),
"\n% Observation Atoms:\n", observationAtoms.mkString(" "),
"\n% Marked Rules:\n", metaRules.mkString("\n") + ruleIdPreds,
"\n% Meta-rules for Scoring:\n", s"$include\n", totalExmplsCount, bk)
}
/* SOLVE */
val f = woled.Utils.dumpToFile(metaProgram)
val t = ASP.solve(Globals.INFERENCE, aspInputFile = f)
val answer = if (t.nonEmpty) t.head.atoms else Nil
/* PARSE RESULTS */
val (batchTPs, batchFPs, batchFNs, totalGroundings, inertiaAtoms, rulesResults) = answer.foldLeft(0, 0, 0, 0, Vector.empty[Literal], Vector.empty[String]) { (x, y) =>
if (y.startsWith("total_groundings")) {
val num = y.split("\\(")(1).split("\\)")(0).toInt
(x._1, x._2, x._3, num, x._5, x._6)
} else if (y.startsWith("coverage_counts")) {
val split = y.split(",")
val tps = split(0).split("\\(")(1)
val fps = split(1)
val fns = split(2).split("\\)")(0)
(tps.toInt, fps.toInt, fns.toInt, x._4, x._5, x._6)
} else if (y.startsWith("inertia")) {
val parsed = Literal.parse(y)
val atom = parsed.terms.head.asInstanceOf[Literal]
(x._1, x._2, x._3, x._4, x._5 :+ atom, x._6)
} else {
(x._1, x._2, x._3, x._4, x._5, x._6 :+ y)
}
}
//var prevTotalWeightVector = Vector.empty[Double] // used for the experts update
//var _rules = Vector.empty[Clause] // used for the experts update
/* UPDATE WEIGHTS */
rulesResults foreach { x =>
val split = x.split(",")
val ruleId = split(0).split("\\(")(1).toInt
val actualTrueGroundings = split(1).toInt
val actualFalseGroundings = split(2).toInt
val inferredTrueGroundings = split(3).toInt
val mistakes = split(4).split("\\)")(0).toInt
val rule = ruleIdsMap(ruleId)
rule.mistakes += mistakes
val prevWeight = rule.weight
//println(s"Before: ${rule.mlnWeight}")
//prevTotalWeightVector = prevTotalWeightVector :+ prevWeight // used for the experts update
//_rules = rules :+ rule // used for the experts update
// Adagrad
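      // Adaptive per-rule step with L1 regularization (soft thresholding):
      // G += g^2, step = eta / (delta + sqrt(G)), v = w - step * g,
      // w_new = sign(v) * max(0, |v| - lambda * step).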
val lambda = inps.adaRegularization //0.001 // 0.01 default
val eta = inps.adaLearnRate //1.0 // default
val delta = inps.adaGradDelta //1.0
val currentSubgradient = mistakes
rule.subGradient += currentSubgradient * currentSubgradient
val coefficient = eta / (delta + math.sqrt(rule.subGradient))
val value = rule.weight - coefficient * currentSubgradient
val difference = math.abs(value) - (lambda * coefficient)
if (difference > 0) rule.weight = if (value >= 0) difference else -difference
else rule.weight = 0.0
// Experts:
/*var newWeight = if (totalGroundings!=0) rule.mlnWeight * Math.pow(0.8, rule.mistakes/totalGroundings) else rule.mlnWeight * Math.pow(0.8, rule.mistakes)
if (newWeight.isNaN) {
val stop = "stop"
}
if (newWeight == 0.0 | newWeight.isNaN) newWeight = 0.00000001
rule.mlnWeight = if(newWeight.isPosInfinity) rule.mlnWeight else newWeight
println(s"After: ${rule.mlnWeight}")*/
/*if (prevWeight != rule.mlnWeight) {
logger.info(s"\nPrevious weight: $prevWeight, current weight: ${rule.mlnWeight}, actualTPs: $actualTrueGroundings, actualFPs: $actualFalseGroundings, inferredTPs: $inferredTrueGroundings, mistakes: $mistakes\n${rule.tostring}")
}*/
rule.tps += actualTrueGroundings
rule.fps += actualFalseGroundings
}
/*val prevTotalWeight = prevTotalWeightVector.sum
val _newTotalWeight = _rules.map(x => x.mlnWeight).sum
val newTotalWeight = _newTotalWeight
rules.foreach(x => x.mlnWeight = x.mlnWeight * (prevTotalWeight/newTotalWeight))
val newNewTotalWeight = _rules.map(x => x.mlnWeight).sum
if (newNewTotalWeight.isNaN) {
val stop = "stop"
}
println(s"Before | After: $prevTotalWeight | $newNewTotalWeight")*/
(batchTPs, batchFPs, batchFNs, totalGroundings, inertiaAtoms)
}
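  /**
    * Generates new rules to correct the current mistakes: FP atoms seed new
    * terminatedAt bottom clauses and FN atoms seed new initiatedAt ones. Each
    * generated bottom clause becomes the support set of a fresh empty-bodied rule.
    */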
def generateNewRules(fps: Map[Int, Set[Literal]], fns: Map[Int, Set[Literal]], batch: Example,
inps: RunningOptions, logger: org.slf4j.Logger) = {
val (initTopRules, termTopRules) = (inps.globals.state.initiationRules, inps.globals.state.terminationRules)
val fpSeedAtoms = fps.map(x => x._1 -> x._2.map(x => Literal(predSymbol = "terminatedAt", terms = x.terms)))
val fnSeedAtoms = fns.map(x => x._1 -> x._2.map(x => Literal(predSymbol = "initiatedAt", terms = x.terms)))
// These should be done in parallel...
val initBCs = if (fnSeedAtoms.nonEmpty) pickSeedsAndGenerate(fnSeedAtoms, batch, inps, 3, initTopRules.toVector, logger) else Set.empty[Clause]
val termBCs = if (fpSeedAtoms.nonEmpty) pickSeedsAndGenerate(fpSeedAtoms, batch, inps, 3, termTopRules.toVector, logger) else Set.empty[Clause]
val newRules = (inp: Set[Clause]) => {
inp map { rule =>
val c = Clause(head = rule.head, body = List())
c.addToSupport(rule)
c
}
}
val initNewRules = newRules(initBCs)
val termNewRules = newRules(termBCs)
    // These rules are generated to correct current mistakes, so their weight is set to 1 so that they are applied immediately.
initNewRules foreach (x => x.weight = 1.0)
termNewRules foreach (x => x.weight = 1.0)
(initNewRules, termNewRules)
}
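  /**
    * Tries numOfTrials times to pick a random group of mistake atoms, abduces
    * bottom clauses (BCs) from them via XHAIL kernel generation, and keeps only
    * BCs that do not theta-subsume an existing or already-generated BC.
    */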
def pickSeedsAndGenerate(mistakeAtomsGroupedByTime: Map[Int, Set[Literal]], batch: Example, inps: RunningOptions,
numOfTrials: Int, currentTopRules: Vector[Clause], logger: org.slf4j.Logger) = {
def generateBC(seedAtom: String, batch: Example, inps: RunningOptions) = {
val examples = batch.toMapASP
val f = (x: String) => if (x.endsWith(".")) x else s"$x."
val interpretation = examples("annotation").map(x => s"${f(x)}") ++ examples("narrative").map(x => s"${f(x)}")
val infile = woled.Utils.dumpToFile(interpretation)
val (_, kernel) = Xhail.generateKernel(List(seedAtom), "", examples, infile, inps.globals.BK_WHOLE, inps.globals)
infile.delete()
kernel
}
val random = new Random
val existingBCs = currentTopRules.flatMap(x => x.supportSet.clauses).toSet
val newBCs = (1 to numOfTrials).foldLeft(existingBCs, Set.empty[Clause]) { (x, _) =>
val _newBCs = x._2
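      // Pick a random time point among those with mistake atoms.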
val seed = random.shuffle(mistakeAtomsGroupedByTime).head
val seedAtoms = seed._2.map(_.tostring)
val _BCs = seedAtoms.flatMap(generateBC(_, batch, inps))
val BCs = _BCs.filter(newBC => !(existingBCs ++ _newBCs).exists(oldBC => newBC.thetaSubsumes(oldBC)))
if (BCs.nonEmpty) {
logger.info(s"\nStart growing new rules from BCs:\n${BCs.map(_.tostring).mkString("\n")}")
(x._1 ++ BCs, x._2 ++ BCs)
} else {
logger.info("Generated BCs already existed in the bottom theory.")
x
}
}
newBCs._2
}
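  /**
    * Compares the inferred state with the annotation on holdsAt/2 atoms and
    * returns the FPs and FNs grouped by time point, along with the batch
    * TP/FP/FN counts.
    */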
def getMistakes(inferredState: Map[String, Boolean], batch: Example) = {
val annotation = batch.annotation.toSet
val (tps, fps, fns) = inferredState.foldLeft(Set.empty[String], Set.empty[String], Set.empty[String]) { (accum, y) =>
val (atom, predictedTrue) = (y._1, y._2)
if (atom.startsWith("holds")) {
if (predictedTrue) {
if (!annotation.contains(atom)) (accum._1, accum._2 + atom, accum._3) // FP
else (accum._1 + atom, accum._2, accum._3) // TP, we don't care.
} else {
if (annotation.contains(atom)) (accum._1, accum._2, accum._3 + atom) // FN
else accum // TN, we don't care.
}
} else {
accum
}
}
val (tpCounts, fpCounts, fnCounts) = (tps.size, fps.size, fns.size)
val groupedByTime = (in: Set[String]) => in.map(x => Literal.parse(x)).groupBy(x => x.terms.tail.head.tostring.toInt)
(groupedByTime(fps), groupedByTime(fns), tpCounts, fpCounts, fnCounts)
}
}
| 26,236 | 49.945631 | 235 | scala |
OLED | OLED-master/src/main/scala/woled/State.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package woled
import app.runutils.{Globals, RunningOptions}
import logic.Clause
class State {
var initiationRules: List[Clause] = List[Clause]()
var terminationRules: List[Clause] = List[Clause]()
var perBatchError: Vector[Int] = Vector.empty[Int]
var runningRulesNumber: Vector[Int] = Vector.empty[Int]
// This is the number of examples seen so far, the N for the Hoeffding test.
var totalGroundings = 0
var batchCounter = 0
var totalTPs = 0
var totalFPs = 0
var totalFNs = 0
var totalTNs = 0
def getTopTheory() = initiationRules ++ terminationRules
/* The "what" variable here is either "all" or "top".
* "all" returns all non-empty bodied rules along with their
* specializations, while "top" returns the top non-empty bodied rules.
* */
def getAllRules(gl: Globals, what: String) = {
val topRules = getTopTheory()
what match {
case "all" =>
topRules.flatMap { topRule =>
if (topRule.refinements.isEmpty) topRule.generateCandidateRefs(gl)
//if (topRule.body.nonEmpty) List(topRule) ++ topRule.refinements else topRule.refinements
List(topRule) ++ topRule.refinements
}
case "top" =>
topRules.filter(x => x.body.nonEmpty)
}
}
// Returns the best refinement currently available from each subsumption lattice
def getBestRules(gl: Globals, quality: String = "weight") = {
val topRules = getTopTheory()
topRules map { topRule =>
if (topRule.refinements.isEmpty) topRule.generateCandidateRefs(gl)
val sorted = (topRule.refinements :+ topRule).sortBy(x => if (quality == "weight") -x.weight else -x.score)
if (sorted.head.body.nonEmpty) sorted.head else sorted.tail.head
}
}
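  // Increments the seen-examples counter (the N of the Hoeffding test) for each
  // top rule, the head of its support set and all of its refinements.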
def updateGroundingsCounts(newCount: Int) = {
val rules = getTopTheory()
rules foreach { rule =>
rule.seenExmplsNum += newCount
rule.supportSet.clauses.head.seenExmplsNum += newCount
rule.refinements foreach { ref =>
ref.seenExmplsNum += newCount
}
}
}
/* The "action" variable here is either "add" or "replace" */
def updateRules(newRules: List[Clause], action: String, inps: RunningOptions) = {
newRules foreach { rule => if (rule.refinements.isEmpty) rule.generateCandidateRefs(inps.globals) }
val (init, term) = newRules.partition(x => x.head.functor == "initiatedAt")
action match {
case "add" =>
initiationRules = initiationRules ++ init
terminationRules = terminationRules ++ term
case "replace" =>
initiationRules = init
terminationRules = term
}
}
def pruneRules(acceptableScore: Double) = {
/* Remove rules by score */
def removeBadRules(rules: List[Clause]) = {
rules.foldLeft(List.empty[Clause]) { (accum, rule) =>
        if (rule.body.length >= 2 && rule.score <= acceptableScore) accum else accum :+ rule
}
}
initiationRules = removeBadRules(initiationRules)
terminationRules = removeBadRules(terminationRules)
}
}
| 3,718 | 32.205357 | 113 | scala |
OLED | OLED-master/src/main/scala/woled/Test.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package woled
import akka.actor.{Actor, ActorSystem, Props}
import woled.Test.{Master, Reply, Work}
import scala.concurrent.Await
import scala.concurrent.duration._
import akka.pattern._
import akka.util.Timeout
import logic.Clause
import scala.io.Source
/**
* Created by nkatz at 17/10/19
*/
object NewTest extends App {
val inputPath = "/home/nkatz/dev/MarineTraffic-Infore/events_infore.csv"
val events = Source.fromFile(inputPath).getLines().foldLeft(Set.empty[String]) { (x, y) =>
val split = y.split(",")
val event = split(1)
if (!x.contains(event)) x + event else x
}
println(events)
}
object Test {
class Work
class Reply(val reply: String)
class Master extends Actor {
def receive = {
case _: Work =>
println(s"Starting work at ${System.currentTimeMillis()}")
Thread.sleep(10000)
sender ! new Reply(s"Finished work at ${System.currentTimeMillis()}")
}
}
/*class Worker extends Actor {
def receive = {
case "" => ???
}
}*/
}
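/* Minimal demo of the Akka ask pattern: send a Work message to the master,
   block on the returned Future until the Reply arrives (the master sleeps
   for 10 seconds to simulate work), then shut the actor system down. */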
object Run extends App {
println(s"Starting execution at ${System.currentTimeMillis()}")
val system = ActorSystem("TestActorSystem")
implicit val timeout: Timeout = Timeout(21474800 seconds)
val master = system.actorOf(Props[Master], name = "test-actor")
val future = ask(master, new Work).mapTo[Reply]
val result = Await.result(future, Duration.Inf)
println(result.reply)
system.terminate()
println("Shut down and exit")
}
object SubsumptionTest extends App {
val c1 = Clause.parse("pilotOps(X2,X0,X1) :- lowSpeed(X0,X1),lowSpeed(X2,X1),withinArea(X0,nearPorts,X1),withinArea(X2,nearPorts,X1)")
val c2 = Clause.parse("pilotOps(X0,X2,X1) :- lowSpeed(X0,X1),proximity(X0,X2,X1),proximity(X2,X0,X1),withinArea(X0,nearPorts,X1),withinArea(X2,nearPorts,X1)")
println(c1.thetaSubsumes(c2))
}
| 2,558 | 24.088235 | 160 | scala |
OLED | OLED-master/src/main/scala/woled/Utils.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package woled
import java.io.{File, FileWriter}
import java.util.UUID
/**
* Created by nkatz at 5/10/19
*/
object Utils {
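  /**
    * Dumps a String or an Iterable[String] to a file. If no file name is given,
    * a temporary file is created (and deleted on exit); otherwise howTowrite
    * selects "append" or "overwrite" (the default). Usage sketch (with
    * hypothetical file paths):
    *
    * val tmp = Utils.dumpToFile(Seq("p(a).", "q(b).")) // fresh temp file
    * Utils.dumpToFile("p(a).", "/tmp/out.lp", "append") // append to a named file
    */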
def dumpToFile(input: Any, file: String = "", howTowrite: String = "overwrite") = {
      /* Write an iterable to a file. Usage:
       * listToFile(new File("example.txt"), "overwrite") { p => data.foreach(p.println) }
       */
def listToFile(f: java.io.File, mode: String)(op: java.io.PrintWriter => Unit) {
val p = mode match {
case "append" => new java.io.PrintWriter(new FileWriter(f, true))
case "overwrite" => new java.io.PrintWriter(new FileWriter(f, false))
case _ => new java.io.PrintWriter(new FileWriter(f, false)) // default is overwrite
}
try { op(p) } finally { p.close() }
}
val writeTo =
if (file == "") File.createTempFile(s"temp-${System.currentTimeMillis()}-${UUID.randomUUID.toString}", "asp")
else new File(file)
val deleteOnExit = if (file == "") true else false
val mode = if (file == "") "overwrite" else howTowrite
input match {
case in: Iterable[String] => listToFile(writeTo, mode) { p => in.foreach(p.println) }
case in: String => listToFile(writeTo, mode) { p => Vector(in).foreach(p.println) }
}
if (deleteOnExit) writeTo.deleteOnExit()
writeTo
}
}
| 2,018 | 31.564516 | 115 | scala |
OLED | OLED-master/src/main/scala/woled/WoledUtils.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package woled
import java.text.DecimalFormat
import app.runutils.{Globals, RunningOptions}
import logic.Examples.Example
import logic.{Clause, Constant, Literal, LogicUtils, Theory, Variable}
import lomrf.logic.compile.{NormalForm, PredicateCompletion, PredicateCompletionMode}
import lomrf.logic.{AtomSignature, Constant, EvidenceAtom, FunctionMapping}
import lomrf.logic.parser.KBParser
import lomrf.mln.grounding.MRFBuilder
import lomrf.mln.inference.ILP
import lomrf.mln.learning.structure.ClauseConstructor
import lomrf.mln.model.{AtomIdentityFunction, KB, MLN}
import utils.{ASP, Utils}
import lomrf.mln.model.AtomIdentityFunctionOps._
import lomrf.mln.model.builders.{ConstantsDomainBuilder, EvidenceBuilder}
import scala.collection.mutable
import scala.io.Source
object WoledUtils {
def evalOnTestSet(testData: Iterator[Example], rules: List[Clause], inps: RunningOptions) = {
println("Evaluating on the test set...")
var totalTPs = 0
var totalFPs = 0
var totalFNs = 0
testData foreach { batch =>
val program = {
val nar = batch.narrative.map(_ + ".").mkString("\n")
val include = s"""#include "${inps.globals.BK_WHOLE_EC}"."""
val show = inps.globals.bodyAtomSignatures.map(x => s"#show ${x.tostring}.").mkString("\n")
Vector(nar, include, show)
}
// This transforms the actual data into an MLN-compatible form.
val f = woled.Utils.dumpToFile(program)
val t = ASP.solve(task = Globals.INFERENCE, aspInputFile = f)
val answer = t.head.atoms
val e = new Example(annot = batch.annotation, nar = answer, _time = batch.time)
val inferredState = WoledUtils.getInferredState(Theory.compressTheory(rules), e, Vector.empty[Literal], "MAP", inps)
val inferredTrue = inferredState.filter(x => x._2 && x._1.startsWith("holdsAt")).keySet
val actuallyTrue = e.annotation.toSet
val tps = inferredTrue.intersect(actuallyTrue).size
val fps = inferredTrue.diff(actuallyTrue).size
val fns = actuallyTrue.diff(inferredTrue).size
println(s"TPs, FPs, FNs: $tps, $fps, $fns")
totalTPs += tps
totalFPs += fps
totalFNs += fns
}
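    // Micro-averaged precision/recall/F1 over all test batches.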
val precision = totalTPs.toDouble / (totalTPs + totalFPs)
val recall = totalTPs.toDouble / (totalTPs + totalFNs)
val f1 = 2 * (precision * recall) / (precision + recall)
println(s"F1-score on test set: $f1")
}
def getInferredState(rules: List[Clause], e: Example, inertiaAtoms: Vector[Literal], mode: String, inps: RunningOptions) = {
// Other inference modes will be added here, e.g. WM (experts) and crisp (OLED)
if (mode == "MAP") {
MAPInference(rules, e, inertiaAtoms, inps)
} else {
Map[String, Boolean]()
}
}
def format(x: Double) = {
val defaultNumFormat = new DecimalFormat("0.############")
defaultNumFormat.format(x)
}
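  /**
    * MAP inference via LoMRF: the current weighted rules are translated into MLN
    * definite clauses, combined with the BK through predicate completion,
    * compiled to CNF, grounded into an MRF and solved with ILP. The result maps
    * each ground query atom (converted back to ASP syntax) to a truth value.
    */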
def MAPInference(rules: List[Clause], e: Example, inertiaAtoms: Vector[Literal], inps: RunningOptions) = {
val queryAtoms = Set(
AtomSignature("HoldsAt", 2),
AtomSignature("InitiatedAt", 2),
AtomSignature("TerminatedAt", 2)
)
/* Get BK etc */
val mlnBKFile = s"${inps.entryPath}/MAPInferenceBK.mln"
val (kb, constants) = KB.fromFile(mlnBKFile)
val formulas = kb.formulas
val parser = new KBParser(kb.predicateSchema.map { case (x, y) => x -> y.toVector }, kb.functionSchema)
/* Input definitive clauses, whose structure is learnt over time */
val definiteClauses = rules.map { rule =>
val head = Literal.toMLNClauseLiteral(rule.head.asLiteral).tostringMLN
val body = rule.body.map(Literal.toMLNClauseLiteral(_).tostringMLN).mkString(" ^ ")
parser.parseDefiniteClause(s"${format(rule.weight)} $head :- $body")
}
/* Read the definite clauses from the BK file. FOR DEBUGGING */
//val definiteClauses = kb.definiteClauses
val ee = new Example(annot = e.annotation, nar = e.narrative ++ inertiaAtoms.map(x => x.tostring), _time = e.time)
val (functionMappings, mlnEvidenceAtoms, mlmConstsToAspAtomsMap) = getFunctionMappings(ee, inps.globals.BK_WHOLE_EC)
// Adding constants
val const = ConstantsDomainBuilder.from(constants)
for ((_, (returnValue, symbol)) <- functionMappings) {
val splitFunction = symbol.split(",").toVector
val functionSymbol = splitFunction.head
val args = splitFunction.tail
val (returnValueDomain, argDomains) = kb.functionSchema(AtomSignature(functionSymbol, args.length))
const += returnValueDomain -> returnValue
argDomains.zip(args).foreach { case (domain, value) => const += domain -> value }
}
for (atom <- mlnEvidenceAtoms) {
val args = atom.terms.map(x => lomrf.logic.Constant(x.tostring)).toVector
val domains = kb.predicateSchema(AtomSignature(atom.predSymbol, args.length))
domains.zip(args).foreach { case (domain, value) => const += domain -> value.symbol }
}
/* For CAVIAR fragment */
/*const += "dist" -> "34"
const += "dist" -> "30"
const += "dist" -> "25"
const += "dist" -> "24"
const += "event" -> "Inactive_A"
const += "event" -> "Inactive_B"
const += "event" -> "Walking_A"
const += "event" -> "Walking_Β"
const += "event" -> "Active_A"
const += "event" -> "Active_Β"
const += "event" -> "Disappear_A"
const += "event" -> "Disappear_Β"
const += "event" -> "Appear_A"
const += "event" -> "Appear_Β"
const += "event" -> "Abrupt_A"
const += "event" -> "Abrupt_Β"
const += "event" -> "Running_A"
const += "event" -> "Running_A"*/
/* For the entire CAVIAR */
/*const += "event" -> "Inactive_Id0"
const += "event" -> "Inactive_Id4"
const += "event" -> "Inactive_Id1"
const += "event" -> "Inactive_Id5"
const += "event" -> "Inactive_Id9"
const += "event" -> "Inactive_Id2"
const += "event" -> "Inactive_Id8"
const += "event" -> "Inactive_Id6"
const += "event" -> "Inactive_Id3"
const += "event" -> "Inactive_Id7"
const += "event" -> "Active_Id6"
const += "event" -> "Active_Id3"
const += "event" -> "Active_Id7"
const += "event" -> "Active_Id4"
const += "event" -> "Active_Id5"
const += "event" -> "Active_Id0"
const += "event" -> "Active_Id1"
const += "event" -> "Active_Id9"
const += "event" -> "Active_Id8"
const += "event" -> "Active_Id2"
const += "event" -> "Walking_Id4"
const += "event" -> "Walking_Id5"
const += "event" -> "Walking_Id9"
const += "event" -> "Walking_Id6"
const += "event" -> "Walking_Id7"
const += "event" -> "Walking_Id2"
const += "event" -> "Walking_Id1"
const += "event" -> "Walking_Id3"
const += "event" -> "Walking_Id8"
const += "event" -> "Walking_Id0"
const += "event" -> "Abrupt_Id6"
const += "event" -> "Abrupt_Id5"
const += "event" -> "Abrupt_Id0"
const += "event" -> "Abrupt_Id9"
const += "event" -> "Abrupt_Id4"
const += "event" -> "Abrupt_Id1"
const += "event" -> "Abrupt_Id8"
const += "event" -> "Abrupt_Id2"
const += "event" -> "Abrupt_Id3"
const += "event" -> "Abrupt_Id7"
const += "event" -> "Running_Id7"
const += "event" -> "Running_Id0"
const += "event" -> "Running_Id4"
const += "event" -> "Running_Id6"
const += "event" -> "Running_Id1"
const += "event" -> "Running_Id5"
const += "event" -> "Running_Id9"
const += "event" -> "Running_Id2"
const += "event" -> "Running_Id8"
const += "event" -> "Running_Id3"
const += "event" -> "Appear_Id0"
const += "event" -> "Appear_Id4"
const += "event" -> "Appear_Id3"
const += "event" -> "Appear_Id6"
const += "event" -> "Appear_Id8"
const += "event" -> "Appear_Id2"
const += "event" -> "Appear_Id9"
const += "event" -> "Appear_Id7"
const += "event" -> "Appear_Id1"
const += "event" -> "Appear_Id5"
const += "event" -> "Disappear_Id1"
const += "event" -> "Disappear_Id5"
const += "event" -> "Disappear_Id9"
const += "event" -> "Disappear_Id4"
const += "event" -> "Disappear_Id0"
const += "event" -> "Disappear_Id8"
const += "event" -> "Disappear_Id7"
const += "event" -> "Disappear_Id3"
const += "event" -> "Disappear_Id6"
const += "event" -> "Disappear_Id2"*/
/*val domains = const.result()
domains.foreach { case (name, set) =>
val constants = set.iterator
println(s"$name: [${constants.mkString(",")}]")
}*/
val evidenceBuilder = EvidenceBuilder(kb.predicateSchema, kb.functionSchema, queryAtoms, Set.empty, const.result())
//val evidenceBuilder = EvidenceBuilder(kb.predicateSchema, kb.functionSchema, queryAtoms, Set.empty, domains)
for (entry <- functionMappings) {
val functionReturnConstant = entry._2._1
val functionStr = entry._2._2
val splitFunction = functionStr.split(",").toList
val functionSymbol = splitFunction.head
val functionArgs = splitFunction.tail.toVector
evidenceBuilder.functions += new FunctionMapping(functionReturnConstant, functionSymbol, functionArgs)
}
/* For now we need these hard-coded. It's a LoMRF limitation not being able to have them dynamically*/
// This is for CAVIAR fragment
/*evidenceBuilder.functions += new FunctionMapping("Inactive_A", "inactive", Vector("A"))
evidenceBuilder.functions += new FunctionMapping("Inactive_B", "inactive", Vector("B"))
evidenceBuilder.functions += new FunctionMapping("Walking_A", "walking", Vector("A"))
evidenceBuilder.functions += new FunctionMapping("Walking_B", "walking", Vector("B"))
evidenceBuilder.functions += new FunctionMapping("Active_A", "active", Vector("A"))
evidenceBuilder.functions += new FunctionMapping("Active_B", "active", Vector("B"))
evidenceBuilder.functions += new FunctionMapping("Disappear_A", "disappear", Vector("A"))
evidenceBuilder.functions += new FunctionMapping("Disappear_B", "disappear", Vector("B"))
evidenceBuilder.functions += new FunctionMapping("Appear_A", "appear", Vector("A"))
evidenceBuilder.functions += new FunctionMapping("Appear_B", "appear", Vector("B"))
evidenceBuilder.functions += new FunctionMapping("Abrupt_A", "abrupt", Vector("A"))
evidenceBuilder.functions += new FunctionMapping("Abrupt_B", "abrupt", Vector("B"))
evidenceBuilder.functions += new FunctionMapping("Running_A", "running", Vector("A"))
evidenceBuilder.functions += new FunctionMapping("Running_B", "running", Vector("B"))*/
// This is for the entire CAVIAR
evidenceBuilder.functions += new FunctionMapping("ADSANdlj", "active", Vector("ref"))
evidenceBuilder.functions += new FunctionMapping("ADSANdlj", "appear", Vector("ref"))
evidenceBuilder.functions += new FunctionMapping("ADSANdlj", "inactive", Vector("ref"))
evidenceBuilder.functions += new FunctionMapping("ADSANdlj", "walking", Vector("ref"))
evidenceBuilder.functions += new FunctionMapping("ADSANdlj", "abrupt", Vector("ref"))
evidenceBuilder.functions += new FunctionMapping("ADSANdlj", "running", Vector("ref"))
evidenceBuilder.functions += new FunctionMapping("ADSANdlj", "disappear", Vector("ref"))
/*evidenceBuilder.functions += new FunctionMapping("Inactive_Id0", "inactive", Vector("Id0"))
evidenceBuilder.functions += new FunctionMapping("Inactive_Id4", "inactive", Vector("Id4"))
evidenceBuilder.functions += new FunctionMapping("Inactive_Id1", "inactive", Vector("Id1"))
evidenceBuilder.functions += new FunctionMapping("Inactive_Id5", "inactive", Vector("Id5"))
evidenceBuilder.functions += new FunctionMapping("Inactive_Id9", "inactive", Vector("Id9"))
evidenceBuilder.functions += new FunctionMapping("Inactive_Id2", "inactive", Vector("Id2"))
evidenceBuilder.functions += new FunctionMapping("Inactive_Id8", "inactive", Vector("Id8"))
evidenceBuilder.functions += new FunctionMapping("Inactive_Id6", "inactive", Vector("Id6"))
evidenceBuilder.functions += new FunctionMapping("Inactive_Id3", "inactive", Vector("Id3"))
evidenceBuilder.functions += new FunctionMapping("Inactive_Id7", "inactive", Vector("Id7"))
evidenceBuilder.functions += new FunctionMapping("Active_Id6", "active", Vector("Id6"))
evidenceBuilder.functions += new FunctionMapping("Active_Id3", "active", Vector("Id3"))
evidenceBuilder.functions += new FunctionMapping("Active_Id7", "active", Vector("Id7"))
evidenceBuilder.functions += new FunctionMapping("Active_Id4", "active", Vector("Id4"))
evidenceBuilder.functions += new FunctionMapping("Active_Id5", "active", Vector("Id5"))
evidenceBuilder.functions += new FunctionMapping("Active_Id0", "active", Vector("Id0"))
evidenceBuilder.functions += new FunctionMapping("Active_Id1", "active", Vector("Id1"))
evidenceBuilder.functions += new FunctionMapping("Active_Id9", "active", Vector("Id9"))
evidenceBuilder.functions += new FunctionMapping("Active_Id8", "active", Vector("Id8"))
evidenceBuilder.functions += new FunctionMapping("Active_Id2", "active", Vector("Id2"))
evidenceBuilder.functions += new FunctionMapping("Walking_Id4", "walking", Vector("Id4"))
evidenceBuilder.functions += new FunctionMapping("Walking_Id5", "walking", Vector("Id5"))
evidenceBuilder.functions += new FunctionMapping("Walking_Id9", "walking", Vector("Id9"))
evidenceBuilder.functions += new FunctionMapping("Walking_Id6", "walking", Vector("Id6"))
evidenceBuilder.functions += new FunctionMapping("Walking_Id7", "walking", Vector("Id7"))
evidenceBuilder.functions += new FunctionMapping("Walking_Id2", "walking", Vector("Id2"))
evidenceBuilder.functions += new FunctionMapping("Walking_Id1", "walking", Vector("Id1"))
evidenceBuilder.functions += new FunctionMapping("Walking_Id3", "walking", Vector("Id3"))
evidenceBuilder.functions += new FunctionMapping("Walking_Id8", "walking", Vector("Id8"))
evidenceBuilder.functions += new FunctionMapping("Walking_Id0", "walking", Vector("Id0"))
evidenceBuilder.functions += new FunctionMapping("Abrupt_Id6", "abrupt", Vector("Id6"))
evidenceBuilder.functions += new FunctionMapping("Abrupt_Id5", "abrupt", Vector("Id5"))
evidenceBuilder.functions += new FunctionMapping("Abrupt_Id0", "abrupt", Vector("Id0"))
evidenceBuilder.functions += new FunctionMapping("Abrupt_Id9", "abrupt", Vector("Id9"))
evidenceBuilder.functions += new FunctionMapping("Abrupt_Id4", "abrupt", Vector("Id4"))
evidenceBuilder.functions += new FunctionMapping("Abrupt_Id1", "abrupt", Vector("Id1"))
evidenceBuilder.functions += new FunctionMapping("Abrupt_Id8", "abrupt", Vector("Id8"))
evidenceBuilder.functions += new FunctionMapping("Abrupt_Id2", "abrupt", Vector("Id2"))
evidenceBuilder.functions += new FunctionMapping("Abrupt_Id3", "abrupt", Vector("Id3"))
evidenceBuilder.functions += new FunctionMapping("Abrupt_Id7", "abrupt", Vector("Id7"))
evidenceBuilder.functions += new FunctionMapping("Running_Id7", "running", Vector("Id7"))
evidenceBuilder.functions += new FunctionMapping("Running_Id0", "running", Vector("Id0"))
evidenceBuilder.functions += new FunctionMapping("Running_Id4", "running", Vector("Id4"))
evidenceBuilder.functions += new FunctionMapping("Running_Id6", "running", Vector("Id6"))
evidenceBuilder.functions += new FunctionMapping("Running_Id1", "running", Vector("Id1"))
evidenceBuilder.functions += new FunctionMapping("Running_Id5", "running", Vector("Id5"))
evidenceBuilder.functions += new FunctionMapping("Running_Id9", "running", Vector("Id9"))
evidenceBuilder.functions += new FunctionMapping("Running_Id2", "running", Vector("Id2"))
evidenceBuilder.functions += new FunctionMapping("Running_Id8", "running", Vector("Id8"))
evidenceBuilder.functions += new FunctionMapping("Running_Id3", "running", Vector("Id3"))
evidenceBuilder.functions += new FunctionMapping("Appear_Id0", "appear", Vector("Id0"))
evidenceBuilder.functions += new FunctionMapping("Appear_Id4", "appear", Vector("Id4"))
evidenceBuilder.functions += new FunctionMapping("Appear_Id3", "appear", Vector("Id3"))
evidenceBuilder.functions += new FunctionMapping("Appear_Id6", "appear", Vector("Id6"))
evidenceBuilder.functions += new FunctionMapping("Appear_Id8", "appear", Vector("Id8"))
evidenceBuilder.functions += new FunctionMapping("Appear_Id2", "appear", Vector("Id2"))
evidenceBuilder.functions += new FunctionMapping("Appear_Id9", "appear", Vector("Id9"))
evidenceBuilder.functions += new FunctionMapping("Appear_Id7", "appear", Vector("Id7"))
evidenceBuilder.functions += new FunctionMapping("Appear_Id1", "appear", Vector("Id1"))
evidenceBuilder.functions += new FunctionMapping("Appear_Id5", "appear", Vector("Id5"))
evidenceBuilder.functions += new FunctionMapping("Disappear_Id1", "disappear", Vector("Id1"))
evidenceBuilder.functions += new FunctionMapping("Disappear_Id5", "disappear", Vector("Id5"))
evidenceBuilder.functions += new FunctionMapping("Disappear_Id9", "disappear", Vector("Id9"))
evidenceBuilder.functions += new FunctionMapping("Disappear_Id4", "disappear", Vector("Id4"))
evidenceBuilder.functions += new FunctionMapping("Disappear_Id0", "disappear", Vector("Id0"))
evidenceBuilder.functions += new FunctionMapping("Disappear_Id8", "disappear", Vector("Id8"))
evidenceBuilder.functions += new FunctionMapping("Disappear_Id7", "disappear", Vector("Id7"))
evidenceBuilder.functions += new FunctionMapping("Disappear_Id3", "disappear", Vector("Id3"))
evidenceBuilder.functions += new FunctionMapping("Disappear_Id6", "disappear", Vector("Id6"))
evidenceBuilder.functions += new FunctionMapping("Disappear_Id2", "disappear", Vector("Id2"))*/
for (atom <- mlnEvidenceAtoms) {
val predicate = atom.predSymbol
val args = atom.terms.map(x => lomrf.logic.Constant(x.tostring)).toVector
evidenceBuilder.evidence += EvidenceAtom.asTrue(predicate, args)
}
for (atom <- inertiaAtoms) {
val predicate = atom.predSymbol.capitalize
val fluent = atom.terms.head.asInstanceOf[Literal]
val fluentConst = s"${fluent.predSymbol.capitalize}_${fluent.terms.map(x => x.tostring.capitalize).mkString("_")}"
if (!mlmConstsToAspAtomsMap.keySet.contains(fluentConst)) mlmConstsToAspAtomsMap(fluentConst) = fluent.tostring
val timeConst = atom.terms.tail.head.tostring
val args = Vector(fluentConst, timeConst).map(x => lomrf.logic.Constant(x)).toVector
evidenceBuilder.evidence += EvidenceAtom.asTrue(predicate, args)
}
val evidence = evidenceBuilder.result()
/*evidence.db.foreach { case (signature, aedb) =>
println(s"Evidence for atom signature $signature")
println("Summary:\n" + aedb)
for (id <- aedb.identity.indices) {
val args = aedb.identity.decode(id).get
val tv = aedb(id)
println(s"${signature.symbol}(${args.mkString(",")}) = $tv")
}
}*/
println(" Predicate completion...")
val resultedFormulas = PredicateCompletion(formulas, definiteClauses.toSet, PredicateCompletionMode.Decomposed)(kb.predicateSchema, kb.functionSchema, constants)
val cnf = NormalForm.compileFastCNF(resultedFormulas)(constants).toVector
// This prints out the lifted rules in CNF form.
//println(cnf.map(_.toText()).mkString("\n"))
val mln = MLN(kb.schema, evidence, queryAtoms, cnf)
val builder = new MRFBuilder(mln, createDependencyMap = true)
val mrf = builder.buildNetwork
/* FOR DEBUGGING (print out the ground program) */
/*val constraints = mrf.constraints.iterator()
while (constraints.hasNext) {
constraints.advance()
val constraint = constraints.value()
println(constraint.decodeFeature(10000)(mln))
}*/
val solver = ILP(mrf)
//solver.infer()
println(" Calling the solver...")
val s = solver.infer
var result = Map.empty[String, Boolean]
val it = s.mrf.atoms.iterator()
while (it.hasNext) {
it.advance()
val a = it.value()
val atom = a.id.decodeAtom(mln).get
val state = a.getState
// keep only inferred as true atoms, get the rest via CWA.
//if (state) result += atom -> state //result += atom -> state
result += atom -> state
}
val resultToASP = inferredStateToASP(result, mlmConstsToAspAtomsMap)
resultToASP //result
}
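  /**
    * Converts a MAP-inferred state from MLN to ASP syntax, e.g.
    * HoldsAt(Meeting_Id0_Id2,10) becomes holdsAt(meeting(id0,id2),10), using
    * the MLN-constant-to-ASP-atom map built during evidence generation.
    */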
def inferredStateToASP(mapState: Map[String, Boolean], mlmConstsToAspAtomsMap: mutable.Map[String, String]) = {
mapState.map { case (mlnAtom, truthValue) =>
val _mlnAtom = {
val (head, tail) = (mlnAtom.head, mlnAtom.tail)
head.toString.toLowerCase() + tail
}
val aspAtom = mlmConstsToAspAtomsMap.foldLeft(_mlnAtom) { (x, y) =>
x.replaceAll(y._1, y._2)
}
aspAtom -> truthValue
}
}
/*
This method extracts function mappings from the current batch, stuff like
Running_ID0 = running(ID0)
Enter_ID0 = enter(ID0)
Meeting_ID0_ID0 = meeting(ID0, ID0)...
It also converts ASP evidence atoms to MLN evidence atoms and
generates next/2 instances for LoMRF. It needs to extract fluent/1, event/1 and next/2 signatures form the batch data.
*/
def getFunctionMappings(exmpl: Example, bkFile: String) = {
var functionMappings = scala.collection.mutable.Map[String, (String, String)]()
    val additionalDirectives = s"event(X) :- happensAt(X,_).\n#show.\n#show event/1.\n#show fluent_all/1.\n#show next/2."
val source = Source.fromFile(bkFile)
var bk = source.getLines().toList
source.close()
    bk = bk :+ additionalDirectives
val all = (exmpl.annotationASP ++ exmpl.narrativeASP ++ bk)
val file = utils.Utils.getTempFile("function-mappings", ".lp")
utils.Utils.writeLine(all.mkString("\n"), file.getCanonicalPath, "overwrite")
val stuff = ASP.solve(task = Globals.INFERENCE, aspInputFile = file).head.atoms
val (fluents, events, nextAtoms) = stuff.foldLeft(List[String](), List[String](), List[String]()) { (x, y) =>
if (y.startsWith("fluent")) (x._1 :+ y, x._2, x._3)
else if (y.startsWith("event")) (x._1, x._2 :+ y, x._3)
else if (y.startsWith("next")) (x._1, x._2, x._3 :+ y)
else throw new RuntimeException(s"Unexpected input: $y")
}
// Populate the function mapping map.
(fluents ++ events) foreach { x =>
val parsed = Literal.parse(x)
val atom = parsed.terms.head
val atomStr = atom.tostring
val functor = atom.asInstanceOf[Literal].predSymbol
val args = atom.asInstanceOf[Literal].terms
// This is the term that represents the MLN constant (function value) that is generated from this atom.
// For instance, given the atom meeting(id0,id2), the corresponding constant term is Meeting_Id0_Id2
val constantTerm = s"${functor.capitalize}_${args.map(_.tostring.capitalize).mkString("_")}"
      // This represents the MLN function that corresponds to this term.
      // For instance, given the atom meeting(id0,id2), the corresponding function term is meeting(Id0,Id2).
      // It is represented by the string "meeting,Id0,Id2", so that the function symbol and the arguments may
      // be easily extracted by splitting on "," in MAPInference, generating the input for populating the
      // functions of the evidence builder object.
val functionTerm = s"$functor,${args.map(_.tostring.capitalize).mkString(",")}"
if (!functionMappings.keySet.contains(atomStr)) functionMappings += atomStr -> (constantTerm, functionTerm)
}
file.delete()
// These are used to match the terms in the atoms of the MAP-inferred state and facilitate the conversion to ASP form.
// It is a map of the form
// Meeting_ID0_ID2 -> meeting(id0,id2)
val MLNConstantsToASPAtomsMap = functionMappings.map{ case (k, v) => v._1 -> k }
// Convert ASP atoms to MLN representation.
val MLNEvidenceAtoms = (exmpl.narrative ++ nextAtoms).map { x =>
val parsed = Literal.parse(x)
Literal.toMLNFlat(parsed)
}
(functionMappings, MLNEvidenceAtoms, MLNConstantsToASPAtomsMap)
}
}
| 24,859 | 48.423459 | 165 | scala |
OLED | OLED-master/src/main/scala/xhail/Xhail.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package xhail
import java.io.File
import app.runutils.Globals
import com.typesafe.scalalogging._
import logic.Exceptions._
import logic.Modes._
import logic.Rules._
import logic._
import utils.{ASP, MongoUtils, Utils}
import utils.parsers.ASPResultsParser
import scala.collection.mutable.ListBuffer
import scala.io.Source
import scala.util.matching.Regex
object Xhail extends ASPResultsParser with LazyLogging {
val showTheory = (x: List[Clause]) => x.map { x => x.tostring }.mkString("\n")
var outTheory: Theory = Theory()
def main(args: Array[String]) {
Globals.glvalues("perfect-fit") = "false"
//Core.setGlobalParams(args)
val entryPath = args(0)
val fromDB = args(1)
val globals = new Globals(entryPath)
runXhail(fromDB = fromDB, bkFile = globals.BK_WHOLE_EC, globals = globals)
}
def runXhail(
fromFile: String = "",
fromDB: String = "",
inputDirectory: String = "",
kernelSetOnly: Boolean = false,
learningTerminatedAtOnly: Boolean = false,
//keepAbducedPreds: String = "all",
fromWeakExmpl: Boolean = false,
bkFile: String,
globals: Globals): (List[Clause], List[Clause]) = {
val matches = (p: Regex, str: String) => p.pattern.matcher(str).matches
val examples: Map[String, List[String]] = fromFile match {
case "" =>
fromDB match {
case "" => throw new TrainingExamplesException("Provide a file or a mongo DB with the training examples.")
case _ => Utils.getAllExamples(fromDB, "examples")
}
case _ =>
val all =
Source.fromFile(fromFile).getLines.toList.map(x => x.replaceAll("\\s", "")).filter(line => !matches("""""".r, line))
val annotation = all.filter { x => x.contains("example(") }
val narrative = all.filter { x => !x.contains("example(") }
Map("annotation" -> annotation, "narrative" -> narrative)
}
val aspFile: File = Utils.getTempFile("aspinput", ".lp")
val abdModels: List[AnswerSet] =
abduce("modehs", examples = examples, learningTerminatedAtOnly = learningTerminatedAtOnly, fromWeakExmpl = fromWeakExmpl, bkFile = bkFile, globals = globals)
//if (abdModel != Nil) logger.info("Created Delta set")
//println(abdModels)
val (kernel, varKernel) = abdModels match {
case Nil => (List[Clause](), List[Clause]())
case _ =>
if (!Globals.glvalues("iter-deepening").toBoolean) {
//val abduced = if(learningTerminatedAtOnly) abdModels.head.atoms.filter(_.contains("terminatedAt")) else abdModels.head.atoms
val abduced =
/*
if(!oledLearningInitWithInertia) {
if(learningTerminatedAtOnly) abdModels.head.atoms.filter(_.contains("terminatedAt")) else abdModels.head.atoms
} else {
//if(learningTerminatedAtOnly) abdModels.head.atoms.filter(_.contains("terminatedAt")) else abdModels.head.atoms
if(learningTerminatedAtOnly) abdModels.head.atoms.filter(_.contains("terminatedAt")) else abdModels.head.atoms.filter(_.contains("initiatedAt"))
}
*/
if (learningTerminatedAtOnly) abdModels.head.atoms.filter(_.contains("terminatedAt")) else abdModels.head.atoms
//if(learningTerminatedAtOnly) abdModels.head.atoms.filter(_.contains("terminatedAt")) else abdModels.head.atoms.filter(_.contains("initiatedAt"))
generateKernel(abduced, examples = examples, aspInputFile = aspFile, bkFile = bkFile, globals = globals)
} else {
return iterativeSearch(abdModels, examples, kernelSetOnly, bkFile, globals) // this is used from ILED to find a kernel with iterative search
}
}
if (!kernelSetOnly) findHypothesis(varKernel, examples = examples, globals = globals)
(kernel, varKernel)
}
def iterativeSearch(models: List[AnswerSet], e: Map[String, List[String]], kernelSetOnly: Boolean, bkFile: String, globals: Globals) = {
val findHypothesisIterativeSearch = (varKernel: List[Clause], examples: Map[String, List[String]]) => {
val aspFile: File = Utils.getTempFile("aspInduction", ".lp")
val (_, use2AtomsMap, _, _, _, _) = ASP.inductionASPProgram(kernelSet = Theory(varKernel), examples = examples, aspInputFile = aspFile, globals = globals)
ASP.solve(Globals.SEARCH_MODELS, use2AtomsMap, examples = examples, aspInputFile = aspFile)
}
var foundHypothesis = false
var modelCounter = 0
var kernel = (List[Clause](), List[Clause]())
while (!foundHypothesis) {
if (modelCounter == models.length) { // We tried all models and failed to find a hypothesis
throw new RuntimeException("Failed to find a hypothesis with iterative search")
}
val model = models(modelCounter)
logger.info("Trying and alternative adbuctive explanation:")
kernel = generateKernel(model.atoms, examples = e, aspInputFile = Utils.getTempFile("iterSearch", "lp"), bkFile = bkFile, globals = globals)
val tryNew = findHypothesisIterativeSearch(kernel._2, e)
if (tryNew.nonEmpty && tryNew.head != AnswerSet.UNSAT) {
foundHypothesis = true
} else {
logger.info("Failed to generalize the Kernel set.")
}
modelCounter = modelCounter + 1
}
if (!kernelSetOnly) findHypothesis(kernel._2, examples = e, globals = globals)
kernel
}
/**
* Prepares the ASP input for abduction and calls the ASP solver to get the results.
*
* @param abducibles a flag to indicate where to get the abducible predicates from.
* Currently the only acceptable flag is "modehs", meaning that abducible predicates
* are the head mode declaration atoms.
   * @param numberOfModels an upper bound on the number of models. Currently this is not
   * used anywhere.
   * @param useMatchModesProgram if true, this method creates an additional program that allows
   * pairing an abduced atom with its matching mode atom, on the ASP side.
* @throws AbductionException in case of mistaken or missing abducibles flag.
* @todo implement the case where abducible predicates are explicitly provided.
* (see comments in code).
* @todo implement iterative deepening.
*/
def abduce(
abducibles: Any,
numberOfModels: Int = 1000,
useMatchModesProgram: Boolean = true,
examples: Map[String, List[String]],
learningTerminatedAtOnly: Boolean = false,
fromWeakExmpl: Boolean = false,
bkFile: String, globals: Globals): List[AnswerSet] = {
val aspFile: File = Utils.getTempFile("aspinput", ".lp", "", deleteOnExit = true)
val varbedExmplPatterns = globals.EXAMPLE_PATTERNS
def getASPinput() = {
globals.MODEHS match {
case Nil => throw new RuntimeException("No Mode Declarations found.")
case _ =>
val varbedMHAtoms = globals.MODEHS map (x => x.varbed)
val generate: List[String] = varbedMHAtoms.map(
x => s"{${x.tostring}} :- " + x.typePreds.mkString(",") + "."
)
// Generate minimize statement
val minimize = Globals.glvalues("iter-deepening") match {
case "false" =>
if (Globals.glvalues("perfect-fit").toBoolean) {
"\n#minimize{\n" + (varbedMHAtoms map (
x => "1," + (x.variables(globals) map (y => y.tostring)).mkString(",") + s":${x.tostring}")
).mkString(";\n") + "\n}."
} else {
val f = (x: Literal) => "1," + (x.variables(globals) map (y => y.tostring)).mkString(",")
val ff = varbedExmplPatterns.map(x =>
s"${f(x)},posNotCovered(${x.tostring}):example(${x.tostring})," +
s" not ${x.tostring};\n${f(x)},negsCovered(${x.tostring}):${x.tostring}," +
s" not example(${x.tostring})").mkString(";")
"\n#minimize{\n" + (varbedMHAtoms map (
x => "1," + (x.variables(globals) map (y => y.tostring)).mkString(",") + s":${x.tostring}")
).mkString(";\n") + s";\n$ff\n}."
}
// If we want iterative deepening then we drop the minimize statements
// Also to use iterative deepening we'll have to pass the required generateAtLeast
// and generateAtMost parameters.
case _ => ""
}
val coverageConstr: List[String] =
if (Globals.glvalues("perfect-fit").toBoolean) {
ASP.getCoverageDirectives(learningTerminatedAtOnly, globals = globals)
} else {
val z = varbedExmplPatterns.map { x =>
s"\nposNotCovered(${x.tostring}) :- example(${x.tostring}), not ${x.tostring}." +
s"\nnegsCovered(${x.tostring}) :- ${x.tostring}, not example(${x.tostring})."
}.mkString("\n")
List(z)
}
val modeMatchingProgram =
if (useMatchModesProgram)
ASP.matchModesProgram(globals.MODEHS.map(x => x.varbed))
else List()
ASP.toASPprogram(
program = List(s"#include " +
"\"" + bkFile + "\"" + ".\n\n") ++
examples("annotation") ++ List("\n\n") ++
examples("narrative") ++ List("\n\n") ++
coverageConstr ++ generate ++ List(minimize),
extra = modeMatchingProgram,
writeToFile = aspFile.getCanonicalPath)
}
}
abducibles match {
case "modehs" => getASPinput()
/* This is for the case where abducibles are explicitly given.
*
* @todo: Implement this logic
*
* */
case _: List[Any] => throw new AbductionException("This logic has not been implemented yet.")
case _ => throw new AbductionException("You need to specify the abducible predicates.")
}
ASP.solve(Globals.ABDUCTION, examples = examples, aspInputFile = aspFile, fromWeakExmpl = fromWeakExmpl)
}
/**
* Generates a Kernel Set.
*
   * @param abdModel the list of atoms previously abduced.
* @return the ground and variabilized Kernel Set in a tuple
* @todo Need to fix the body generation loop: Each constant that corresponds to an output placemarker
* must be added to the initial (but growing) set of input terms, used to generate instances of body atoms.
*/
def generateKernel(
abdModel: List[String],
alternativePath: String = "",
examples: Map[String, List[String]],
aspInputFile: java.io.File,
bkFile: String,
globals: Globals): (List[Clause], List[Clause]) = {
//val bkFile = globals.BK_WHOLE_EC
def replaceQuotedVars(x: String) = {
val varPattern = "\"([A-Z][A-Za-z0-9_])*\"".r
val matches = varPattern.findAllIn(x).toList
val vars = matches.map(x => x.replaceAll("\"", ""))
val zipped = matches zip vars
zipped.foldLeft(x){ (p, q) =>
val quotedVar = q._1
val strippedVar = q._2
p.replaceAll(quotedVar, strippedVar)
}
}
def groundBodyModesWithInTerms(interms: List[Expression]): List[(List[String], ModeAtom)] = {
val filterout = (x: String, y: Regex, z: List[String]) => z.filter(e => !y.findAllIn(x).toList.map(q => replaceQuotedVars(q)).exists(e.contains(_)))
val p: List[String] =
for (
x <- interms;
pred = x.asInstanceOf[Constant]._type;
arg = x.asInstanceOf[Constant].name
) yield s"$pred($arg)."
val mapping = (globals.MODEBS map (x => (globals.MODEBS.indexOf(x), x))).toMap
val groundModes =
for (
x <- globals.MODEBS;
varb = x.varbed;
quoted = x.varbed.tostringQuote;
quoatedNoNeg = x.varbed.nonNegated.tostringQuote;
filtered = filterout(quoted, "\"([A-Za-z0-9_])*\"".r, varb.typePreds)
) yield // surround with triple quotes to allow double quotes in the string
//s"""ground(${globals.MODEBS.indexOf(x)},$quoatedNoNeg) :- ${filterout(quoted, "\"([A-Za-z0-9_])*\"".r, varb.typePreds).mkString(",")}."""
if (filtered.nonEmpty) {
s"""ground(${globals.MODEBS.indexOf(x)},$quoatedNoNeg) :- ${filtered.mkString(",")}."""
} else {
s"""ground(${globals.MODEBS.indexOf(x)},$quoatedNoNeg) :- #true."""
}
ASP.toASPprogram(
program = p ++ groundModes ++ List("\n\n#show ground/2."),
writeToFile = aspInputFile.getCanonicalPath)
val q = ASP.solve("getQueries", examples = examples, aspInputFile = aspInputFile)
/*
val result =
(for (x <- q.head.atoms;
tolit = Literal.toLiteral(x);
mode = mapping(tolit.terms.head.name.toInt);
groundTerm = tolit.terms(1))
yield (mode,groundTerm)).groupBy(_._1).mapValues(p => p map (q => q._2)).map(z => (z._2.map(k=>k.tostring),z._1)).toList
*/
//println(q)
val result =
(for (
x <- q.head.atoms;
tolit = Literal.parse(x);
mode = mapping(tolit.terms.head.name.toInt);
groundTerm = tolit.terms(1)
) yield (mode, groundTerm)).groupBy(_._1).toList map { case (k, v) => (for ((_, m) <- v) yield m.tostring, k) }
result
}
//println(abdModel)
// Map[Modes.ModeAtom, List[(Modes.ModeAtom, Expression)]]
val abducedAtoms: List[Literal] = for (
x <- abdModel;
tolit = Literal.parse(x);
(atom, modeAtom) = try {
(tolit.terms(1), globals.MODEHS(tolit.terms.head.asInstanceOf[Constant].name.toInt - 1))
} catch {
case e: java.lang.ClassCastException => (tolit, tolit.matchingMode(globals))
}
) yield Literal.toLiteral2(atom.asInstanceOf[Literal], modeAtom.asInstanceOf[ModeAtom])
var kernelSet = new ListBuffer[Clause]()
if (Globals.glvalues("iter-deepening").toBoolean) logger.info(abducedAtoms.map(_.tostring).mkString(" "))
for (x <- abducedAtoms) {
var body = new ListBuffer[Literal]()
val (_interms, _, _) = x.placeMarkers
val interms = _interms.to[ListBuffer]
var solution = List[AnswerSet]()
for (i <- 0 to Globals.glvalues("variableDepth").toInt) {
val queries = groundBodyModesWithInTerms(interms.toList)
val deduce =
(for (
(queryList, mAtom) <- queries;
show = queryList map (x => replaceQuotedVars(x)) map (x =>
//"\n#show " + "ifTrue("+Core.modebs.indexOf(mAtom)+","+x+")" + ":" + (if (!mAtom.isNAF) x else "not "+x) + ".\n")
s"\n#show ifTrue(${globals.MODEBS.indexOf(mAtom)},$x) : ${if (!mAtom.isNAF) x else "not " + x}, ${Literal.types(x, mAtom, globals)}."
)
) yield show).flatten
val program = abdModel.map(x => x + ".")
ASP.toASPprogram(program =
examples("annotation") ++
examples("narrative") ++
program ++
List(s"\n#include " + "\"" + bkFile + "\".") ++
List("\n#show.\n") ++ deduce, writeToFile = aspInputFile.getCanonicalPath)
solution = ASP.solve("deduction", examples = examples, aspInputFile = aspInputFile)
if (solution.nonEmpty) {
val f = (x: (Expression, Expression)) => {
val mode = globals.MODEBS(x._1.asInstanceOf[Constant].name.toInt)
val lit =
if (mode.isNAF) {
Literal.toLiteral2(x._2.asInstanceOf[Literal]).negated
} else {
Literal.toLiteral2(x._2.asInstanceOf[Literal]).nonNegated
}
Literal.toLiteral2(lit, mode)
}
val _b = solution.head.atoms.asInstanceOf[List[String]].distinct map (
x => Literal.parse(x)
) map (
x => (x.terms.head, x.terms(1))
) map (
//x => Literal.toLiteral2(x._2.asInstanceOf[Literal], Core.modebs(x._1.asInstanceOf[Constant].name.toInt))
x => f(x)
)
///*
      // A temporary optimization to make WOLED a bit faster: such redundant atoms
      // are pruned automatically by Clingo, but they cannot be removed from LoMRF.
      // This needs a proper fix.
val b = _b.filter{ x =>
x.predSymbol != "close" || (x.predSymbol == "close" && x.terms(0).name != x.terms(1).name)
}
//*/
//val b = _b
for (k <- b) {
//if (!body.contains(k)) body ++= b
if (!body.exists(x => x.tostring == k.tostring)) body += k
val (_, outTerms, _) = k.placeMarkers
interms ++= outTerms
}
}
}
if (solution.nonEmpty) {
val kernelClause = Clause(x.asPosLiteral, body.toList.distinct)
kernelSet += kernelClause
}
}
val _varKernel = kernelSet.map(x => x.varbed)
// Remove redundant comparison literals for the variabilized kernel to simplify things...
val varKernel = _varKernel.map(x => LogicUtils.simplifyRule(x, globals))
//val varKernel = _varKernel
val vlength = varKernel.length
val compressed = if (Globals.glvalues("compressKernels").toBoolean) compressTheory(varKernel.toList) else varKernel.toList
compressed foreach (x => x.isBottomRule = true)
val clength = compressed.length
val nonEmptyVarKernel = compressed.filter(x => x.body.nonEmpty)
//val nonEmptyKernel = kernelSet.filter(x => x.body.nonEmpty)
if (nonEmptyVarKernel.nonEmpty) {
//logger.info(s"Created Kernel set:\n${nonEmptyVarKernel.map(x => x.tostring).mkString("\n")}")
logger.info(s"Created Kernel set")
logger.debug("\n------------------------------------------------------------------------------------\n" +
s"Kernel Set (Ground---Variabilized($vlength clauses)---Compressed($clength clauses)):" +
"\n------------------------------------------------------------------------------------\n" +
showTheory(kernelSet.toList) + "\n\n" + showTheory(varKernel.toList) + "\n\n" + showTheory(compressed.toList))
//println(Theory(kernelSet.toList).tostring)
}
(kernelSet.toList, compressed)
}
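  /**
    * Removes duplicates from a kernel set: a clause is dropped if a
    * theta-subsumption-equivalent clause (each subsumes the other) has
    * already been kept.
    */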
def compressTheory(kernel: List[Clause]): List[Clause] = {
val compressed = new ListBuffer[Clause]
val included = (c: Clause) => compressed.toList.exists(x => x.thetaSubsumes(c) && c.thetaSubsumes(x))
for (c <- kernel) {
//val others = kernel.filter(x => x != c)
if (!included(c)) compressed += c
}
compressed.toList
}
def findHypothesis(varKernel: List[Clause], examples: Map[String, List[String]], globals: Globals) = {
Globals.glvalues("perfect-fit") = "false"
val aspFile: File = Utils.getTempFile("aspInduction", ".lp", "", deleteOnExit = true)
val (_, use2AtomsMap, _, _, _, _) =
ASP.inductionASPProgram(kernelSet = Theory(varKernel), examples = examples, aspInputFile = aspFile, globals = globals)
logger.info("Searching the Kernel Set for a hypothesis")
val models = ASP.solve("xhail", use2AtomsMap, examples = examples, aspInputFile = aspFile)
// I only keep the final, most compressive hypothesis.
    // To see and evaluate all discovered hypotheses, simply iterate over the models.
models foreach println
val finalModel = models.head.atoms
println("final:", finalModel)
getNewRules(finalModel, use2AtomsMap)
}
}
| 20,479 | 43.137931 | 179 | scala |
deep-translator | deep-translator-master/.pre-commit-config.yaml | default_language_version:
python: python3
default_stages: [commit, push]
ci:
autofix_commit_msg: |
[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
autofix_prs: true
autoupdate_branch: ''
autoupdate_commit_msg: '[pre-commit.ci] pre-commit autoupdate'
autoupdate_schedule: 'quarterly'
skip: []
submodules: false
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: check-toml
- id: check-yaml
- id: end-of-file-fixer
- id: trailing-whitespace
- id: mixed-line-ending
- id: detect-private-key
- id: name-tests-test
args: [--pytest-test-first]
- repo: https://github.com/psf/black
rev: 23.1.0
hooks:
- id: black
- repo: https://github.com/PyCQA/flake8.git
rev: 16c371d41cd742f975171826de0ad5d707162c1d
hooks:
- id: flake8
- repo: https://github.com/hadialqattan/pycln
rev: v2.1.3
hooks:
- id: pycln
args: [--config=pyproject.toml]
- repo: https://github.com/pycqa/isort
rev: 5.11.5
hooks:
- id: isort
files: "\\.(py)$"
args: ["--profile", "black"]
- repo: https://github.com/tox-dev/pyproject-fmt
rev: 0.9.0
hooks:
- id: pyproject-fmt
- repo: https://github.com/pre-commit/pygrep-hooks
rev: v1.10.0
hooks:
- id: python-check-blanket-noqa
- id: python-check-mock-methods
- id: python-no-eval
- id: python-no-log-warn
- id: rst-backticks
- id: rst-directive-colons
| 1,708 | 26.564516 | 66 | yaml |
deep-translator | deep-translator-master/.readthedocs.yaml |
# File: .readthedocs.yaml
version: 2
# Build from the docs/ directory with Sphinx
sphinx:
configuration: docs/conf.py
# Explicitly set the version of Python and its requirements
python:
version: 3.7
install:
- requirements: docs/requirements_docs.txt
| 265 | 16.733333 | 59 | yaml |
deep-translator | deep-translator-master/.scrutinizer.yml | checks:
python:
code_rating: true
duplicate_code: true
build:
nodes:
analysis:
tests:
override:
- py-scrutinizer-run
| 168 | 13.083333 | 30 | yml |
deep-translator | deep-translator-master/.github/FUNDING.yml | # These are supported funding model platforms
github: nidhaloff
| 65 | 15.5 | 45 | yml |
deep-translator | deep-translator-master/.github/ISSUE_TEMPLATE.md | * deep_translator version:
* Python version:
* Operating System:
### Description
Describe what you were trying to get done.
Tell us what happened, what went wrong, and what you expected to happen.
### What I Did
```
Paste the command(s) you ran and the output.
If there was a crash, please include the traceback here.
```
| 326 | 19.4375 | 72 | md |
deep-translator | deep-translator-master/.github/release-drafter.yml | template: |
## What’s Changed
$CHANGES
| 44 | 8 | 19 | yml |
deep-translator | deep-translator-master/.github/workflows/pre-commit.yml | on:
pull_request:
push:
branches: [main, master]
jobs:
main:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: 3.7
- uses: pre-commit/[email protected]
| 255 | 16.066667 | 36 | yml |
deep-translator | deep-translator-master/.github/workflows/production-tests.yml | name: production-tests
on:
push:
tags:
- 'v*'
jobs:
test:
strategy:
matrix:
python-version: [ "3.7", "3.8", "3.9" ]
os: [ubuntu-latest] # we can add other os like windows if we want
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/[email protected]
with:
python-version: ${{ matrix.python-version }}
- name: Install poetry
run: curl -sSL https://install.python-poetry.org | python3 -
- name: Install dependencies
run: |
poetry config virtualenvs.in-project true
poetry install
- name: Run tests
run: |
poetry run pytest
| 931 | 27.242424 | 82 | yml |
deep-translator | deep-translator-master/.github/workflows/release.yml | name: Semantic Release
on:
workflow_dispatch:
inputs:
version:
description: Bump version
required: true
workflow_run:
workflows:
- "production-tests"
types:
- completed
jobs:
update_release_draft:
runs-on: ubuntu-latest
steps:
- uses: release-drafter/release-drafter@v5
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
release:
name: "Release on Pypi"
runs-on: ubuntu-latest
concurrency: release
steps:
- uses: actions/checkout@v2
- name: Install poetry
run: curl -sSL https://install.python-poetry.org | python3 -
- name: View poetry version
run: poetry --version
- name: Run install
run: poetry install
- name: Build package
run: poetry build
- name: Publish package to PyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
user: __token__
password: ${{ secrets.PYPI_TOKEN }}
| 1,197 | 25.043478 | 76 | yml |
deep-translator | deep-translator-master/.github/workflows/test-release.yml | name: test-release
on:
push:
branches:
- master
workflow_run:
workflows:
- "production-tests"
types:
- completed
jobs:
test-release:
name: "Release on Test Pypi"
runs-on: ubuntu-latest
concurrency: release
steps:
- uses: actions/checkout@v2
- name: Install poetry
run: curl -sSL https://install.python-poetry.org | python3 -
- name: View poetry version
run: poetry --version
- name: Update version
run: poetry version patch
- name: Build package
run: poetry build
- name: Publish package to TestPyPI
uses: pypa/gh-action-pypi-publish@release/v1
with:
user: __token__
password: ${{ secrets.TEST_PYPI_TOKEN }}
repository_url: https://test.pypi.org/legacy/
| 1,005 | 24.794872 | 76 | yml |
deep-translator | deep-translator-master/.github/workflows/test.yml | name: test
on: [push, pull_request]
jobs:
test:
strategy:
matrix:
python-version: [ "3.7", "3.8", "3.9" ]
os: [ubuntu-latest] # we can add other os like macOS-latest
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/[email protected]
with:
python-version: ${{ matrix.python-version }}
- name: Install poetry
run: curl -sSL https://install.python-poetry.org | python3 -
- name: Set up cache
uses: actions/[email protected]
with:
path: .venv
key: venv-${{ matrix.python-version }}-${{ hashFiles('pyproject.toml') }}-${{ hashFiles('poetry.lock') }}
- name: Install dependencies
run: |
poetry config virtualenvs.in-project true
poetry install
- name: Run tests
run: |
poetry run pytest
| 1,150 | 30.972222 | 125 | yml |
deep-translator | deep-translator-master/deep_translator/__init__.py | """Top-level package for Deep Translator"""
__copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
from deep_translator.baidu import BaiduTranslator
from deep_translator.chatgpt import ChatGptTranslator
from deep_translator.deepl import DeeplTranslator
from deep_translator.detection import batch_detection, single_detection
from deep_translator.google import GoogleTranslator
from deep_translator.libre import LibreTranslator
from deep_translator.linguee import LingueeTranslator
from deep_translator.microsoft import MicrosoftTranslator
from deep_translator.mymemory import MyMemoryTranslator
from deep_translator.papago import PapagoTranslator
from deep_translator.pons import PonsTranslator
from deep_translator.qcri import QcriTranslator
from deep_translator.tencent import TencentTranslator
from deep_translator.yandex import YandexTranslator
__author__ = """Nidhal Baccouri"""
__email__ = "[email protected]"
__version__ = "1.9.1"
__all__ = [
"GoogleTranslator",
"PonsTranslator",
"LingueeTranslator",
"MyMemoryTranslator",
"YandexTranslator",
"MicrosoftTranslator",
"QcriTranslator",
"DeeplTranslator",
"LibreTranslator",
"PapagoTranslator",
"ChatGptTranslator",
"TencentTranslator",
"BaiduTranslator",
"single_detection",
"batch_detection",
]
| 1,317 | 31.146341 | 71 | py |
deep-translator | deep-translator-master/deep_translator/__main__.py | __copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
from deep_translator.cli import CLI
def main():
CLI().run()
if __name__ == "__main__":
main()
| 161 | 11.461538 | 52 | py |
deep-translator | deep-translator-master/deep_translator/baidu.py | """
baidu translator API
"""
__copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
import hashlib
import os
import random
from typing import List, Optional
import requests
from deep_translator.base import BaseTranslator
from deep_translator.constants import (
BAIDU_APPID_ENV_VAR,
BAIDU_APPKEY_ENV_VAR,
BAIDU_LANGUAGE_TO_CODE,
BASE_URLS,
)
from deep_translator.exceptions import (
ApiKeyException,
BaiduAPIerror,
ServerException,
TranslationNotFound,
)
from deep_translator.validate import is_empty, is_input_valid
class BaiduTranslator(BaseTranslator):
"""
    class that wraps functions, which use the Baidu translator
under the hood to translate word(s)
"""
def __init__(
self,
source: str = "en",
target: str = "zh",
appid: Optional[str] = os.getenv(BAIDU_APPID_ENV_VAR, None),
appkey: Optional[str] = os.getenv(BAIDU_APPKEY_ENV_VAR, None),
**kwargs
):
"""
@param appid: your baidu cloud api appid.
Get one here: https://fanyi-api.baidu.com/choose
@param appkey: your baidu cloud api appkey.
@param source: source language
@param target: target language
"""
if not appid:
raise ApiKeyException(env_var=BAIDU_APPID_ENV_VAR)
if not appkey:
raise ApiKeyException(env_var=BAIDU_APPKEY_ENV_VAR)
self.appid = appid
self.appkey = appkey
super().__init__(
base_url=BASE_URLS.get("BAIDU"),
source=source,
target=target,
languages=BAIDU_LANGUAGE_TO_CODE,
**kwargs
)
def translate(self, text: str, **kwargs) -> str:
"""
@param text: text to translate
@return: translated text
"""
if is_input_valid(text):
if self._same_source_target() or is_empty(text):
return text
# Create the request parameters.
salt = random.randint(32768, 65536)
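            # Per the Baidu translate API, each request carries the signature
            # MD5(appid + query + salt + appkey) computed below.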
sign = hashlib.md5(
(self.appid + text + str(salt) + self.appkey).encode("utf-8")
).hexdigest()
headers = {"Content-Type": "application/x-www-form-urlencoded"}
payload = {
"appid": self.appid,
"q": text,
"from": self.source,
"to": self.target,
"salt": salt,
"sign": sign,
}
# Do the request and check the connection.
try:
response = requests.post(
self._base_url, params=payload, headers=headers
)
except ConnectionError:
raise ServerException(503)
if response.status_code != 200:
raise ServerException(response.status_code)
# Get the response and check is not empty.
res = response.json()
if not res:
raise TranslationNotFound(text)
# Process and return the response.
if "error_code" in res:
raise BaiduAPIerror(res["error_msg"])
if "trans_result" in res:
return "\n".join([s["dst"] for s in res["trans_result"]])
else:
raise TranslationNotFound(text)
def translate_file(self, path: str, **kwargs) -> str:
return self._translate_file(path, **kwargs)
def translate_batch(self, batch: List[str], **kwargs) -> List[str]:
"""
@param batch: list of texts to translate
@return: list of translations
"""
return self._translate_batch(batch, **kwargs)
| 3,687 | 29.479339 | 77 | py |
deep-translator | deep-translator-master/deep_translator/base.py | """base translator class"""
__copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
from abc import ABC, abstractmethod
from pathlib import Path
from typing import List, Optional, Union
from deep_translator.constants import GOOGLE_LANGUAGES_TO_CODES
from deep_translator.exceptions import (
InvalidSourceOrTargetLanguage,
LanguageNotSupportedException,
)
class BaseTranslator(ABC):
"""
    Abstract class that serves as a base for the different translators
"""
def __init__(
self,
base_url: str = None,
languages: dict = GOOGLE_LANGUAGES_TO_CODES,
source: str = "auto",
target: str = "en",
payload_key: Optional[str] = None,
element_tag: Optional[str] = None,
element_query: Optional[dict] = None,
**url_params,
):
"""
@param source: source language to translate from
@param target: target language to translate to
"""
self._base_url = base_url
self._languages = languages
self._supported_languages = list(self._languages.keys())
if not source:
raise InvalidSourceOrTargetLanguage(source)
if not target:
raise InvalidSourceOrTargetLanguage(target)
self._source, self._target = self._map_language_to_code(source, target)
self._url_params = url_params
self._element_tag = element_tag
self._element_query = element_query
self.payload_key = payload_key
super().__init__()
@property
def source(self):
return self._source
@source.setter
def source(self, lang):
self._source = lang
@property
def target(self):
return self._target
@target.setter
def target(self, lang):
self._target = lang
def _type(self):
return self.__class__.__name__
def _map_language_to_code(self, *languages):
"""
        map each language to its corresponding code (abbreviation) if the user
        passed the language by its full name
@param languages: list of languages
@return: mapped value of the language or raise an exception if the language is
not supported
"""
for language in languages:
if language in self._languages.values() or language == "auto":
yield language
elif language in self._languages.keys():
yield self._languages[language]
else:
raise LanguageNotSupportedException(
language,
message=f"No support for the provided language.\n"
f"Please select on of the supported languages:\n"
f"{self._languages}",
)
def _same_source_target(self) -> bool:
return self._source == self._target
def get_supported_languages(
self, as_dict: bool = False, **kwargs
) -> Union[list, dict]:
"""
return the supported languages by the Google translator
@param as_dict: if True, the languages will be returned as a dictionary
mapping languages to their abbreviations
@return: list or dict
"""
return self._supported_languages if not as_dict else self._languages
def is_language_supported(self, language: str, **kwargs) -> bool:
"""
check if the language is supported by the translator
@param language: a string for 1 language
@return: bool or raise an Exception
"""
if (
language == "auto"
or language in self._languages.keys()
or language in self._languages.values()
):
return True
else:
return False
@abstractmethod
def translate(self, text: str, **kwargs) -> str:
"""
translate a text using a translator under the hood and return
the translated text
@param text: text to translate
@param kwargs: additional arguments
@return: str
"""
        raise NotImplementedError("You need to implement the translate method!")
def _read_docx(self, f: str):
import docx2txt
return docx2txt.process(f)
def _read_pdf(self, f: str):
import pypdf
reader = pypdf.PdfReader(f)
page = reader.pages[0]
return page.extract_text()
def _translate_file(self, path: str, **kwargs) -> str:
"""
translate directly from file
@param path: path to the target file
@type path: str
@param kwargs: additional args
@return: str
"""
if not isinstance(path, Path):
path = Path(path)
if not path.exists():
print("Path to the file is wrong!")
exit(1)
ext = path.suffix
if ext == ".docx":
text = self._read_docx(f=str(path))
elif ext == ".pdf":
text = self._read_pdf(f=str(path))
else:
with open(path, "r", encoding="utf-8") as f:
text = f.read().strip()
return self.translate(text)
def _translate_batch(self, batch: List[str], **kwargs) -> List[str]:
"""
translate a list of texts
@param batch: list of texts you want to translate
@return: list of translations
"""
if not batch:
raise Exception("Enter your text list that you want to translate")
arr = []
for i, text in enumerate(batch):
translated = self.translate(text, **kwargs)
arr.append(translated)
return arr
| 5,625 | 29.576087 | 88 | py |
deep-translator | deep-translator-master/deep_translator/chatgpt.py | __copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
import os
from typing import List, Optional
from deep_translator.base import BaseTranslator
from deep_translator.constants import OPEN_AI_ENV_VAR
from deep_translator.exceptions import ApiKeyException
class ChatGptTranslator(BaseTranslator):
"""
    class that wraps functions, which use the ChatGPT API
    under the hood to translate word(s)
"""
def __init__(
self,
source: str = "auto",
target: str = "english",
api_key: Optional[str] = os.getenv(OPEN_AI_ENV_VAR, None),
model: Optional[str] = "gpt-3.5-turbo",
**kwargs,
):
"""
@param api_key: your openai api key.
@param source: source language
@param target: target language
"""
if not api_key:
raise ApiKeyException(env_var=OPEN_AI_ENV_VAR)
self.api_key = api_key
self.model = model
super().__init__(source=source, target=target, **kwargs)
def translate(self, text: str, **kwargs) -> str:
"""
@param text: text to translate
@return: translated text
"""
import openai
openai.api_key = self.api_key
prompt = f"Translate the text below into {self.target}.\n"
prompt += f'Text: "{text}"'
response = openai.ChatCompletion.create(
model=self.model,
messages=[
{
"role": "user",
"content": prompt,
}
],
)
return response.choices[0].message.content
def translate_file(self, path: str, **kwargs) -> str:
return self._translate_file(path, **kwargs)
def translate_batch(self, batch: List[str], **kwargs) -> List[str]:
"""
@param batch: list of texts to translate
@return: list of translations
"""
return self._translate_batch(batch, **kwargs)
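# Usage sketch (illustrative only; assumes a valid OpenAI API key):
#   ChatGptTranslator(target="german", api_key="sk-...").translate("hello world")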
| 1,974 | 26.816901 | 72 | py |
deep-translator | deep-translator-master/deep_translator/cli.py | """Console script for deep_translator."""
__copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
import argparse
from typing import Optional
from deep_translator.engines import __engines__
class CLI(object):
translators_dict = __engines__
translator = None
def __init__(self, custom_args: Optional[list] = None):
self.custom_args = custom_args
self.args = self.parse_args()
translator_class = self.translators_dict.get(
self.args.translator, None
)
if not translator_class:
raise Exception(
f"Translator {self.args.translator} is not supported."
f"Supported translators: {list(self.translators_dict.keys())}"
)
self.translator = translator_class(
source=self.args.source, target=self.args.target
)
def translate(self) -> None:
"""
function used to provide translations from the parsed terminal arguments
@return: None
"""
res = self.translator.translate(self.args.text)
print(f"Translation from {self.args.source} to {self.args.target}")
print("-" * 50)
print(f"Translation result: {res}")
def get_supported_languages(self) -> None:
"""
function used to return the languages supported by the translator service
from the parsed terminal arguments
@return: None
"""
translator_supported_languages = (
self.translator.get_supported_languages(as_dict=True)
)
print(f"Languages supported by '{self.args.translator}' are :\n")
print(translator_supported_languages)
def parse_args(self) -> argparse.Namespace:
"""
function responsible for parsing terminal arguments and provide
them for further use in the translation process
"""
parser = argparse.ArgumentParser(
add_help=True,
description="Official CLI for deep-translator",
usage="dt --help",
)
parser.add_argument(
"--translator",
"-trans",
default="google",
type=str,
help="name of the translator you want to use",
)
parser.add_argument(
"--source",
"-src",
default="auto",
type=str,
help="source language to translate from",
)
parser.add_argument(
"--target", "-tg", type=str, help="target language to translate to"
)
parser.add_argument(
"--text", "-txt", type=str, help="text you want to translate"
)
parser.add_argument(
"--languages",
"-lang",
action="store_true",
help="all the languages available with the translator"
"Run the command deep_translator -trans <translator service> -lang",
)
parsed_args = (
parser.parse_args(self.custom_args)
if self.custom_args
else parser.parse_args()
)
return parsed_args
def run(self) -> None:
if self.args.languages:
self.get_supported_languages()
else:
self.translate()
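# Usage sketch (illustrative only; run from a shell with the package installed):
#   python -m deep_translator --translator google --source en --target de --text "hello"
#   python -m deep_translator --translator google --languages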
| 3,253 | 30.592233 | 81 | py |
deep-translator | deep-translator-master/deep_translator/constants.py | __copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
OPEN_AI_ENV_VAR = "OPEN_API_KEY"
DEEPL_ENV_VAR = "DEEPL_API_KEY"
LIBRE_ENV_VAR = "LIBRE_API_KEY"
MSFT_ENV_VAR = "MICROSOFT_API_KEY"
QCRI_ENV_VAR = "QCRI_API_KEY"
YANDEX_ENV_VAR = "YANDEX_API_KEY"
TENCENT_SECRET_ID_ENV_VAR = "TENCENT_SECRET_ID"
TENCENT_SECRET_KEY_ENV_VAR = "TENCENT_SECRET_KEY"
BAIDU_APPID_ENV_VAR = "BAIDU_APPID"
BAIDU_APPKEY_ENV_VAR = "BAIDU_APPKEY"
BASE_URLS = {
"GOOGLE_TRANSLATE": "https://translate.google.com/m",
"PONS": "https://en.pons.com/translate/",
"YANDEX": "https://translate.yandex.net/api/{version}/tr.json/{endpoint}",
"LINGUEE": "https://www.linguee.com/",
"MYMEMORY": "http://api.mymemory.translated.net/get",
"QCRI": "https://mt.qcri.org/api/v1/{endpoint}?",
"DEEPL": "https://api.deepl.com/{version}/",
"DEEPL_FREE": "https://api-free.deepl.com/{version}/",
"MICROSOFT_TRANSLATE": "https://api.cognitive.microsofttranslator.com/translate?api-version=3.0",
"PAPAGO": "https://papago.naver.com/",
"PAPAGO_API": "https://openapi.naver.com/v1/papago/n2mt",
"LIBRE": "https://libretranslate.com/",
"LIBRE_FREE": "https://libretranslate.de/",
"TENENT": "https://tmt.tencentcloudapi.com",
"BAIDU": "https://fanyi-api.baidu.com/api/trans/vip/translate",
}
GOOGLE_LANGUAGES_TO_CODES = {
"afrikaans": "af",
"albanian": "sq",
"amharic": "am",
"arabic": "ar",
"armenian": "hy",
"assamese": "as",
"aymara": "ay",
"azerbaijani": "az",
"bambara": "bm",
"basque": "eu",
"belarusian": "be",
"bengali": "bn",
"bhojpuri": "bho",
"bosnian": "bs",
"bulgarian": "bg",
"catalan": "ca",
"cebuano": "ceb",
"chichewa": "ny",
"chinese (simplified)": "zh-CN",
"chinese (traditional)": "zh-TW",
"corsican": "co",
"croatian": "hr",
"czech": "cs",
"danish": "da",
"dhivehi": "dv",
"dogri": "doi",
"dutch": "nl",
"english": "en",
"esperanto": "eo",
"estonian": "et",
"ewe": "ee",
"filipino": "tl",
"finnish": "fi",
"french": "fr",
"frisian": "fy",
"galician": "gl",
"georgian": "ka",
"german": "de",
"greek": "el",
"guarani": "gn",
"gujarati": "gu",
"haitian creole": "ht",
"hausa": "ha",
"hawaiian": "haw",
"hebrew": "iw",
"hindi": "hi",
"hmong": "hmn",
"hungarian": "hu",
"icelandic": "is",
"igbo": "ig",
"ilocano": "ilo",
"indonesian": "id",
"irish": "ga",
"italian": "it",
"japanese": "ja",
"javanese": "jw",
"kannada": "kn",
"kazakh": "kk",
"khmer": "km",
"kinyarwanda": "rw",
"konkani": "gom",
"korean": "ko",
"krio": "kri",
"kurdish (kurmanji)": "ku",
"kurdish (sorani)": "ckb",
"kyrgyz": "ky",
"lao": "lo",
"latin": "la",
"latvian": "lv",
"lingala": "ln",
"lithuanian": "lt",
"luganda": "lg",
"luxembourgish": "lb",
"macedonian": "mk",
"maithili": "mai",
"malagasy": "mg",
"malay": "ms",
"malayalam": "ml",
"maltese": "mt",
"maori": "mi",
"marathi": "mr",
"meiteilon (manipuri)": "mni-Mtei",
"mizo": "lus",
"mongolian": "mn",
"myanmar": "my",
"nepali": "ne",
"norwegian": "no",
"odia (oriya)": "or",
"oromo": "om",
"pashto": "ps",
"persian": "fa",
"polish": "pl",
"portuguese": "pt",
"punjabi": "pa",
"quechua": "qu",
"romanian": "ro",
"russian": "ru",
"samoan": "sm",
"sanskrit": "sa",
"scots gaelic": "gd",
"sepedi": "nso",
"serbian": "sr",
"sesotho": "st",
"shona": "sn",
"sindhi": "sd",
"sinhala": "si",
"slovak": "sk",
"slovenian": "sl",
"somali": "so",
"spanish": "es",
"sundanese": "su",
"swahili": "sw",
"swedish": "sv",
"tajik": "tg",
"tamil": "ta",
"tatar": "tt",
"telugu": "te",
"thai": "th",
"tigrinya": "ti",
"tsonga": "ts",
"turkish": "tr",
"turkmen": "tk",
"twi": "ak",
"ukrainian": "uk",
"urdu": "ur",
"uyghur": "ug",
"uzbek": "uz",
"vietnamese": "vi",
"welsh": "cy",
"xhosa": "xh",
"yiddish": "yi",
"yoruba": "yo",
"zulu": "zu",
}
PONS_CODES_TO_LANGUAGES = {
"ar": "arabic",
"bg": "bulgarian",
"zh-cn": "chinese",
"cs": "czech",
"da": "danish",
"nl": "dutch",
"en": "english",
"fr": "french",
"de": "german",
"el": "greek",
"hu": "hungarian",
"it": "italian",
"la": "latin",
"no": "norwegian",
"pl": "polish",
"pt": "portuguese",
"ru": "russian",
"sl": "slovenian",
"es": "spanish",
"sv": "swedish",
"tr": "turkish",
"elv": "elvish",
}
LINGUEE_LANGUAGES_TO_CODES = {
"maltese": "maltese",
"english": "english",
"german": "german",
"bulgarian": "bulgarian",
"polish": "polish",
"portuguese": "portuguese",
"hungarian": "hungarian",
"romanian": "romanian",
"russian": "russian",
# "serbian": "sr",
"dutch": "dutch",
"slovakian": "slovakian",
"greek": "greek",
"slovenian": "slovenian",
"danish": "danish",
"italian": "italian",
"spanish": "spanish",
"finnish": "finnish",
"chinese": "chinese",
"french": "french",
# "croatian": "hr",
"czech": "czech",
"laotian": "laotian",
"swedish": "swedish",
"latvian": "latvian",
"estonian": "estonian",
"japanese": "japanese",
}
MY_MEMORY_LANGUAGES_TO_CODES = {
"acehnese": "ace-ID",
"afrikaans": "af-ZA",
"akan": "ak-GH",
"albanian": "sq-AL",
"amharic": "am-ET",
"antigua and barbuda creole english": "aig-AG",
"arabic": "ar-SA",
"arabic egyptian": "ar-EG",
"aragonese": "an-ES",
"armenian": "hy-AM",
"assamese": "as-IN",
"asturian": "ast-ES",
"austrian german": "de-AT",
"awadhi": "awa-IN",
"ayacucho quechua": "quy-PE",
"azerbaijani": "az-AZ",
"bahamas creole english": "bah-BS",
"bajan": "bjs-BB",
"balinese": "ban-ID",
"balkan gipsy": "rm-RO",
"bambara": "bm-ML",
"banjar": "bjn-ID",
"bashkir": "ba-RU",
"basque": "eu-ES",
"belarusian": "be-BY",
"belgian french": "fr-BE",
"bemba": "bem-ZM",
"bengali": "bn-IN",
"bhojpuri": "bho-IN",
"bihari": "bh-IN",
"bislama": "bi-VU",
"borana": "gax-KE",
"bosnian": "bs-BA",
"bosnian (cyrillic)": "bs-Cyrl-BA",
"breton": "br-FR",
"buginese": "bug-ID",
"bulgarian": "bg-BG",
"burmese": "my-MM",
"catalan": "ca-ES",
"catalan valencian": "cav-ES",
"cebuano": "ceb-PH",
"central atlas tamazight": "tzm-MA",
"central aymara": "ayr-BO",
"central kanuri (latin script)": "knc-NG",
"chadian arabic": "shu-TD",
"chamorro": "ch-GU",
"cherokee": "chr-US",
"chhattisgarhi": "hne-IN",
"chinese simplified": "zh-CN",
"chinese trad. (hong kong)": "zh-HK",
"chinese traditional": "zh-TW",
"chinese traditional macau": "zh-MO",
"chittagonian": "ctg-BD",
"chokwe": "cjk-AO",
"classical greek": "grc-GR",
"comorian ngazidja": "zdj-KM",
"coptic": "cop-EG",
"crimean tatar": "crh-RU",
"crioulo upper guinea": "pov-GW",
"croatian": "hr-HR",
"czech": "cs-CZ",
"danish": "da-DK",
"dari": "prs-AF",
"dimli": "diq-TR",
"dutch": "nl-NL",
"dyula": "dyu-CI",
"dzongkha": "dz-BT",
"eastern yiddish": "ydd-US",
"emakhuwa": "vmw-MZ",
"english": "en-GB",
"english australia": "en-AU",
"english canada": "en-CA",
"english india": "en-IN",
"english ireland": "en-IE",
"english new zealand": "en-NZ",
"english singapore": "en-SG",
"english south africa": "en-ZA",
"english us": "en-US",
"esperanto": "eo-EU",
"estonian": "et-EE",
"ewe": "ee-GH",
"fanagalo": "fn-FNG",
"faroese": "fo-FO",
"fijian": "fj-FJ",
"filipino": "fil-PH",
"finnish": "fi-FI",
"flemish": "nl-BE",
"fon": "fon-BJ",
"french": "fr-FR",
"french canada": "fr-CA",
"french swiss": "fr-CH",
"friulian": "fur-IT",
"fula": "ff-FUL",
"galician": "gl-ES",
"gamargu": "mfi-NG",
"garo": "grt-IN",
"georgian": "ka-GE",
"german": "de-DE",
"gilbertese": "gil-KI",
"glavda": "glw-NG",
"greek": "el-GR",
"grenadian creole english": "gcl-GD",
"guarani": "gn-PY",
"gujarati": "gu-IN",
"guyanese creole english": "gyn-GY",
"haitian creole french": "ht-HT",
"halh mongolian": "khk-MN",
"hausa": "ha-NE",
"hawaiian": "haw-US",
"hebrew": "he-IL",
"higi": "hig-NG",
"hiligaynon": "hil-PH",
"hill mari": "mrj-RU",
"hindi": "hi-IN",
"hmong": "hmn-CN",
"hungarian": "hu-HU",
"icelandic": "is-IS",
"igbo ibo": "ibo-NG",
"igbo ig": "ig-NG",
"ilocano": "ilo-PH",
"indonesian": "id-ID",
"inuktitut greenlandic": "kl-GL",
"irish gaelic": "ga-IE",
"italian": "it-IT",
"italian swiss": "it-CH",
"jamaican creole english": "jam-JM",
"japanese": "ja-JP",
"javanese": "jv-ID",
"jingpho": "kac-MM",
"k'iche'": "quc-GT",
"kabiyè": "kbp-TG",
"kabuverdianu": "kea-CV",
"kabylian": "kab-DZ",
"kalenjin": "kln-KE",
"kamba": "kam-KE",
"kannada": "kn-IN",
"kanuri": "kr-KAU",
"karen": "kar-MM",
"kashmiri (devanagari script)": "ks-IN",
"kashmiri (arabic script)": "kas-IN",
"kazakh": "kk-KZ",
"khasi": "kha-IN",
"khmer": "km-KH",
"kikuyu kik": "kik-KE",
"kikuyu ki": "ki-KE",
"kimbundu": "kmb-AO",
"kinyarwanda": "rw-RW",
"kirundi": "rn-BI",
"kisii": "guz-KE",
"kongo": "kg-CG",
"konkani": "kok-IN",
"korean": "ko-KR",
"northern kurdish": "kmr-TR",
"kurdish sorani": "ckb-IQ",
"kyrgyz": "ky-KG",
"lao": "lo-LA",
"latgalian": "ltg-LV",
"latin": "la-XN",
"latvian": "lv-LV",
"ligurian": "lij-IT",
"limburgish": "li-NL",
"lingala": "ln-LIN",
"lithuanian": "lt-LT",
"lombard": "lmo-IT",
"luba-kasai": "lua-CD",
"luganda": "lg-UG",
"luhya": "luy-KE",
"luo": "luo-KE",
"luxembourgish": "lb-LU",
"maa": "mas-KE",
"macedonian": "mk-MK",
"magahi": "mag-IN",
"maithili": "mai-IN",
"malagasy": "mg-MG",
"malay": "ms-MY",
"malayalam": "ml-IN",
"maldivian": "dv-MV",
"maltese": "mt-MT",
"mandara": "mfi-CM",
"manipuri": "mni-IN",
"manx gaelic": "gv-IM",
"maori": "mi-NZ",
"marathi": "mr-IN",
"margi": "mrt-NG",
"mari": "mhr-RU",
"marshallese": "mh-MH",
"mende": "men-SL",
"meru": "mer-KE",
"mijikenda": "nyf-KE",
"minangkabau": "min-ID",
"mizo": "lus-IN",
"mongolian": "mn-MN",
"montenegrin": "sr-ME",
"morisyen": "mfe-MU",
"moroccan arabic": "ar-MA",
"mossi": "mos-BF",
"ndau": "ndc-MZ",
"ndebele": "nr-ZA",
"nepali": "ne-NP",
"nigerian fulfulde": "fuv-NG",
"niuean": "niu-NU",
"north azerbaijani": "azj-AZ",
"sesotho": "nso-ZA",
"northern uzbek": "uzn-UZ",
"norwegian bokmål": "nb-NO",
"norwegian nynorsk": "nn-NO",
"nuer": "nus-SS",
"nyanja": "ny-MW",
"occitan": "oc-FR",
"occitan aran": "oc-ES",
"odia": "or-IN",
"oriya": "ory-IN",
"urdu": "ur-PK",
"palauan": "pau-PW",
"pali": "pi-IN",
"pangasinan": "pag-PH",
"papiamentu": "pap-CW",
"pashto": "ps-PK",
"persian": "fa-IR",
"pijin": "pis-SB",
"plateau malagasy": "plt-MG",
"polish": "pl-PL",
"portuguese": "pt-PT",
"portuguese brazil": "pt-BR",
"potawatomi": "pot-US",
"punjabi": "pa-IN",
"punjabi (pakistan)": "pnb-PK",
"quechua": "qu-PE",
"rohingya": "rhg-MM",
"rohingyalish": "rhl-MM",
"romanian": "ro-RO",
"romansh": "roh-CH",
"rundi": "run-BI",
"russian": "ru-RU",
"saint lucian creole french": "acf-LC",
"samoan": "sm-WS",
"sango": "sg-CF",
"sanskrit": "sa-IN",
"santali": "sat-IN",
"sardinian": "sc-IT",
"scots gaelic": "gd-GB",
"sena": "seh-ZW",
"serbian cyrillic": "sr-Cyrl-RS",
"serbian latin": "sr-Latn-RS",
"seselwa creole french": "crs-SC",
"setswana (south africa)": "tn-ZA",
"shan": "shn-MM",
"shona": "sn-ZW",
"sicilian": "scn-IT",
"silesian": "szl-PL",
"sindhi snd": "snd-PK",
"sindhi sd": "sd-PK",
"sinhala": "si-LK",
"slovak": "sk-SK",
"slovenian": "sl-SI",
"somali": "so-SO",
"sotho southern": "st-LS",
"south azerbaijani": "azb-AZ",
"southern pashto": "pbt-PK",
"southwestern dinka": "dik-SS",
"spanish": "es-ES",
"spanish argentina": "es-AR",
"spanish colombia": "es-CO",
"spanish latin america": "es-419",
"spanish mexico": "es-MX",
"spanish united states": "es-US",
"sranan tongo": "srn-SR",
"standard latvian": "lvs-LV",
"standard malay": "zsm-MY",
"sundanese": "su-ID",
"swahili": "sw-KE",
"swati": "ss-SZ",
"swedish": "sv-SE",
"swiss german": "de-CH",
"syriac (aramaic)": "syc-TR",
"tagalog": "tl-PH",
"tahitian": "ty-PF",
"tajik": "tg-TJ",
"tamashek (tuareg)": "tmh-DZ",
"tamasheq": "taq-ML",
"tamil india": "ta-IN",
"tamil sri lanka": "ta-LK",
"taroko": "trv-TW",
"tatar": "tt-RU",
"telugu": "te-IN",
"tetum": "tet-TL",
"thai": "th-TH",
"tibetan": "bo-CN",
"tigrinya": "ti-ET",
"tok pisin": "tpi-PG",
"tokelauan": "tkl-TK",
"tongan": "to-TO",
"tosk albanian": "als-AL",
"tsonga": "ts-ZA",
"tswa": "tsc-MZ",
"tswana": "tn-BW",
"tumbuka": "tum-MW",
"turkish": "tr-TR",
"turkmen": "tk-TM",
"tuvaluan": "tvl-TV",
"twi": "tw-GH",
"udmurt": "udm-RU",
"ukrainian": "uk-UA",
"uma": "ppk-ID",
"umbundu": "umb-AO",
"uyghur uig": "uig-CN",
"uyghur ug": "ug-CN",
"uzbek": "uz-UZ",
"venetian": "vec-IT",
"vietnamese": "vi-VN",
"vincentian creole english": "svc-VC",
"virgin islands creole english": "vic-US",
"wallisian": "wls-WF",
"waray (philippines)": "war-PH",
"welsh": "cy-GB",
"west central oromo": "gaz-ET",
"western persian": "pes-IR",
"wolof": "wo-SN",
"xhosa": "xh-ZA",
"yiddish": "yi-YD",
"yoruba": "yo-NG",
"zulu": "zu-ZA",
}
DEEPL_LANGUAGE_TO_CODE = {
"bulgarian": "bg",
"czech": "cs",
"danish": "da",
"german": "de",
"greek": "el",
"english": "en",
"spanish": "es",
"estonian": "et",
"finnish": "fi",
"french": "fr",
"hungarian": "hu",
"indonesian": "id",
"italian": "it",
"japanese": "ja",
"lithuanian": "lt",
"latvian": "lv",
"dutch": "nl",
"polish": "pl",
"portuguese": "pt",
"romanian": "ro",
"russian": "ru",
"slovak": "sk",
"slovenian": "sl",
"swedish": "sv",
"turkish": "tr",
"ukrainian": "uk",
"chinese": "zh",
}
PAPAGO_LANGUAGE_TO_CODE = {
"ko": "Korean",
"en": "English",
"ja": "Japanese",
"zh-CN": "Chinese",
"zh-TW": "Chinese traditional",
"es": "Spanish",
"fr": "French",
"vi": "Vietnamese",
"th": "Thai",
"id": "Indonesia",
}
QCRI_LANGUAGE_TO_CODE = {"Arabic": "ar", "English": "en", "Spanish": "es"}
LIBRE_LANGUAGES_TO_CODES = {
"English": "en",
"Arabic": "ar",
"Chinese": "zh",
"French": "fr",
"German": "de",
"Hindi": "hi",
"Indonesian": "id",
"Irish": "ga",
"Italian": "it",
"Japanese": "ja",
"Korean": "ko",
"Polish": "pl",
"Portuguese": "pt",
"Russian": "ru",
"Spanish": "es",
"Turkish": "tr",
"Vietnamese": "vi",
}
TENCENT_LANGUAGE_TO_CODE = {
"arabic": "ar",
"chinese (simplified)": "zh",
"chinese (traditional)": "zh-TW",
"english": "en",
"french": "fr",
"german": "de",
"hindi": "hi",
"indonesian": "id",
"japanese": "ja",
"korean": "ko",
"malay": "ms",
"portuguese": "pt",
"russian": "ru",
"spanish": "es",
"thai": "th",
"turkish": "tr",
"vietnamese": "vi",
}
BAIDU_LANGUAGE_TO_CODE = {
"arabic": "ara",
"bulgarian": "bul",
"chinese (classical)": "wyw",
"chinese (simplified)": "zh",
"chinese (traditional)": "cht",
"czech": "cs",
"danish": "dan",
"dutch": "nl",
"english": "en",
"estonian": "est",
"finnish": "fin",
"french": "fra",
"german": "de",
"greek": "el",
"hungarian": "hu",
"italian": "it",
"japanese": "jp",
"korean": "kor",
"polish": "pl",
"portuguese": "pt",
"romanian": "ro",
"russian": "ru",
"slovenian": "slo",
"spanish": "spa",
"swedish": "swe",
"thai": "th",
"vietnamese": "vie",
"yueyu": "yue",
}
| 16,652 | 24.042105 | 101 | py |
deep-translator | deep-translator-master/deep_translator/deepl.py | __copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
import os
from typing import List, Optional
import requests
from deep_translator.base import BaseTranslator
from deep_translator.constants import (
BASE_URLS,
DEEPL_ENV_VAR,
DEEPL_LANGUAGE_TO_CODE,
)
from deep_translator.exceptions import (
ApiKeyException,
AuthorizationException,
ServerException,
TranslationNotFound,
)
from deep_translator.validate import is_empty, is_input_valid, request_failed
class DeeplTranslator(BaseTranslator):
"""
    class that wraps functions, which use the DeepL translator
under the hood to translate word(s)
"""
def __init__(
self,
source: str = "de",
target: str = "en",
api_key: Optional[str] = os.getenv(DEEPL_ENV_VAR, None),
use_free_api: bool = True,
**kwargs
):
"""
        @param api_key: your DeepL API key.
Get one here: https://www.deepl.com/docs-api/accessing-the-api/
@param source: source language
@param target: target language
"""
if not api_key:
raise ApiKeyException(env_var=DEEPL_ENV_VAR)
self.version = "v2"
self.api_key = api_key
url = (
BASE_URLS.get("DEEPL_FREE").format(version=self.version)
if use_free_api
else BASE_URLS.get("DEEPL").format(version=self.version)
)
super().__init__(
base_url=url,
source=source,
target=target,
languages=DEEPL_LANGUAGE_TO_CODE,
**kwargs
)
def translate(self, text: str, **kwargs) -> str:
"""
@param text: text to translate
@return: translated text
"""
if is_input_valid(text):
if self._same_source_target() or is_empty(text):
return text
# Create the request parameters.
translate_endpoint = "translate"
params = {
"auth_key": self.api_key,
"source_lang": self._source,
"target_lang": self._target,
"text": text,
}
# Do the request and check the connection.
try:
response = requests.get(
self._base_url + translate_endpoint, params=params
)
except ConnectionError:
raise ServerException(503)
# If the answer is not success, raise server exception.
if response.status_code == 403:
raise AuthorizationException(self.api_key)
if request_failed(status_code=response.status_code):
raise ServerException(response.status_code)
# Get the response and check is not empty.
res = response.json()
if not res:
raise TranslationNotFound(text)
# Process and return the response.
return res["translations"][0]["text"]
def translate_file(self, path: str, **kwargs) -> str:
return self._translate_file(path, **kwargs)
def translate_batch(self, batch: List[str], **kwargs) -> List[str]:
"""
@param batch: list of texts to translate
@return: list of translations
"""
return self._translate_batch(batch, **kwargs)
if __name__ == "__main__":
d = DeeplTranslator(target="en", api_key="some-key")
t = d.translate("Ich habe keine ahnung")
print("text: ", t)
| 3,511 | 30.357143 | 77 | py |
deep-translator | deep-translator-master/deep_translator/detection.py | """
language detection API
"""
__copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
from typing import List, Optional, Union
import requests
from requests.exceptions import HTTPError
# Module global config
config = {
"url": "https://ws.detectlanguage.com/0.2/detect",
"headers": {
"User-Agent": "Detect Language API Python Client 1.4.0",
"Authorization": "Bearer {}",
},
}
def get_request_body(
text: Union[str, List[str]], api_key: str, *args, **kwargs
):
"""
send a request and return the response body parsed as dictionary
@param text: target text that you want to detect its language
@type text: str
@type api_key: str
@param api_key: your private API key
"""
if not api_key:
raise Exception(
"you need to get an API_KEY for this to work. "
"Get one for free here: https://detectlanguage.com/documentation"
)
if not text:
raise Exception("Please provide an input text")
else:
try:
headers = config["headers"]
headers["Authorization"] = headers["Authorization"].format(api_key)
response = requests.post(
config["url"], json={"q": text}, headers=headers
)
body = response.json().get("data")
return body
except HTTPError as e:
print("Error occured while requesting from server: ", e.args)
raise e
def single_detection(
text: str,
api_key: Optional[str] = None,
detailed: bool = False,
*args,
**kwargs
):
"""
function responsible for detecting the language from a text
@param text: target text that you want to detect its language
@type text: str
@type api_key: str
@param api_key: your private API key
@param detailed: set to True if you want to get detailed
information about the detection process
"""
body = get_request_body(text, api_key)
detections = body.get("detections")
if detailed:
return detections[0]
lang = detections[0].get("language", None)
if lang:
return lang
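# Usage sketch (illustrative only; requires a free detectlanguage.com API key):
#   single_detection("Hallo Welt", api_key="...")  # -> e.g. "de"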
def batch_detection(
text_list: List[str], api_key: str, detailed: bool = False, *args, **kwargs
):
"""
function responsible for detecting the language from a text
@param text_list: target batch that you want to detect its language
@param api_key: your private API key
@param detailed: set to True if you want to
get detailed information about the detection process
"""
body = get_request_body(text_list, api_key)
detections = body.get("detections")
res = [obj[0] for obj in detections]
if detailed:
return res
else:
return [obj["language"] for obj in res]
| 2,760 | 25.805825 | 79 | py |
deep-translator | deep-translator-master/deep_translator/engines.py | __copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
from deep_translator.base import BaseTranslator
__engines__ = {
translator.__name__.replace("Translator", "").lower(): translator
for translator in BaseTranslator.__subclasses__()
}
| 245 | 26.333333 | 69 | py |
deep-translator | deep-translator-master/deep_translator/exceptions.py | __copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
class BaseError(Exception):
"""
base error structure class
"""
def __init__(self, val, message):
"""
@param val: actual value
@param message: message shown to the user
"""
self.val = val
self.message = message
super().__init__()
def __str__(self):
return "{} --> {}".format(self.val, self.message)
class LanguageNotSupportedException(BaseError):
"""
exception thrown if the user uses a language
that is not supported by the deep_translator
"""
def __init__(
self, val, message="There is no support for the chosen language"
):
super().__init__(val, message)
class NotValidPayload(BaseError):
"""
exception thrown if the user enters an invalid payload
"""
def __init__(
self,
val,
message="text must be a valid text with maximum 5000 character,"
"otherwise it cannot be translated",
):
super(NotValidPayload, self).__init__(val, message)
class InvalidSourceOrTargetLanguage(BaseError):
"""
    exception thrown if the user enters an invalid source or target language
"""
def __init__(self, val, message="Invalid source or target language!"):
super(InvalidSourceOrTargetLanguage, self).__init__(val, message)
class TranslationNotFound(BaseError):
"""
exception thrown if no translation was found for the text provided by the user
"""
def __init__(
self,
val,
message="No translation was found using the current translator. Try another translator?",
):
super(TranslationNotFound, self).__init__(val, message)
class ElementNotFoundInGetRequest(BaseError):
"""
exception thrown if the html element was not found in the body parsed by beautifulsoup
"""
def __init__(
self, val, message="Required element was not found in the API response"
):
super(ElementNotFoundInGetRequest, self).__init__(val, message)
class NotValidLength(BaseError):
"""
exception thrown if the provided text exceed the length limit of the translator
"""
def __init__(self, val, min_chars, max_chars):
message = f"Text length need to be between {min_chars} and {max_chars} characters"
super(NotValidLength, self).__init__(val, message)
class RequestError(Exception):
"""
    exception thrown if an error occurred during the request call, e.g. a connection problem.
"""
def __init__(
self,
message="Request exception can happen due to an api connection error. "
"Please check your connection and try again",
):
self.message = message
def __str__(self):
return self.message
class MicrosoftAPIerror(Exception):
"""
exception thrown if Microsoft API returns one of its errors
"""
def __init__(self, api_message):
self.api_message = str(api_message)
self.message = "Microsoft API returned the following error"
def __str__(self):
return "{}: {}".format(self.message, self.api_message)
class TooManyRequests(Exception):
"""
    exception thrown when the client has sent too many requests to the server (HTTP 429).
"""
def __init__(
self,
message="Server Error: You made too many requests to the server."
"According to google, you are allowed to make 5 requests per second"
"and up to 200k requests per day. You can wait and try again later or"
"you can try the translate_batch function",
):
self.message = message
def __str__(self):
return self.message
class ServerException(Exception):
"""
Default YandexTranslate exception from the official website
"""
errors = {
400: "ERR_BAD_REQUEST",
401: "ERR_KEY_INVALID",
402: "ERR_KEY_BLOCKED",
403: "ERR_DAILY_REQ_LIMIT_EXCEEDED",
404: "ERR_DAILY_CHAR_LIMIT_EXCEEDED",
413: "ERR_TEXT_TOO_LONG",
429: "ERR_TOO_MANY_REQUESTS",
422: "ERR_UNPROCESSABLE_TEXT",
500: "ERR_INTERNAL_SERVER_ERROR",
501: "ERR_LANG_NOT_SUPPORTED",
        503: "ERR_SERVICE_NOT_AVAILABLE",
}
def __init__(self, status_code, *args):
message = self.errors.get(status_code, "API server error")
super(ServerException, self).__init__(message, *args)
class ApiKeyException(BaseError):
"""
exception thrown if no ApiKey was provided
"""
def __init__(self, env_var):
msg = f"""
You have to pass your api_key!
You can do this by passing the key as a parameter/argument to the translator class
or by setting the environment variable {env_var}
Example: export {env_var}="your_api_key"
"""
super().__init__(None, msg)
class AuthorizationException(Exception):
def __init__(self, api_key, *args):
msg = "Unauthorized access with the api key " + api_key
super().__init__(msg, *args)
class TencentAPIerror(Exception):
"""
exception thrown if Tencent API returns one of its errors
"""
def __init__(self, api_message):
self.api_message = str(api_message)
self.message = "Tencent API returned the following error"
def __str__(self):
return "{}: {}".format(self.message, self.api_message)
class BaiduAPIerror(Exception):
"""
exception thrown if Baidu API returns one of its errors
"""
def __init__(self, api_message):
self.api_message = str(api_message)
self.message = "Baidu API returned the following error"
def __str__(self):
return "{}: {}".format(self.message, self.api_message)
| 5,691 | 26.23445 | 97 | py |
deep-translator | deep-translator-master/deep_translator/google.py | """
google translator API
"""
__copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
from typing import List, Optional
import requests
from bs4 import BeautifulSoup
from deep_translator.base import BaseTranslator
from deep_translator.constants import BASE_URLS
from deep_translator.exceptions import (
RequestError,
TooManyRequests,
TranslationNotFound,
)
from deep_translator.validate import is_empty, is_input_valid, request_failed
class GoogleTranslator(BaseTranslator):
"""
class that wraps functions, which use Google Translate under the hood to translate text(s)
"""
def __init__(
self,
source: str = "auto",
target: str = "en",
proxies: Optional[dict] = None,
**kwargs
):
"""
@param source: source language to translate from
@param target: target language to translate to
"""
self.proxies = proxies
super().__init__(
base_url=BASE_URLS.get("GOOGLE_TRANSLATE"),
source=source,
target=target,
element_tag="div",
element_query={"class": "t0"},
payload_key="q", # key of text in the url
**kwargs
)
self._alt_element_query = {"class": "result-container"}
def translate(self, text: str, **kwargs) -> str:
"""
function to translate a text
@param text: desired text to translate
@return: str: translated text
"""
if is_input_valid(text, max_chars=5000):
text = text.strip()
if self._same_source_target() or is_empty(text):
return text
self._url_params["tl"] = self._target
self._url_params["sl"] = self._source
if self.payload_key:
self._url_params[self.payload_key] = text
response = requests.get(
self._base_url, params=self._url_params, proxies=self.proxies
)
if response.status_code == 429:
raise TooManyRequests()
if request_failed(status_code=response.status_code):
raise RequestError()
soup = BeautifulSoup(response.text, "html.parser")
element = soup.find(self._element_tag, self._element_query)
response.close()
if not element:
element = soup.find(self._element_tag, self._alt_element_query)
if not element:
raise TranslationNotFound(text)
if element.get_text(strip=True) == text.strip():
to_translate_alpha = "".join(
ch for ch in text.strip() if ch.isalnum()
)
translated_alpha = "".join(
ch for ch in element.get_text(strip=True) if ch.isalnum()
)
if (
to_translate_alpha
and translated_alpha
and to_translate_alpha == translated_alpha
):
self._url_params["tl"] = self._target
if "hl" not in self._url_params:
return text.strip()
del self._url_params["hl"]
return self.translate(text)
else:
return element.get_text(strip=True)
def translate_file(self, path: str, **kwargs) -> str:
"""
translate directly from file
@param path: path to the target file
@type path: str
@param kwargs: additional args
@return: str
"""
return self._translate_file(path, **kwargs)
def translate_batch(self, batch: List[str], **kwargs) -> List[str]:
"""
translate a list of texts
@param batch: list of texts you want to translate
@return: list of translations
"""
return self._translate_batch(batch, **kwargs)
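# Usage sketch (illustrative only; performs a real HTTP request):
#   GoogleTranslator(source="auto", target="de").translate("keep it up, you are awesome")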
| 3,935 | 31 | 94 | py |
deep-translator | deep-translator-master/deep_translator/libre.py | """
LibreTranslate API
"""
__copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
import os
from typing import List, Optional
import requests
from deep_translator.base import BaseTranslator
from deep_translator.constants import (
BASE_URLS,
LIBRE_ENV_VAR,
LIBRE_LANGUAGES_TO_CODES,
)
from deep_translator.exceptions import (
ApiKeyException,
AuthorizationException,
ServerException,
TranslationNotFound,
)
from deep_translator.validate import is_empty, is_input_valid, request_failed
class LibreTranslator(BaseTranslator):
"""
class that wraps functions, which use libre translator under the hood to translate text(s)
"""
def __init__(
self,
source: str = "en",
target: str = "es",
api_key: Optional[str] = os.getenv(LIBRE_ENV_VAR, None),
use_free_api: bool = True,
custom_url: Optional[str] = None,
**kwargs
):
"""
@param api_key: your api key
@param source: source language to translate from
List of LibreTranslate endpoint can be found at :
https://github.com/LibreTranslate/LibreTranslate#mirrors
Some require an API key
@param target: target language to translate to
@param use_free_api: set True if you want to use the free api.
This means a url that does not require and api key would be used
@param custom_url: you can use a custom endpoint
"""
if not api_key:
raise ApiKeyException(env_var=LIBRE_ENV_VAR)
self.api_key = api_key
url = (
BASE_URLS.get("LIBRE")
if not use_free_api
else BASE_URLS.get("LIBRE_FREE")
)
super().__init__(
base_url=url if not custom_url else custom_url,
source=source,
target=target,
languages=LIBRE_LANGUAGES_TO_CODES,
)
def translate(self, text: str, **kwargs) -> str:
"""
        function that uses LibreTranslate to translate a text
@param text: desired text to translate
@return: str: translated text
"""
if is_input_valid(text):
if self._same_source_target() or is_empty(text):
return text
translate_endpoint = "translate"
params = {
"q": text,
"source": self._source,
"target": self._target,
"format": "text",
}
# Add API Key if required
if self.api_key:
params["api_key"] = self.api_key
# Do the request and check the connection.
try:
response = requests.post(
self._base_url + translate_endpoint, params=params
)
except ConnectionError:
raise ServerException(503)
# If the answer is not success, raise server exception.
if response.status_code == 403:
raise AuthorizationException(self.api_key)
elif request_failed(status_code=response.status_code):
raise ServerException(response.status_code)
# Get the response and check is not empty.
res = response.json()
if not res:
raise TranslationNotFound(text)
# Process and return the response.
return res["translatedText"]
def translate_file(self, path: str, **kwargs) -> str:
"""
translate directly from file
@param path: path to the target file
@type path: str
@param kwargs: additional args
@return: str
"""
return self._translate_file(path, **kwargs)
def translate_batch(self, batch: List[str], **kwargs) -> List[str]:
"""
translate a list of texts
@param batch: list of texts you want to translate
@return: list of translations
"""
return self._translate_batch(batch, **kwargs)
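# Usage sketch (illustrative only; note that this class requires an api_key
# argument even when targeting the free mirror):
#   LibreTranslator(source="en", target="es", api_key="...").translate("hello")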
| 4,002 | 31.282258 | 94 | py |
deep-translator | deep-translator-master/deep_translator/linguee.py | """
linguee translator API
"""
__copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
from typing import List, Optional, Union
import requests
from bs4 import BeautifulSoup
from requests.utils import requote_uri
from deep_translator.base import BaseTranslator
from deep_translator.constants import BASE_URLS, LINGUEE_LANGUAGES_TO_CODES
from deep_translator.exceptions import (
ElementNotFoundInGetRequest,
NotValidPayload,
RequestError,
TooManyRequests,
TranslationNotFound,
)
from deep_translator.validate import is_empty, is_input_valid, request_failed
class LingueeTranslator(BaseTranslator):
"""
class that wraps functions, which use the linguee translator under the hood to translate word(s)
"""
def __init__(
self,
source: str = "en",
target: str = "de",
proxies: Optional[dict] = None,
**kwargs,
):
"""
@param source: source language to translate from
@param target: target language to translate to
"""
self.proxies = proxies
super().__init__(
base_url=BASE_URLS.get("LINGUEE"),
source=source,
target=target,
languages=LINGUEE_LANGUAGES_TO_CODES,
element_tag="a",
element_query={"class": "dictLink featured"},
payload_key=None, # key of text in the url
)
def translate(
self, word: str, return_all: bool = False, **kwargs
) -> Union[str, List[str]]:
"""
function that uses linguee to translate a word
@param word: word to translate
@type word: str
        @param return_all: set to True to return all synonyms of the translated word
@type return_all: bool
@return: str: translated word
"""
if self._same_source_target() or is_empty(word):
return word
if is_input_valid(word, max_chars=50):
# %s-%s/translation/%s.html
url = f"{self._base_url}{self._source}-{self._target}/search/?source={self._source}&query={word}"
url = requote_uri(url)
response = requests.get(url, proxies=self.proxies)
if response.status_code == 429:
raise TooManyRequests()
if request_failed(status_code=response.status_code):
raise RequestError()
soup = BeautifulSoup(response.text, "html.parser")
elements = soup.find_all(self._element_tag, self._element_query)
response.close()
if not elements:
raise ElementNotFoundInGetRequest(elements)
filtered_elements = []
for el in elements:
try:
pronoun = el.find(
"span", {"class": "placeholder"}
).get_text(strip=True)
except AttributeError:
pronoun = ""
filtered_elements.append(
el.get_text(strip=True).replace(pronoun, "")
)
if not filtered_elements:
raise TranslationNotFound(word)
return filtered_elements if return_all else filtered_elements[0]
def translate_words(self, words: List[str], **kwargs) -> List[str]:
"""
translate a batch of words together by providing them in a list
@param words: list of words you want to translate
@param kwargs: additional args
@return: list of translated words
"""
if not words:
raise NotValidPayload(words)
translated_words = []
for word in words:
translated_words.append(self.translate(word=word, **kwargs))
return translated_words
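# Usage sketch (illustrative only; Linguee works on the word/phrase level and
# takes full language names, per LINGUEE_LANGUAGES_TO_CODES):
#   LingueeTranslator(source="english", target="german").translate("cucumber")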
| 3,744 | 31.284483 | 109 | py |
deep-translator | deep-translator-master/deep_translator/microsoft.py | # -*- coding: utf-8 -*-
__copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
import logging
import os
import sys
from typing import List, Optional
import requests
from deep_translator.base import BaseTranslator
from deep_translator.constants import BASE_URLS, MSFT_ENV_VAR
from deep_translator.exceptions import ApiKeyException, MicrosoftAPIerror
from deep_translator.validate import is_input_valid
class MicrosoftTranslator(BaseTranslator):
"""
the class that wraps functions, which use the Microsoft translator under the hood to translate word(s)
"""
def __init__(
self,
source: str = "auto",
target: str = "en",
api_key: Optional[str] = os.getenv(MSFT_ENV_VAR, None),
region: Optional[str] = None,
proxies: Optional[dict] = None,
**kwargs,
):
"""
        @param api_key: your Microsoft API key (required)
        @param region: your Microsoft resource region/location (optional)
"""
if not api_key:
raise ApiKeyException(env_var=MSFT_ENV_VAR)
self.api_key = api_key
self.proxies = proxies
self.headers = {
"Ocp-Apim-Subscription-Key": self.api_key,
"Content-type": "application/json",
}
# parameter region is not required but very common and goes to headers if passed
if region:
self.region = region
self.headers["Ocp-Apim-Subscription-Region"] = self.region
super().__init__(
base_url=BASE_URLS.get("MICROSOFT_TRANSLATE"),
source=source,
target=target,
languages=self._get_supported_languages(),
**kwargs,
)
# this function get the actual supported languages of the msft translator and store them in a dict, where
# the keys are the abbreviations and the values are the languages
# a common variable used in the other translators would be: MICROSOFT_CODES_TO_LANGUAGES
def _get_supported_languages(self):
microsoft_languages_api_url = (
"https://api.cognitive.microsofttranslator.com/languages?api-version=3.0&scope"
"=translation "
)
microsoft_languages_response = requests.get(
microsoft_languages_api_url
)
translation_dict = microsoft_languages_response.json()["translation"]
return {
translation_dict[k]["name"].lower(): k.lower()
for k in translation_dict.keys()
}
def translate(self, text: str, **kwargs) -> str:
"""
function that uses microsoft translate to translate a text
@param text: desired text to translate
@return: str: translated text
"""
# a body must be a list of dicts to process multiple texts;
# I have not added multiple text processing here since it is covered by the translate_batch method
response = None
if is_input_valid(text):
self._url_params["from"] = self._source
self._url_params["to"] = self._target
valid_microsoft_json = [{"text": text}]
try:
response = requests.post(
self._base_url,
params=self._url_params,
headers=self.headers,
json=valid_microsoft_json,
proxies=self.proxies,
)
except requests.exceptions.RequestException:
exc_type, value, traceback = sys.exc_info()
logging.warning(f"Returned error: {exc_type.__name__}")
# Where Microsoft API responds with an api error, it returns a dict in response.json()
if type(response.json()) is dict:
error_message = response.json()["error"]
raise MicrosoftAPIerror(error_message)
# Where it responds with a translation, its response.json() is a list
# e.g. [{'translations': [{'text':'Hello world!', 'to': 'en'}]}]
elif type(response.json()) is list:
all_translations = [
i["text"] for i in response.json()[0]["translations"]
]
return "\n".join(all_translations)
def translate_file(self, path: str, **kwargs) -> str:
"""
translate from a file
@param path: path to file
@return: translated text
"""
return self._translate_file(path, **kwargs)
def translate_batch(self, batch: List[str], **kwargs) -> List[str]:
"""
translate a batch of texts
@param batch: list of texts to translate
@return: list of translations
"""
return self._translate_batch(batch, **kwargs)
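# Usage sketch (illustrative only; assumes an Azure Translator key and, typically,
# a resource region such as "westeurope"):
#   MicrosoftTranslator(api_key="...", region="westeurope", target="german").translate("hello")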
| 4,768 | 35.684615 | 109 | py |
deep-translator | deep-translator-master/deep_translator/mymemory.py | """
mymemory translator API
"""
__copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
from typing import List, Optional, Union
import requests
from deep_translator.base import BaseTranslator
from deep_translator.constants import BASE_URLS, MY_MEMORY_LANGUAGES_TO_CODES
from deep_translator.exceptions import (
RequestError,
TooManyRequests,
TranslationNotFound,
)
from deep_translator.validate import is_empty, is_input_valid, request_failed
class MyMemoryTranslator(BaseTranslator):
"""
class that uses the mymemory translator to translate texts
"""
def __init__(
self,
source: str = "auto",
target: str = "en",
proxies: Optional[dict] = None,
**kwargs,
):
"""
@param source: source language to translate from
@param target: target language to translate to
"""
self.proxies = proxies
self.email = kwargs.get("email", None)
super().__init__(
base_url=BASE_URLS.get("MYMEMORY"),
source=source,
target=target,
payload_key="q",
languages=MY_MEMORY_LANGUAGES_TO_CODES,
)
def translate(
self, text: str, return_all: bool = False, **kwargs
) -> Union[str, List[str]]:
"""
function that uses the mymemory translator to translate a text
@param text: desired text to translate
@type text: str
        @param return_all: set to True to return all synonyms/similar matches of the translated text
@return: str or list
"""
if is_input_valid(text, max_chars=500):
text = text.strip()
if self._same_source_target() or is_empty(text):
return text
self._url_params["langpair"] = f"{self._source}|{self._target}"
if self.payload_key:
self._url_params[self.payload_key] = text
if self.email:
self._url_params["de"] = self.email
response = requests.get(
self._base_url, params=self._url_params, proxies=self.proxies
)
if response.status_code == 429:
raise TooManyRequests()
if request_failed(status_code=response.status_code):
raise RequestError()
data = response.json()
if not data:
                raise TranslationNotFound(text)
response.close()
translation = data.get("responseData").get("translatedText")
all_matches = data.get("matches", [])
if translation:
if not return_all:
return translation
else:
# append translation at the start of the matches list
return [translation] + list(all_matches)
            else:
                matches = (match["translation"] for match in all_matches)
                next_match = next(matches, None)
                if next_match is None:
                    raise TranslationNotFound(text)
                return next_match if not return_all else list(all_matches)
def translate_file(self, path: str, **kwargs) -> str:
"""
translate directly from file
@param path: path to the target file
@type path: str
@param kwargs: additional args
@return: str
"""
return self._translate_file(path, **kwargs)
def translate_batch(self, batch: List[str], **kwargs) -> List[str]:
"""
translate a list of texts
@param batch: list of texts you want to translate
@return: list of translations
"""
return self._translate_batch(batch, **kwargs)
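# a minimal usage sketch of the class above; the language codes below are
# assumptions and must match entries in MY_MEMORY_LANGUAGES_TO_CODES
if __name__ == "__main__":
    translator = MyMemoryTranslator(source="en-GB", target="de-DE")  # codes assumed valid
    print(translator.translate("keep it up"))
    # return_all=True additionally returns the raw matches reported by MyMemory
    print(translator.translate("keep it up", return_all=True))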
| 3,627 | 30.824561 | 92 | py |
deep-translator | deep-translator-master/deep_translator/papago.py | """
papago translator API
"""
__copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
import json
from typing import List, Optional
import requests
from deep_translator.base import BaseTranslator
from deep_translator.constants import BASE_URLS, PAPAGO_LANGUAGE_TO_CODE
from deep_translator.exceptions import TranslationNotFound
from deep_translator.validate import is_input_valid, request_failed
class PapagoTranslator(BaseTranslator):
"""
    class that wraps functions which use the Papago translation API under the hood to translate text(s)
"""
def __init__(
self,
client_id: Optional[str] = None,
secret_key: Optional[str] = None,
source: str = "auto",
target: str = "en",
**kwargs,
):
"""
@param source: source language to translate from
@param target: target language to translate to
"""
if not client_id or not secret_key:
raise Exception(
"Please pass your client id and secret key! visit the papago website for more infos"
)
self.client_id = client_id
self.secret_key = secret_key
super().__init__(
base_url=BASE_URLS.get("PAPAGO_API"),
source=source,
target=target,
languages=PAPAGO_LANGUAGE_TO_CODE,
**kwargs,
)
def translate(self, text: str, **kwargs) -> str:
"""
        function that uses the Papago API to translate a text
@param text: desired text to translate
@return: str: translated text
"""
if is_input_valid(text):
payload = {
"source": self._source,
"target": self._target,
"text": text,
}
headers = {
"X-Naver-Client-Id": self.client_id,
"X-Naver-Client-Secret": self.secret_key,
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
}
response = requests.post(
self._base_url, headers=headers, data=payload
)
if request_failed(status_code=response.status_code):
raise Exception(
f"Translation error! -> status code: {response.status_code}"
)
res_body = json.loads(response.text)
if "message" not in res_body:
raise TranslationNotFound(text)
msg = res_body.get("message")
result = msg.get("result", None)
if not result:
raise TranslationNotFound(text)
translated_text = result.get("translatedText")
return translated_text
def translate_file(self, path: str, **kwargs) -> str:
"""
translate directly from file
@param path: path to the target file
@type path: str
@param kwargs: additional args
@return: str
"""
return self._translate_file(path, **kwargs)
def translate_batch(self, batch: List[str], **kwargs) -> List[str]:
"""
translate a list of texts
@param batch: list of texts you want to translate
@return: list of translations
"""
return self._translate_batch(batch, **kwargs)
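# a minimal usage sketch of the class above; the client_id and secret_key values
# are placeholders for real Naver developer credentials
if __name__ == "__main__":
    translator = PapagoTranslator(
        client_id="<your-client-id>",    # placeholder
        secret_key="<your-secret-key>",  # placeholder
        source="en",
        target="ko",
    )
    print(translator.translate("Hello"))  # e.g. a Korean greeting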
| 3,285 | 31.215686 | 100 | py |
deep-translator | deep-translator-master/deep_translator/pons.py | """
pons translator API
"""
__copyright__ = "Copyright (C) 2020 Nidhal Baccouri"
from typing import List, Optional, Union
import requests
from bs4 import BeautifulSoup
from requests.utils import requote_uri
from deep_translator.base import BaseTranslator
from deep_translator.constants import BASE_URLS, PONS_CODES_TO_LANGUAGES
from deep_translator.exceptions import (
ElementNotFoundInGetRequest,
NotValidPayload,
RequestError,
TooManyRequests,
TranslationNotFound,
)
from deep_translator.validate import is_empty, is_input_valid, request_failed
class PonsTranslator(BaseTranslator):
"""
class that uses PONS translator to translate words
"""
def __init__(
self,
source: str,
target: str = "en",
proxies: Optional[dict] = None,
**kwargs,
):
"""
@param source: source language to translate from
@param target: target language to translate to
"""
self.proxies = proxies
super().__init__(
base_url=BASE_URLS.get("PONS"),
languages=PONS_CODES_TO_LANGUAGES,
source=source,
target=target,
payload_key=None,
element_tag="div",
element_query={"class": "target"},
**kwargs,
)
def translate(
self, word: str, return_all: bool = False, **kwargs
) -> Union[str, List[str]]:
"""
function that uses PONS to translate a word
@param word: word to translate
@type word: str
        @param return_all: set to True to return all synonyms of the translated word
@type return_all: bool
@return: str: translated word
"""
if is_input_valid(word, max_chars=50):
if self._same_source_target() or is_empty(word):
return word
url = f"{self._base_url}{self._source}-{self._target}/{word}"
url = requote_uri(url)
response = requests.get(url, proxies=self.proxies)
if response.status_code == 429:
raise TooManyRequests()
if request_failed(status_code=response.status_code):
raise RequestError()
soup = BeautifulSoup(response.text, "html.parser")
            result_list = soup.find("div", {"class": "result_list"})
            if result_list is None:
                raise ElementNotFoundInGetRequest(word)
            elements = result_list.findAll(
                self._element_tag, self._element_query
            )
response.close()
if not elements:
raise ElementNotFoundInGetRequest(word)
filtered_elements = []
for el in elements:
temp = []
for e in el.findAll("a"):
temp.append(e.get_text())
filtered_elements.append(" ".join(temp))
if not filtered_elements:
raise ElementNotFoundInGetRequest(word)
            word_list = [
                w for w in filtered_elements if w and len(w) > 1
            ]
if not word_list:
raise TranslationNotFound(word)
return word_list if return_all else word_list[0]
def translate_words(self, words: List[str], **kwargs) -> List[str]:
"""
translate a batch of words together by providing them in a list
@param words: list of words you want to translate
@param kwargs: additional args
@return: list of translated words
"""
if not words:
raise NotValidPayload(words)
translated_words = []
for word in words:
translated_words.append(self.translate(word=word, **kwargs))
return translated_words
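# a minimal usage sketch of the class above; PONS works on single words, and the
# language names below are assumed to be accepted by the shared language validation
if __name__ == "__main__":
    translator = PonsTranslator(source="english", target="french")
    print(translator.translate("good"))                    # first candidate only
    print(translator.translate("good", return_all=True))   # all candidates
    print(translator.translate_words(["good", "evening"]))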
| 3,657 | 29.739496 | 83 | py |