File: OLED-master/install-scripts/logger.sh

#!/usr/bin/env bash
# Colours for logging messages
red='\033[0;31m'
green='\033[0;32m'
orange='\033[0;33m'
cyan='\033[0;36m'
noColour='\033[0m'
# param $1: info message
log_info(){
  echo -e "${cyan}$(date) ${green}[ INFO ] $1 ${noColour}"
}
# param $1: warning message
log_warn(){
  echo -e "${cyan}$(date) ${orange}[ WARN ] $1 ${noColour}"
}
# param $1: error message
log_error(){
  echo -e "${cyan}$(date) ${red}[ ERROR ] $1 ${noColour}"
}
# param $1: error message (default is "An error occurred, exiting...")
# param $2: error code (default is 1)
exit_error(){
  # only fire if the previous command failed
  if [ $? -ne 0 ]; then
    log_error "${1:-An error occurred, exiting...}"
    exit "${2:-1}"
  fi
}
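# Example usage (a sketch with hypothetical paths; assumes an install script
# sources this file first):
#   source "$(dirname "$0")/logger.sh"
#   cp oled.conf.default /etc/oled.conf
#   exit_error "Failed to install the default configuration" 2
#   log_info "Configuration installed"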

File: OLED-master/project/CodeStyle.scala

import sbt.{Def, _}
import com.typesafe.sbt.SbtScalariform.ScalariformKeys
import scalariform.formatter.preferences.AlignSingleLineCaseStatements.MaxArrowIndent
import scalariform.formatter.preferences._
object CodeStyle {
lazy val formatSettings: Seq[Def.Setting[IFormattingPreferences]] = Seq(
ScalariformKeys.preferences := setPreferences(ScalariformKeys.preferences.value),
ScalariformKeys.preferences in Compile := setPreferences(ScalariformKeys.preferences.value),
ScalariformKeys.preferences in Test := setPreferences(ScalariformKeys.preferences.value)
)
def setPreferences(preferences: IFormattingPreferences): IFormattingPreferences = preferences
.setPreference(AlignArguments, true)
.setPreference(AlignParameters, false)
.setPreference(AlignSingleLineCaseStatements, false)
.setPreference(MaxArrowIndent, 40)
.setPreference(AllowParamGroupsOnNewlines, true)
.setPreference(CompactControlReadability, false)
.setPreference(CompactStringConcatenation, false)
.setPreference(DanglingCloseParenthesis, Preserve)
.setPreference(DoubleIndentConstructorArguments, true)
.setPreference(DoubleIndentMethodDeclaration, true)
.setPreference(FirstArgumentOnNewline, Preserve)
.setPreference(FirstParameterOnNewline, Force)
.setPreference(IndentLocalDefs, true)
.setPreference(IndentPackageBlocks, true)
.setPreference(IndentSpaces, 2)
.setPreference(IndentWithTabs, false)
.setPreference(MultilineScaladocCommentsStartOnFirstLine, false)
.setPreference(NewlineAtEndOfFile, true)
.setPreference(PlaceScaladocAsterisksBeneathSecondAsterisk, true)
.setPreference(PreserveSpaceBeforeArguments, true)
.setPreference(RewriteArrowSymbols, false)
.setPreference(SingleCasePatternOnNewline, false)
.setPreference(SpaceBeforeColon, false)
.setPreference(SpaceBeforeContextColon, false)
.setPreference(SpaceInsideBrackets, false)
.setPreference(SpaceInsideParentheses, false)
.setPreference(SpacesAroundMultiImports, false)
.setPreference(SpacesWithinPatternBinders, true)
}
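
A minimal sketch of how these formatting settings would be wired into an sbt project (hypothetical build definition; in this repository they are applied automatically through OLEDBuild's projectSettings below):

lazy val formatted = (project in file("."))
  .settings(CodeStyle.formatSettings: _*)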

File: OLED-master/project/Dependency.scala

/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import sbt._
object Dependency {
object v {
//final val Akka = "2.5.17"
final val Akka = "2.5.6"
final val ScalaLogging = "3.9.2"
final val Logback = "1.2.3"
final val MongoDB = "3.1.1"
final val ScalaTest = "3.0.5"
final val ScalaZ = "7.2.29"
final val SizeOf = "0.1"
final val Parboiled = "2.1.8"
final val Optimus = "3.2.0"
final val LoMRF = "1.0.0-SNAPSHOT"
}
// Akka.io
lazy val Akka = "com.typesafe.akka" %% "akka-actor" % v.Akka
// Logging using SLF4J and logback
lazy val Logging = Seq(
"com.typesafe.scala-logging" %% "scala-logging" % v.ScalaLogging,
"ch.qos.logback" % "logback-classic" % v.Logback
)
// MongoDB (update to "org.mongodb.scala" %% "mongo-scala-driver" % "2.1.0")
lazy val MongoDB = "org.mongodb" %% "casbah" % v.MongoDB
// ScalaTest for UNIT testing
lazy val ScalaTest = "org.scalatest" %% "scalatest" % v.ScalaTest % "test"
// Tools
lazy val Tools = Seq(
"org.scalaz" %% "scalaz-core" % v.ScalaZ,
"com.madhukaraphatak" % "java-sizeof_2.11" % v.SizeOf,
"org.parboiled" %% "parboiled" % v.Parboiled,
"com.github.vagmcs" %% "scalatikz" % "0.4.4"
)
// Optimus library for linear and quadratic optimization
lazy val Optimus = Seq(
"com.github.vagmcs" %% "optimus" % v.Optimus,
"com.github.vagmcs" %% "optimus-solver-lp" % v.Optimus
)
// LoMRF library for Markov Logic Networks
lazy val LoMRF = "com.github.anskarl" %% "lomrf" % v.LoMRF
//lazy val vegas = "org.vegas-viz" %% "vegas" % "0.3.12" // plotting library
}
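
A hedged sketch of how these dependency groups would typically be consumed from a build definition (hypothetical wiring; the names below are the vals defined above):

import Dependency._
libraryDependencies ++= Logging ++ Tools ++ Optimus ++ Seq(Akka, MongoDB, ScalaTest, LoMRF)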

File: OLED-master/project/OLEDBuild.scala

/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import sbt._
import sbt.Keys._
import sbt.plugins.JvmPlugin
import sbtassembly.AssemblyPlugin
import sbtassembly.AssemblyPlugin.autoImport._
import de.heikoseeberger.sbtheader.HeaderPlugin
import de.heikoseeberger.sbtheader.License._
import de.heikoseeberger.sbtheader.HeaderPlugin.autoImport._
object OLEDBuild extends AutoPlugin {
private val logger = ConsoleLogger()
override def requires: Plugins = JvmPlugin && AssemblyPlugin && HeaderPlugin
// Allow the plug-in to be included automatically
override def trigger: PluginTrigger = allRequirements
override def projectSettings: Seq[Setting[_]] = settings
private val javaVersion: Double = sys.props("java.specification.version").toDouble
private lazy val settings: Seq[Setting[_]] = {
logger.info(s"Loading settings for Java $javaVersion or higher.")
if (javaVersion < 1.8) sys.error("Java 8 or higher is required for building OLED.")
else commonSettings ++ assemblySettings ++ javaSettings ++ CodeStyle.formatSettings
}
private val commonSettings: Seq[Setting[_]] = Seq(
name := "OLED",
organization := "com.github.nkatzz",
description := "A system for online learning of event definitions.",
headerLicense := Some(GPLv3("2016", "Nikos Katzouris")),
scalaVersion := "2.12.9",
autoScalaLibrary := false,
managedScalaInstance := true,
resolvers ++= Seq(
Resolver.mavenLocal,
Resolver.typesafeRepo("releases"),
Resolver.sonatypeRepo("releases"),
Resolver.sonatypeRepo("snapshots")
),
libraryDependencies ++= Seq(
"org.scala-lang" % "scala-library" % scalaVersion.value,
"org.scala-lang" % "scala-reflect" % scalaVersion.value,
"org.scala-lang.modules" %% "scala-parser-combinators" % "1.1.2"
),
dependencyOverrides ++= Seq(
"org.scala-lang" % "scala-compiler" % scalaVersion.value,
"org.scala-lang" % "scala-library" % scalaVersion.value,
"org.scala-lang" % "scala-reflect" % scalaVersion.value,
"org.scala-lang.modules" %% "scala-parser-combinators" % "1.1.2",
"org.scala-lang.modules" %% "scala-xml" % "1.2.0"
)
)
private lazy val assemblySettings: Seq[Setting[_]] = Seq(
assemblyJarName in assembly := s"${name.value.toLowerCase}-${version.value}.jar",
/*
* Avoid the 'deduplicate: different file contents found in the following (logback.xml)' error.
* This error started appearing after the merge with LoMRF.
*/
assemblyMergeStrategy in assembly := {
case PathList("META-INF", _ @ _*) => MergeStrategy.discard
case _ => MergeStrategy.first
}
)
private lazy val javaSettings: Seq[Setting[_]] = Seq(
javacOptions ++= Seq("-source", "1.8", "-target", "1.8", "-Xlint:unchecked", "-Xlint:deprecation"),
javaOptions ++= Seq(
"-XX:+DoEscapeAnalysis",
"-XX:+UseFastAccessorMethods",
"-XX:+OptimizeStringConcat",
"-Dlogback.configurationFile=src/main/resources/logback.xml")
)
}

File: OLED-master/src/main/scala/app/runners/LoMCTSRunner.scala

/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners
import akka.actor.{ActorSystem, Props}
import app.runners.MLNDataHandler.MLNDataOptions
import app.runutils.{CMDArgs, Globals}
import logic.Examples.Example
import lomcts.LoMCTS
object LoMCTSRunner {
// --inpath=/home/nkatz/dev/iled/datasets/CaviarMLN/move
// --foldpath=/home/nkatz/dev/CAVIAR_MLN/CAVIAR_MLN/move/fold_2
def main(args: Array[String]) = {
val argsok = CMDArgs.argsOk(args)
if (!argsok._1) {
println(argsok._2)
System.exit(-1)
} else {
val foldPath = args.map(x => x.split("=")).find(x => x(0) == "--foldpath").getOrElse(throw new RuntimeException("--foldpath missing."))(1)
val inps = CMDArgs.getOLEDInputArgs(args)
//val trainingDataOptions = new MLNDataOptions(foldPath, inps.chunkSize, take = 10000)
val trainingDataOptions = new MLNDataOptions(foldPath, inps.chunkSize)
val testingDataOptions = new MLNDataOptions(foldPath, inps.chunkSize)
val trainingDataFunction: MLNDataOptions => Iterator[Example] = MLNDataHandler.getTrainingData
val testingDataFunction: MLNDataOptions => Iterator[Example] = MLNDataHandler.getTestingData
/*-----------------------------------------------*/
Globals.glvalues("perfect-fit") = "false"
Globals.glvalues("smallest-nonempty") = "true"
/*-----------------------------------------------*/
//val msg = "eval"
val msg = "start"
if (msg == "eval" && inps.evalth == "None") {
throw new RuntimeException("No theory file provided (start msg = eval)")
}
val system = ActorSystem("LoMCTSLearningSystem")
system.actorOf(Props(new LoMCTS(inps, trainingDataOptions, testingDataOptions,
trainingDataFunction, testingDataFunction)), name = "lomcts") ! msg
}
}
}

File: OLED-master/src/main/scala/app/runners/OLEDCaviarIntervalsRunner.scala

/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners
import akka.actor.{ActorSystem, Props}
import app.runutils.CMDArgs
import app.runutils.IOHandling.MongoSource
import com.mongodb.casbah.{MongoClient, MongoCollection}
import experiments.datautils.caviar_intervals.{MeetingTrainingData, MovingTrainingData}
import logic.Examples.Example
import oled.single_core.Master
import utils.CaviarUtils
import utils.DataUtils.{DataAsIntervals, Interval}
import scala.util.Random
/**
* Created by nkatz on 7/3/17.
*/
object OLEDCaviarIntervalsRunner {
private class DataOptions(
val dbName: String,
val collectionName: String = "examples",
val chunkSize: Int = 1,
val limit: Double = Double.PositiveInfinity.toInt,
val targetConcept: String = "None",
val sortDbByField: String = "None",
val sort: String = "ascending",
val intervals: List[Interval] = Nil,
val trainSetid: Int,
val randomOrder: Boolean = false,
val shuffle: Boolean = false) extends MongoSource
def main(args: Array[String]) = {
val trainSetId = args.map(x => x.split("=")).find(x => x(0) == "--trainset").
getOrElse(throw new RuntimeException("--trainset missing."))(1).toInt
//val trainSetId = 2 // change this to run different folds
val argsok = CMDArgs.argsOk(args)
if (!argsok._1) {
println(argsok._2)
System.exit(-1)
} else {
val runningOptions = CMDArgs.getOLEDInputArgs(args)
val trainingDataOptions =
new DataOptions(dbName = runningOptions.train,
collectionName = runningOptions.mongoCollection,
chunkSize = runningOptions.chunkSize,
limit = runningOptions.dataLimit,
targetConcept = runningOptions.targetHLE,
sortDbByField = "time",
trainSetid = trainSetId,
randomOrder = runningOptions.randomOrder)
val testingDataOptions = trainingDataOptions
val trainingDataFunction: DataOptions => Iterator[Example] = getTrainingData
val testingDataFunction: DataOptions => Iterator[Example] = getTestingData
val system = ActorSystem("HoeffdingLearningSystem")
val startMsg = if (runningOptions.evalth != "None") "eval" else "start"
system.actorOf(Props(new Master(runningOptions, trainingDataOptions,
testingDataOptions, trainingDataFunction, testingDataFunction)), name = "Master-Actor") ! startMsg
}
}
def getTrainingData(opts: DataOptions) = {
val data = getIntervals(opts)
val trainingIntervals = data.trainingSet
val mc = MongoClient()
val collection: MongoCollection = mc(opts.dbName)(opts.collectionName)
CaviarUtils.getDataFromIntervals(collection, opts.targetConcept, trainingIntervals, opts.chunkSize)
}
def getTestingData(opts: DataOptions) = {
val data = getIntervals(opts)
val testingIntervals = data.testingSet
val mc = MongoClient()
val collection: MongoCollection = mc(opts.dbName)(opts.collectionName)
//CaviarUtils.getDataFromIntervals(collection, opts.targetConcept, testingIntervals, opts.chunkSize)
CaviarUtils.getDataFromIntervals(collection, opts.targetConcept, testingIntervals, 200) // fix the chunk size in testing
}
def getIntervals(opts: DataOptions) = {
if (opts.targetConcept == "meeting") {
MeetingTrainingData.getMeetingTrainingData(opts.trainSetid, randomOrder = opts.randomOrder)
} else {
MovingTrainingData.getMovingTrainingData(opts.trainSetid, randomOrder = opts.randomOrder)
}
}
}

File: OLED-master/src/main/scala/app/runners/OLEDDefaultRunner.scala

/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners
import akka.actor.{ActorSystem, Props}
import app.runutils.CMDArgs
import app.runutils.IOHandling.MongoSource
import com.mongodb.casbah.{MongoClient, MongoCollection}
import com.typesafe.scalalogging.LazyLogging
import experiments.caviar.FullDatasetHoldOut.MongoDataOptions
import experiments.caviar.{FullDatasetHoldOut, MeetingTrainTestSets}
import logic.Examples.Example
import oled.single_core.Master
import utils.DataUtils.Interval
/**
* Created by nkatz on 6/30/17.
*/
object OLEDDefaultRunner extends LazyLogging {
def main(args: Array[String]) = {
val argsok = CMDArgs.argsOk(args)
if (!argsok._1) {
logger.error(argsok._2)
System.exit(-1)
} else {
val runningOptions = CMDArgs.getOLEDInputArgs(args)
///* This works for the github demo
val trainingDataOptions = new DefaultMongoDataOptions(
dbName = runningOptions.train,
collectionName = runningOptions.mongoCollection,
chunkSize = runningOptions.chunkSize,
limit = runningOptions.dataLimit,
targetConcept = runningOptions.targetHLE,
sortDbByField = "_id"
)
val testingDataOptions = new DefaultMongoDataOptions(
dbName = runningOptions.test,
collectionName = runningOptions.mongoCollection,
chunkSize = runningOptions.chunkSize,
limit = runningOptions.dataLimit,
targetConcept = runningOptions.targetHLE,
sortDbByField = "None"
)
val trainingDataFunction: DefaultMongoDataOptions => Iterator[Example] = getMongoData
val testingDataFunction: DefaultMongoDataOptions => Iterator[Example] = getMongoData
//*/
/*
val dataset = MeetingTrainTestSets.meeting6
val trainingDataOptions =
new MongoDataOptions(dbNames = dataset._1,
chunkSize = runningOptions.chunkSize, targetConcept = runningOptions.targetHLE, sortDbByField = "time", what = "training")
val testingDataOptions =
new MongoDataOptions(dbNames = dataset._2,
chunkSize = runningOptions.chunkSize, targetConcept = runningOptions.targetHLE, sortDbByField = "time", what = "testing")
val trainingDataFunction: MongoDataOptions => Iterator[Example] = FullDatasetHoldOut.getMongoData
val testingDataFunction: MongoDataOptions => Iterator[Example] = FullDatasetHoldOut.getMongoData
*/
val system = ActorSystem("HoeffdingLearningSystem")
val startMsg = if (runningOptions.evalth != "None") "eval" else "start"
system.actorOf(Props(new Master(runningOptions, trainingDataOptions, testingDataOptions, trainingDataFunction,
testingDataFunction)), name = "Master-Actor") ! startMsg
}
}
private class DefaultMongoDataOptions(
val dbName: String,
val collectionName: String = "examples",
val chunkSize: Int = 1,
val limit: Double = Double.PositiveInfinity.toInt,
val targetConcept: String = "None",
val sortDbByField: String = "None",
val sort: String = "ascending",
val intervals: List[Interval] = Nil,
val examplesIds: List[String] = Nil) extends MongoSource
def getMongoData(opts: DefaultMongoDataOptions): Iterator[Example] = {
val mc = MongoClient()
val collection: MongoCollection = mc(opts.dbName)(opts.collectionName)
val data = opts.allData(collection, opts.sort, opts.sortDbByField) map { x =>
val e = Example(x)
opts.targetConcept match {
case "None" => new Example(annot = e.annotation, nar = e.narrative, _time = e.time)
case _ => new Example(annot = e.annotation filter (_.contains(opts.targetConcept)), nar = e.narrative, _time = e.time)
}
}
if (opts.chunkSize <= 1) data
else data.grouped(opts.chunkSize).map { x =>
  //data.sliding(opts.chunkSize).map { x =>
  x.foldLeft(Example()) { (z, y) =>
    new Example(annot = z.annotation ++ y.annotation, nar = z.narrative ++ y.narrative, _time = x.head.time)
  }
}
}
}

File: OLED-master/src/main/scala/app/runners/OLEDMaritimeRunner.scala

/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners
import akka.actor.{ActorSystem, Props}
import app.runutils.CMDArgs
import com.mongodb.{BasicDBList, BasicDBObject}
import com.mongodb.casbah.Imports.MongoDBObject
import com.mongodb.casbah.Imports.DBObject
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.MongoClient
import experiments.datautils.maritime_data.yet_another_attempt.MaritimeToMongo.{LLEMap, populateLLEsMap, populatePortsMap, populateSpeedLimitsMap, portsMap, speedLimitsMap}
import logic.Examples.Example
import oled.single_core.Master
import scala.io.Source
/**
* Created by nkatz on 7/5/17.
*/
// highSpeedIn:
//--inpath=/home/nkatz/dev/iled/datasets/MaritimeAegean/highSpeedIn --chunksize=1 --target=highSpeedIn --minseen=100000 --prune=0.9 --ties=0.0005 --delta=0.00000005
// stopped:
//--inpath=/home/nkatz/dev/iled/datasets/MaritimeAegean/stopped --chunksize=1 --target=stopped --minseen=100000 --prune=0.9 --ties=0.0005 --delta=0.00000005
// sailing:
//--inpath=/home/nkatz/dev/iled/datasets/MaritimeAegean/sailing --chunksize=1 --target=sailing --minseen=100000 --prune=0.9 --ties=0.0005 --delta=0.00000005
// lowSpeed
//--inpath=/home/nkatz/dev/iled/datasets/MaritimeAegean/lowSpeed --chunksize=1 --target=lowSpeed --minseen=100000 --prune=0.9 --ties=0.0005 --delta=0.00000005
// To create the lles dbs (ONLY DONE ONCE, THEN DATA ARE FETCHED FROM MONGO) do this:
// 1. populateLLEsMap(dataOpts.llePath)
// 2. llesToMongo
// For example:
/*
val dataOpts1 = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets/dataset88.txt",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition/1/lowSpeed-no-infs.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition/1/close_to_ports.csv",
chunkSize = 10,
targetConcept = "lowSpeed",
limit = 100000.0,
trainingMode = false)
populateLLEsMap(dataOpts1.llePath)
llesToMongo("brest-8-8")
*/
object OLEDMaritimeRunner {
lazy val highSpeedInDataOptionsTraining = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets/dataset1.txt",
db = "brest-1",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition/1/highSpeedIn.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition/1/close_to_ports.csv",
chunkSize = 10,
limit = 10000.0,
targetConcept = "highSpeedIn",
hlesMap = HLEsMap,
proxMap = proximityMap,
portsMap = portsMap)
lazy val highSpeedInDataOptionsTesting = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets/dataset1.txt",
db = "brest-1",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition/1/highSpeedIn.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition/1/close_to_ports.csv",
chunkSize = 100,
limit = 10000.0,
targetConcept = "highSpeedIn",
hlesMap = HLEsMap,
proxMap = proximityMap,
portsMap = portsMap)
lazy val stoppedDataOptions = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets/dataset1.txt",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition/1/stopped-no-infs.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition/1/close_to_ports.csv",
chunkSize = 10,
targetConcept = "stopped",
hlesMap = HLEsMap,
proxMap = proximityMap,
portsMap = portsMap)
lazy val sailingDataOptions = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets/dataset1.txt",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition/1/sailing-no-infs.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition/1/close_to_ports.csv",
chunkSize = 10,
targetConcept = "sailing",
hlesMap = HLEsMap,
proxMap = proximityMap,
portsMap = portsMap)
lazy val lowSpeedDataOptionsTraining = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets/dataset1.txt",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition/1/lowSpeed-no-infs.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition/1/close_to_ports.csv",
chunkSize = 10,
targetConcept = "lowSpeed",
limit = 100000.0,
trainingMode = true,
hlesMap = HLEsMap,
proxMap = proximityMap,
portsMap = portsMap)
lazy val lowSpeedDataOptionsTesting = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets/dataset1.txt",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition/1/lowSpeed-no-infs.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition/1/close_to_ports.csv",
chunkSize = 10,
targetConcept = "lowSpeed",
limit = 100000.0,
trainingMode = false,
hlesMap = HLEsMap,
proxMap = proximityMap,
portsMap = portsMap)
def main(args: Array[String]) = {
val argsok = CMDArgs.argsOk(args)
if (!argsok._1) {
println(argsok._2)
System.exit(-1)
} else {
val runOpts = CMDArgs.getOLEDInputArgs(args)
val dataOpts1 = highSpeedInDataOptionsTraining
val dataOpts2 = highSpeedInDataOptionsTesting
//val dataOpts = stoppedDataOptions
//val dataOpts = sailingDataOptions
//val dataOpts = lowSpeedDataOptions
//val dataOpts = lowSpeedDataOptionsTraining
//val dataOptsTest = lowSpeedDataOptionsTesting
val trainingDataOptions = dataOpts1
val testingDataOptions = dataOpts2
val trainingDataFunction: MaritimeDataOptions => Iterator[Example] = getData
val testingDataFunction: MaritimeDataOptions => Iterator[Example] = getData
populateHLEsMap(dataOpts1.hlePath, dataOpts1.targetConcept)
populatePortsMap(dataOpts1.closeToPortsPath)
populateProximityMap(dataOpts1.llePath)
populateSpeedLimitsMap(dataOpts1.speedLimitsPath)
///*
val system = ActorSystem("HoeffdingLearningSystem")
val startMsg = if (runOpts.evalth != "None") "eval" else "start"
system.actorOf(Props(new Master(runOpts, trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction)), name = "Master-Actor") ! startMsg
//*/
}
}
class MaritimeDataOptions(
val llePath: String = "",
val db: String = "",
val hlePath: String,
val speedLimitsPath: String = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
val closeToPortsPath: String,
val chunkSize: Int = 1,
val limit: Double = Double.PositiveInfinity.toInt,
val targetConcept: String = "None",
val trainingMode: Boolean = true,
val hlesMap: scala.collection.mutable.Map[String, AnnotationPerVessel],
val proxMap: scala.collection.mutable.Map[String, AnnotationPerVessel],
val portsMap: scala.collection.mutable.Map[String, scala.collection.mutable.Set[String]]) extends app.runutils.IOHandling.InputSource
def getData(opts: MaritimeDataOptions): Iterator[Example] = {
val mongoClient = MongoClient()
val collection = mongoClient(opts.db)("examples")
val times =
if (opts.trainingMode) {
//LLEMap.keySet.toVector.sorted.toIterator.take(opts.limit.toInt).grouped(opts.chunkSize)
collection.find().sort(MongoDBObject("time" -> 1)).limit(opts.limit.toInt).grouped(opts.chunkSize)
} else {
//LLEMap.keySet.toVector.sorted.toIterator.drop(opts.limit.toInt).grouped(opts.chunkSize)
collection.find().sort(MongoDBObject("time" -> 1)).drop(opts.limit.toInt).take(10000).grouped(opts.chunkSize)
}
times map { timeSlice => getDataSlice(timeSlice, opts.hlesMap, opts.proxMap, opts.portsMap) }
}
def getDataSlice(
objects: Seq[DBObject],
hlesMap: scala.collection.mutable.Map[String, AnnotationPerVessel],
proxMap: scala.collection.mutable.Map[String, AnnotationPerVessel],
ports: scala.collection.mutable.Map[String, scala.collection.mutable.Set[String]]) = {
def convert(o: DBObject) = {
val obj = o.asInstanceOf[BasicDBObject]
//val time = obj.get("time").toString
val atoms = obj.get("lles").asInstanceOf[BasicDBList].toList.map(_.toString).toSet
val vessels = obj.get("vessels").asInstanceOf[BasicDBList].toList.map(_.toString).toSet
val areas = obj.get("areas").asInstanceOf[BasicDBList].toList.map(_.toString).toSet
(atoms, vessels, areas)
}
def getTimes = objects.map(o => o.asInstanceOf[BasicDBObject].get("time").toString.toInt)
val times = getTimes
val finalExampleStartTime = times.min
val (lleAtoms, vessels, areas) = objects.foldLeft(Set[String](), Set[String](), Set[String]()) { (accum, o) =>
val obj = convert(o)
(accum._1 ++ obj._1, accum._2 ++ obj._2, accum._3 ++ obj._3)
}
val hleAtoms = times.flatMap(t => getCurrentVesselsAnnotation(t, vessels, hlesMap)).filter(x => x != "None")
val proximityAtoms = times.flatMap(t => getCurrentVesselsAnnotation(t, vessels, proxMap)).distinct.filter(x => x != "None")
val (closeToPortsAtoms, speedLimitAtoms) = times.foldLeft(Set[String](), Set[String]()){ (accum, time) =>
val closeToPortsAtoms_ = {
if (ports.contains(time.toString)) ports(time.toString)
else scala.collection.mutable.Set[String]()
}
val speedLimitAtoms_ = areas.flatMap { a =>
if (speedLimitsMap.contains(a)) speedLimitsMap(a)
else scala.collection.mutable.Set[String]()
}.filter(p => p != "None")
(accum._1 ++ closeToPortsAtoms_, accum._2 ++ speedLimitAtoms_)
}
val narrative = lleAtoms.toList ++ proximityAtoms.toList ++ closeToPortsAtoms.toList ++ speedLimitAtoms.toList
new Example(annot = hleAtoms.toList, nar = narrative, _time = finalExampleStartTime.toString)
}
def isWithinInterval(i: Int, interval: (Int, Int)) = {
i >= interval._1 && i <= interval._2
}
def checkInterval(time: Int, interval: VesselAnnotationAtom): String = {
if (isWithinInterval(time.toInt, (interval.startTime, interval.endTime))) {
interval.getActualAtom(time.toInt)
} else {
"None"
}
}
def getCurrentVesselsAnnotation(time: Int, vessels: Set[String], map: scala.collection.mutable.Map[String, AnnotationPerVessel]) = {
vessels.foldLeft(Set[String]()){ (accum, v) =>
if (map.contains(v)) {
val vesselAnnotation = map(v)
val intervals = vesselAnnotation.atoms
accum ++ intervals.map(i => checkInterval(time, i))
} else {
accum + "None"
}
}
}
case class VesselAnnotationAtom(atom: String, startTime: Int, endTime: Int, var hasBeenChecked: Boolean = false) {
def getActualAtom(time: Int) = this.atom.replaceAll("ReplaceThisByActualTime", time.toString)
}
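// Illustration of the placeholder mechanism (hypothetical atom and time stamps):
//   VesselAnnotationAtom("""holdsAt(stopped("v1"),"ReplaceThisByActualTime")""", 10, 20).getActualAtom(12)
//   returns: holdsAt(stopped("v1"),"12")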
class AnnotationPerVessel(val vessel: String = "", var atoms: Vector[VesselAnnotationAtom] = Vector.empty, var currentIndex: Int = 0) {
def updateAtoms(va: VesselAnnotationAtom) = this.atoms = this.atoms :+ va
def updateIndex = this.currentIndex = this.currentIndex + 1
def getCurrentAnnotationInterval = this.atoms(this.currentIndex)
lazy val sortAtoms: Vector[VesselAnnotationAtom] = this.atoms.sortBy(atom => atom.endTime)
}
// The key is a vessel and the value is the set of all its annotation atoms wrapped in an AnnotationPerVessel instance
var HLEsMap = scala.collection.mutable.Map[String, AnnotationPerVessel]()
// The key is a vessel and the value is the set of all proximity atoms for that vessel.
var proximityMap = scala.collection.mutable.Map[String, AnnotationPerVessel]()
def updateMap(vessel: String, a: VesselAnnotationAtom, map: scala.collection.mutable.Map[String, AnnotationPerVessel]) = {
if (map.contains(vessel)) map(vessel).updateAtoms(a)
else map(vessel) = new AnnotationPerVessel(vessel, Vector(a))
}
def populateHLEsMap(dataPath: String, hle: String) = {
println("Generating HLEs map")
val data = Source.fromFile(dataPath).getLines.filter(x => !x.contains("inf"))
hle match {
case "highSpeedIn" | "withinArea" =>
data foreach { x =>
val s = x.split("\\|")
val (startTime, endTime, vessel, area) = (s(4).toInt, s(5).toInt - 1, s(1), s(2))
val atom = s"""holdsAt($hle("$vessel","$area"),"ReplaceThisByActualTime")"""
val a = VesselAnnotationAtom(atom, startTime, endTime)
updateMap(vessel, a, HLEsMap)
}
case "loitering" | "stopped" | "lowSpeed" | "sailing" =>
data foreach { x =>
val s = x.split("\\|")
val (startTime, endTime, vessel) = (s(3).toInt, s(4).toInt - 1, s(1))
val atom = s"""holdsAt($hle("$vessel"),"ReplaceThisByActualTime")"""
val a = VesselAnnotationAtom(atom, startTime, endTime)
updateMap(vessel, a, HLEsMap)
}
case "rendezVous" =>
data foreach { x =>
val s = x.split("\\|")
val (startTime, endTime, vessel1, vessel2) = (s(4).toInt, s(5).toInt - 1, s(1), s(0))
val atom = s"""holdsAt($hle("$vessel1","$vessel2"),"ReplaceThisByActualTime")"""
val a = VesselAnnotationAtom(atom, startTime, endTime)
updateMap(vessel1, a, HLEsMap)
updateMap(vessel2, a, HLEsMap)
}
}
println("Sorting the HLEs map values")
//HLEsMap.foreach(x => x._2.atoms.sortBy(z => z.endTime))
HLEsMap foreach { case (k, v) =>
val v1 = v.atoms.sortBy(z => z.endTime)
HLEsMap(k).atoms = v1
}
}
def populateProximityMap(dataPath: String) = {
println("Getting proximity map")
val lines = Source.fromFile(dataPath).getLines.filter(x => x.contains("proximity"))
lines foreach { x =>
val s = x.split("=")(0).split(" ")
val vessel1 = s(2)
val vessel2 = s(3)
val z = x.split("=")(1).trim().split(" ")(1).split("\\(")(1).split("\\)")(0).split("-")
val startTime = z(0).toInt
val endTime = z(1).toInt - 1
val atom = s"""close("$vessel1","$vessel2","ReplaceThisByActualTime")"""
val a = VesselAnnotationAtom(atom, startTime, endTime)
updateMap(vessel1, a, proximityMap)
updateMap(vessel2, a, proximityMap)
}
}
}

File: OLED-master/src/main/scala/app/runners/OLEDRunner_MLNExperiments.scala

/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners
import java.io.File
import akka.actor.{ActorSystem, Props}
import app.runners.MLNDataHandler.MLNDataOptions
import app.runutils.{CMDArgs, Globals}
import logic.Examples.Example
import logic.{Clause, LogicUtils, Theory}
import oled.single_core.Dispatcher
import utils.DataUtils.DataAsExamples
import utils.Utils
import xhail.Xhail
import scala.io.Source
/**
* Created by nkatz on 9/14/16.
*/
object OLEDRunner_MLNExperiments {
def main(args: Array[String]) = {
val argsok = CMDArgs.argsOk(args)
if (!argsok._1) {
println(argsok._2)
System.exit(-1)
} else {
Globals.glvalues("OLEDdownscoreBySimilarity") = "false" // re-set this if you need to try it
val learnWholeTheories = false
//val foldPath = "/home/nkatz/dev/CAVIAR_MLN/CAVIAR_MLN/move/fold_9"
val foldPath = args.map(x => x.split("=")).find(x => x(0) == "--foldpath").getOrElse(throw new RuntimeException("--foldpath missing."))(1)
val inps = CMDArgs.getOLEDInputArgs(args)
val trainingDataOptions = new MLNDataOptions(foldPath, inps.chunkSize, take = 1000000)
//val trainingDataOptions = new MLNDataOptions(foldPath, inps.chunkSize)
val testingDataOptions = new MLNDataOptions(foldPath, inps.chunkSize)
val trainingDataFunction: MLNDataOptions => Iterator[Example] = MLNDataHandler.getTrainingData
val testingDataFunction: MLNDataOptions => Iterator[Example] = MLNDataHandler.getTestingData
//val msg = "eval"
val msg = "start"
if (msg == "eval" && inps.evalth == "None") {
throw new RuntimeException("No theory file provided (start msg = eval)")
}
if (!learnWholeTheories) {
val system = ActorSystem("HoeffdingLearningSystem")
system.actorOf(Props(new Dispatcher(inps, trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction)), name = "Learner") ! msg
} else {
/* This doesn't work */
/*
Globals.LEARNING_WHOLE_THEORIES = true
val system = ActorSystem("HoeffdingLearningSystem")
system.actorOf(Props(new WholeTheoryLearner(DB,delta,ties,prune,nmin,trainSize,repeatFor,chunkSize,"",withInertia,withPostPruning,onlinePruning,
dataset, HLE, true, kernel)), name = "Learner") ! "go"
*/
}
}
}
/* This is used in "offline" mode (EXPERIMENTAL): the training set is passed once
* to collect the bottom clauses and these bottom clauses are generalized
* with a second pass over the data. The advantage of this approach is that
* a bottom theory is available from the start and hopefully there is enough
* time to generalize it to a good hypothesis (better than one constructed online,
* where new bottom clauses are constructed "on demand" by the theory expansion
* routine). */
def collectBottomClauses(dataset: DataAsExamples, globals: Globals): Theory = {
val infile = Utils.getTempFile("example", ".lp")
val bk = globals.BK_WHOLE_EC
Globals.glvalues("perfect-fit") = "false"
var time = 0
val (accumKernel, accumAnnotation, accumNarrative) =
dataset.testingSet.foldLeft(List[Clause](), List[String](), List[String]()) { (x, y) =>
val ker = x._1
val annotAccum = x._2
val narrativeAccum = x._3
println(y.time.toInt)
if (y.time.toInt <= time) time = y.time.toInt
// generate a kernel set from the current example
val interpretation = y.annotationASP ++ y.narrativeASP
Utils.writeToFile(infile, "overwrite") { p => interpretation.foreach(p.println) }
val (_, varKernel) =
Xhail.runXhail(fromFile = infile.getAbsolutePath, kernelSetOnly = true, bkFile = bk, globals = globals)
(ker ++ varKernel, annotAccum ++ y.annotation, narrativeAccum ++ y.narrative)
}
val compressedKernel = LogicUtils.compressTheory(accumKernel.toList)
Theory(compressedKernel)
}
}
object MLNDataHandler {
//"/home/nkatz/dev/CAVIAR_MLN/CAVIAR_MLN/meet/fold_0/training/batch/training.fold_0.db"
///home/nkatz/dev/CAVIAR_MLN/CAVIAR_MLN/meet/fold_0
class MLNDataOptions(val foldPath: String, val chunkSize: Int, val take: Int = 0) extends app.runutils.IOHandling.MongoSource
def getTrainingData(opts: MLNDataOptions): Iterator[Example] = {
val d = getData(opts)
val training = if (opts.take == 0) d._1 else d._1.take(opts.take / opts.chunkSize)
training.toIterator
}
def getTestingData(opts: MLNDataOptions): Iterator[Example] = {
val d = getData(opts)
d._2.toIterator
}
def getData(opts: MLNDataOptions) = {
val word = "[\\w]+".r
val map = Map("HoldsAt" -> "holdsAt", "Happens" -> "happensAt", "Close" -> "close", "OrientationMove" -> "orientationMove",
"Active" -> "active", "Inactive" -> "inactive", "Walking" -> "walking", "Abrupt" -> "abrupt" -> "Running" -> "running",
"Enter" -> "appear", "Exit" -> "disappear", "Meet" -> "meeting", "Move" -> "moving")
def process(s: String) = {
def replAll(s: String, chunks: List[(String, String)]) = chunks.foldLeft(s)((x, y) => x.replaceAll(y._1, y._2))
val wordChunks = word findAllIn s toList
val time = wordChunks.reverse.head
val chunks = ((word findAllIn s).map(x => (x, map.getOrElse(x, handleInnerTerms(x)))).toList, time.toInt)
val newS = (replAll(s, chunks._1).replaceAll("\\s", ""), time)
newS
}
def isPred(s: String) = {
s.startsWith("HoldsAt") || s.startsWith("Happens") || s.startsWith("Close") || s.startsWith("OrientationMove")
}
def handleInnerTerms(s: String) = {
def lowerFirst(s: String) = s.replace(s(0), s(0).toLower)
//val p = "[A-Z][\\w]+[_][\\w]" // matches stuff like Walking_A and Meet_B_A
val split = s.split("_")
split.length match {
case 1 => lowerFirst(s) // no underscores, simply to lower-case
case 2 => s"${map(split(0))}(${lowerFirst(split(1))})"
case 3 => s"${map(split(0))}(${lowerFirst(split(1))},${lowerFirst(split(2))})"
}
}
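// Worked example of the renaming scheme (hypothetical atom):
//   handleInnerTerms("Meet_A_B") yields "meeting(a,b)", so
//   process("HoldsAt(Meet_A_B, 10)") yields ("holdsAt(meeting(a,b),10)", "10")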
///*
def formatAndSplitData(data: List[String], split: Boolean) = {
//val slideStep = if (split) chunkSize - 1 else data.length-1
val sorted = (data map process _ groupBy (_._2) map { case (k, v) => (k, v.map(_._1)) }).toList.sortBy(_._1.toInt) map (_._2)
val iter = if (split) sorted.sliding(opts.chunkSize, opts.chunkSize - 1) map (_.flatten) toList else sorted
val d = iter map { p =>
val (annotation, narrative) = p.foldLeft(List[String](), List[String]()) { (x, y) =>
if (y.startsWith("holdsAt")) (x._1 :+ y, x._2) else (x._1, x._2 :+ y)
}
val time = (word findAllIn p.head toList).reverse.head
new Example(annot = annotation, nar = narrative, _time = time)
}
d.toList
}
//*/
/*
def formatAndSplitData(data: List[String], split: Boolean): List[Example] = {
//val slideStep = if (split) chunkSize - 1 else data.length-1
val sorted = (data map process _ groupBy (_._2) map { case (k, v) => (k, v.map(_._1)) }).toList.sortBy(_._1.toInt) map (_._2)
val iter = if (split) sorted.sliding(opts.chunkSize, opts.chunkSize - 1) map (_.flatten) toList else sorted
//val iter = if (split) sorted.sliding(opts.chunkSize, opts.chunkSize) map (_.flatten) toList else sorted
val d = iter map { p =>
val (annotation, narrative) = p.foldLeft(List[String](), List[String]()) { (x, y) =>
if (y.startsWith("holdsAt")) (x._1 :+ y, x._2) else (x._1, x._2 :+ y)
}
val time = (word findAllIn p.head toList).reverse.head
// Remove all narrative atoms with a lastTime time stamp. This is to
// avoid situations where e.g. an initiation rule correctly fires at
// the last time point of a mini-batch, but there is no corresponding
// annotation atom in the mini-batch (it's been carried over to the next batch)
// therefore we erroneously count FPs. The "edge" narrative atoms are carried over
// to the next batch (notice the "sliding(chunkSize, chunkSize - 1)" above).
val lastTime = (word findAllIn p.last toList).reverse.head
val narrative_ = narrative.filter{ x =>
val lit = Literal.parseWPB2(x)
val litTime = lit.terms.last.name
litTime != lastTime
}
//new Example(annot = annotation, nar = narrative, _time = time)
new Example(annot = annotation, nar = narrative_, _time = time)
}
d.toList
}
*/
val training = {
val trainingSetPath = s"${opts.foldPath}/training/batch"
val innerFiles = new File(trainingSetPath).listFiles
innerFiles flatMap (f => Source.fromFile(f).getLines.toList.filter(isPred _)) toList
}
// This is for getting one video per mini-batch in the training set.
// It's how the data were partitioned for the OSLa experiments and that's how we'll compare.
val t = {
val trainingSetPath = s"${opts.foldPath}/training"
val innerFiles = new File(trainingSetPath).listFiles.filter(p => p.getName.contains("training")).sortBy{ file =>
file.getName.split("\\.")(1).split("_")(2).toInt
}
innerFiles.map(f => Source.fromFile(f).getLines.toList.filter(isPred _)).
map(singleVideo => formatAndSplitData(singleVideo, split = false)).map(x => Example.merge(x))
}.toList
val testingData = {
val testingSetPath = s"${opts.foldPath}/testing"
val annotationPath = s"${new File(new File(testingSetPath).getParent).getParent}/annotation"
val innerFiles = new File(testingSetPath).listFiles
val testingFilesNames = innerFiles.map(f => f.getName.substring(0, f.getName.lastIndexOf("."))).toList
val exmpls = testingFilesNames.foldLeft(List[Example]()) { (p, testingFile) =>
val narrative = Source.fromFile(testingSetPath + "/" + testingFile + ".db").getLines.toList.filter(isPred _)
// Find the proper annotation file
val annotFile = new File(annotationPath).listFiles.toList.filter(f => f.getName.contains(testingFile)).head
val annotation = Source.fromFile(annotFile).getLines.toList.filter(p => isPred(p) && p.split(" ")(1) == "1").map(_.split(" ")(0))
val e = formatAndSplitData(narrative ++ annotation, split = false)
p :+ Example.merge(e)
}
exmpls
}
// use this to split training videos to mini-batches
val trainingData = formatAndSplitData(training, split = true)
// use this to use a whole video per mini-batch (as in the OSLa experiments)
//val trainingData = t
// make sure that we start with positive examples (don't waste negatives from which nothing is learnt)
val pos = trainingData.filter(x => x.annotation.nonEmpty)
//val _trainingData = trainingData//List(pos.head) ++ Random.shuffle(trainingData) // trainingData //
val _trainingData = trainingData //List(pos.head) ++ trainingData // trainingData //
(_trainingData, testingData)
}
}

File: OLED-master/src/main/scala/app/runners/Opportunity.scala

/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners
import logic.Examples.Example
import scala.io.Source
import java.io.File
/**
* Created by nkatz on 7/9/17.
*/
object Opportunity extends App {
val labelsPath = "/home/nkatz/dev/OpportunityUCIDataset/dataset/label_legend.txt"
val dataFilePath = "/home/nkatz/dev/OpportunityUCIDataset/dataset/S1-ADL1.dat"
val person = new File(dataFilePath).getName.split("-")(0).toLowerCase
val events = List("0 none", "1 stand", "2 walk", "4 sit", "5 lie", "101 relaxing", "102 coffee_time", "103 early_morning",
"104 cleanup", "105 sandwich_time", "201 unlock", "202 stir", "203 lock", "204 close", "205 reach", "206 open", "207 sip",
"208 clean", "209 bite", "210 cut", "211 spread", "212 release", "213 move", "301 bottle", "302 salami", "303 bread",
"304 sugar", "305 dishwasher", "306 switch", "307 milk", "308 drawer3", "309 spoon", "310 knife_cheese", "311 drawer2",
"312 table", "313 glass", "314 cheese", "315 chair", "316 door1", "317 door2", "318 plate", "319 drawer1", "320 fridge",
"321 cup", "322 knife_salami", "323 lazychair", "401 unlock", "402 stir", "403 lock", "404 close", "405 reach", "406 open",
"407 sip", "408 clean", "409 bite", "410 cut", "411 spread", "412 release", "413 move", "501 Bottle", "502 salami", "503 bread",
"504 sugar", "505 dishwasher", "506 switch", "507 milk", "508 drawer3", "509 spoon", "510 knife_cheese", "511 drawer2",
"512 table", "513 glass", "514 cheese", "515 chair", "516 door1", "517 door2", "518 plate", "519 drawer1", "520 fridge", "521 cup",
"522 knife_salami", "523 lazychair", "406516 open_door1", "406517 open_door2", "404516 close_door1", "404517 close_door2",
"406520 open_fridge", "404520 close_fridge", "406505 open_dishwasher", "404505 close_dishwasher", "406519 open_drawer1",
"404519 close_drawer1", "406511 open_drawer2", "404511 close_drawer2", "406508 open_drawer3", "404508 close_drawer3",
"408512 clean_table", "407521 drink_fromCup", "405506 toggle_switch")
val labels = events.map{ x => val s = x.split(" "); (s(0), s(1)) } toMap
val columns = Map("locomotion" -> 244, "HL_activity" -> 245, "left_arm" -> 246,
"left_arm_obj" -> 247, "right_arm" -> 248, "right_arm_obj" -> 249, "both_arms" -> 250)
val lines = Source.fromFile(dataFilePath).getLines.map(x => x.split(" "))
var time = 0
lines foreach { line =>
val a = line(columns("locomotion") - 1)
val b = line(columns("left_arm") - 1)
val c = line(columns("left_arm_obj") - 1)
val d = line(columns("right_arm") - 1)
val e = line(columns("right_arm_obj") - 1)
val f = line(columns("both_arms") - 1)
val h = line(columns("HL_activity") - 1)
val locomotion = if (labels(a) != "none") s"happensAt(${labels(a)}($person),$time)" else ""
val leftArm = if (labels(b) != "none" && labels(c) != "none") s"happensAt(${labels(b)}($person,${labels(c)}),$time)" else ""
val rightArm = if (labels(d) != "none" && labels(e) != "none") s"happensAt(${labels(d)}($person,${labels(e)}),$time)" else ""
val bothArms = if (labels(f) != "none") s"happensAt(${labels(f).split("_")(0)}($person,${labels(f).split("_")(1)}),$time)" else ""
val highLevelActivity = if (labels(h) != "none") s"holdsAt(${labels(h)}($person),$time)" else ""
if (bothArms != "") {
val stop = ""
}
val all = List(locomotion, leftArm, rightArm, bothArms, highLevelActivity).distinct.filter(x => x != "")
println(all)
time += 1
}
}
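
A sketch of what this script prints for a single sensor reading (illustrative label values; the actual labels are read from label_legend.txt):

// With locomotion label 1 (stand), right-arm labels 205/301 (reach, bottle) and
// HL activity 101 (relaxing) at time 0, the printed list would be:
//   List(happensAt(stand(s1),0), happensAt(reach(s1,bottle),0), holdsAt(relaxing(s1),0))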

File: OLED-master/src/main/scala/app/runners/RendezvousMLNRunner.scala

/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners
import java.io.File
import akka.actor.{ActorSystem, Props}
import app.runners.RendezvousMLNRunner.RendezvousMLNDataHandler.RendezvousDataOptions
import app.runutils.CMDArgs
import logic.Examples.Example
import oled.single_core.Dispatcher
import scala.io.Source
object RendezvousMLNRunner {
/*
*
* HappensAt(Change_in_heading_227569380, 776) --> happensAt(change_in_heading("227569380"), 776)
* HappensAt(Change_in_speed_end_227315110, 774) --> happensAt(change_in_speed_end("227315110"), 774)
* HappensAt(Change_in_speed_start_227315110, 773) --> happensAt(change_in_speed_start("227315110"), 773)
* HappensAt(Gap_end_227315110, 770) --> happensAt(gap_end("227315110"), 770)
* HappensAt(Gap_start_227569380, 769) --> happensAt(gap_start("227569380"), 769)
* HappensAt(Proximity_227315110_227569380, 780) --> happensAt(proximity("227315110","227569380"), 780)
* HappensAt(Slow_motion_start_227315110, 751) --> happensAt(slow_motion_start("227315110"), 751)
* HappensAt(Slow_motion_end_227315110, 814) --> happensAt(slow_motion_end("227315110"), 814)
* HappensAt(Stop_start_227315110, 730) --> happensAt(stop_start("227315110"), 730)
* HappensAt(Stop_end_227315110, 731) --> happensAt(stop_end("227315110"), 731)
* HappensAt(Velocity_6_7_227315110, 731) --> happensAt(velocity(6, 7, "227315110"), 731)
* HoldsAt(Rendezvous_227362110_227069470, 838263) --> holdsAt(rendezvous("227362110","227069470"), 838263)
*
* */
def main(args: Array[String]) = {
val argsok = CMDArgs.argsOk(args)
if (!argsok._1) {
println(argsok._2)
System.exit(-1)
} else {
val foldPath = args.map(x => x.split("=")).find(x => x(0) == "--foldpath").getOrElse(throw new RuntimeException("--foldpath missing."))(1)
val inps = CMDArgs.getOLEDInputArgs(args)
val trainingDataOptions = new RendezvousDataOptions(foldPath, inps.chunkSize)
val testingDataOptions = trainingDataOptions
val trainingDataFunction: RendezvousDataOptions => Iterator[Example] = RendezvousMLNDataHandler.getTrainingData
val testingDataFunction: RendezvousDataOptions => Iterator[Example] = RendezvousMLNDataHandler.getTestingData
val system = ActorSystem("HoeffdingLearningSystem")
val msg = "start"
//val msg = "EvaluateHandCrafted"
system.actorOf(Props(new Dispatcher(inps, trainingDataOptions, testingDataOptions, trainingDataFunction, testingDataFunction)), name = "Learner") ! msg
}
}
object RendezvousMLNDataHandler {
class RendezvousDataOptions(val foldPath: String, val chunkSize: Int) extends app.runutils.IOHandling.MongoSource
def isPred(s: String) = s.startsWith("HoldsAt") || s.startsWith("HappensAt")
def getTrainingData(opts: RendezvousDataOptions) = {
var i = 0
val trainingSetPath = s"${opts.foldPath}/training/batch"
val trainingFile = new File(trainingSetPath).listFiles.head
val chunkSize = opts.chunkSize
Source.fromFile(trainingFile.getCanonicalPath).getLines.sliding(chunkSize, chunkSize - 1) map { chunk =>
val (narrative, annotation) = chunk.flatMap(x => x.split("\\|").map(x => convert(x))).foldLeft(List[String](), List[String]()) { (accum, y) =>
if (y.contains("happens")) (accum._1 :+ y, accum._2) else (accum._1, accum._2 :+ y)
}
i += 1
new Example(annot = annotation, nar = narrative, _time = i.toString)
}
}
def getTestingData(opts: RendezvousDataOptions) = {
val testingSetPath = s"${opts.foldPath}/testing"
val annotationPath = s"${new File(new File(testingSetPath).getParent).getParent}/annotation"
val innerFiles = new File(testingSetPath).listFiles
val testingFilesNames = innerFiles.map(f => f.getName.substring(0, f.getName.lastIndexOf("."))).toList
val exmpls = testingFilesNames.foldLeft(List[Example]()) { (p, testingFile) =>
val narrative = Source.fromFile(testingSetPath + "/" + testingFile + ".db").getLines.toList.filter(isPred _)
// Find the proper annotation file
val annotFile = new File(annotationPath).listFiles.toList.filter(f => f.getName.contains(testingFile)).head
val annotation = Source.fromFile(annotFile).getLines.toList.filter(p => isPred(p)).map(_.replaceAll("\\s", ""))
val exmpl = new Example(annot = annotation.map(x => convert(x)), nar = narrative.map(x => convert(x)), _time = "-")
p :+ exmpl
}
exmpls.toIterator
}
}
def getTrainingData(dataPath: String, chunkSize: Int) = {
var i = 0
Source.fromFile(dataPath).getLines.sliding(chunkSize, chunkSize - 1) map { chunk =>
val (narrative, annotation) = chunk.flatMap(x => x.split("\\|").map(x => convert(x))).foldLeft(List[String](), List[String]()) { (accum, y) =>
if (y.contains("happens")) (accum._1 :+ y, accum._2) else (accum._1, accum._2 :+ y)
}
i += 1
new Example(annot = annotation, nar = narrative, _time = i.toString)
}
}
def split1(in: String) = {
val split = in.split(",")
val time = split(1).split("\\)")(0).trim
val pred = split(0).split("\\(")(1)
val vessel = pred.split("_").reverse.head.trim
(vessel, time)
}
def split2(in: String) = {
try {
val split = in.split(",")
val time = split(1).split("\\)")(0).trim
val pred = split(0).split("\\(")(1)
val predSplit = pred.split("_")
val vessel1 = predSplit(1)
val vessel2 = predSplit(2)
(vessel1, vessel2, time)
} catch {
  case e: ArrayIndexOutOfBoundsException =>
    println(in) // log the malformed atom before failing
    throw e
}
}
def split3(in: String) = {
val split = in.split(",")
val time = split(1).split("\\)")(0).trim
val pred = split(0).split("\\(")(1)
val predSplit = pred.split("_")
val (lower, upper, vessel) = (predSplit(1), predSplit(2), predSplit(3))
(lower, upper, vessel, time)
}
def convert(in: String) = {
if (in.contains("Change_in_heading")) {
val (vessel, time) = split1(in)
s"""happensAt(change_in_heading("$vessel"),$time)"""
} else if (in.contains("Change_in_speed_end")) {
val (vessel, time) = split1(in)
s"""happensAt(change_in_speed_end("$vessel"),$time)"""
} else if (in.contains("Change_in_speed_start")) {
val (vessel, time) = split1(in)
s"""happensAt(change_in_speed_start("$vessel"),$time)"""
} else if (in.contains("Gap_end")) {
val (vessel, time) = split1(in)
s"""happensAt(gap_end("$vessel"),$time)"""
} else if (in.contains("Gap_start")) {
val (vessel, time) = split1(in)
s"""happensAt(gap_start("$vessel"),$time)"""
} else if (in.contains("Proximity")) {
val (vessel1, vessel2, time) = split2(in)
s"""happensAt(proximity("$vessel1","$vessel2"),$time)"""
} else if (in.contains("Slow_motion_start")) {
val (vessel, time) = split1(in)
s"""happensAt(slow_motion_start("$vessel"),$time)"""
} else if (in.contains("Slow_motion_end")) {
val (vessel, time) = split1(in)
s"""happensAt(slow_motion_end("$vessel"),$time)"""
} else if (in.contains("Stop_start")) {
val (vessel, time) = split1(in)
s"""happensAt(stop_start("$vessel"),$time)"""
} else if (in.contains("Stop_end")) {
val (vessel, time) = split1(in)
s"""happensAt(stop_end("$vessel"),$time)"""
} else if (in.contains("Velocity")) {
val (lower, upper, vessel, time) = split3(in)
s"""happensAt(velocity($lower, $upper, "$vessel"),$time)"""
} else if (in.contains("HoldsAt")) {
val (vessel1, vessel2, time) = split2(in)
s"""holdsAt(rendezvous("$vessel1","$vessel2"),$time)"""
} else {
throw new RuntimeException(s"Don't know what to do with $in")
}
}
}
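
A minimal, self-contained check of the conversion implemented above (vessel IDs taken from the mapping comment; the harness object itself is hypothetical):

object ConvertSketch extends App {
  import app.runners.RendezvousMLNRunner.convert
  // prints: happensAt(proximity("227315110","227569380"),780)
  println(convert("HappensAt(Proximity_227315110_227569380, 780)"))
  // prints: holdsAt(rendezvous("227362110","227069470"),838263)
  println(convert("HoldsAt(Rendezvous_227362110_227069470, 838263)"))
}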

File: OLED-master/src/main/scala/app/runners/XHAILRunner_MLNExperiments.scala

/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners
import com.typesafe.scalalogging.LazyLogging
/**
* Created by nkatz on 12/3/2017.
*/
object XHAILRunner_MLNExperiments extends LazyLogging {
/*
* The code is at iled.core.noisyILED.experimentsMLNdata.XHAILExperiments
* in the latset-stable-version-august-2016 branch. It needs changes to compile
* (because the structure of the project, package names etc are different in the
* main-version branch).
*
* */
}
OLED | OLED-master/src/main/scala/app/runners/d_OLEDRunner.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners
import akka.actor.{ActorSystem, Props}
import app.runners.OLEDMaritimeRunner._
import app.runutils.{CMDArgs, Globals}
import experiments.datautils.caviar_intervals.MeetingTrainingDistributed
import experiments.datautils.maritime_data.yet_another_attempt.MaritimeToMongo.{populatePortsMap, populateSpeedLimitsMap}
import logic.Examples.Example
import oled.distributed.{Dispatcher, Utils}
import utils.DataUtils.DataAsIntervals
/**
* Created by nkatz on 2/13/17.
*/
object d_OLEDRunner {
def main(args: Array[String]) = {
val argsok = CMDArgs.argsOk(args)
if (!argsok._1) {
println(argsok._2)
System.exit(-1)
} else {
Globals.glvalues("distributed") = "true"
val params = CMDArgs.getOLEDInputArgs(args)
val dataFunction: MaritimeDataOptions => Iterator[Example] = getData
//val options = List( (core_2_1, dataFunction), (core_2_2, dataFunction) )
//val testingOptions = testingData
//populateHLEsMap(dataOpts1.hlePath, dataOpts1.targetConcept)
//populatePortsMap(dataOpts1.closeToPortsPath)
//populateProximityMap(dataOpts1.llePath)
//populateSpeedLimitsMap(dataOpts1.speedLimitsPath)
val message = "go"
// Start the actor system
val system = ActorSystem("distributed-oled")
//system.actorOf(Props( new Dispatcher(options, params, 2, testingOptions, dataFunction) ), name = "TopLevelDispatcher") ! message
}
}
}
OLED | OLED-master/src/main/scala/app/runners/maritime_experiments/DataCRONDelivRunner.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners.maritime_experiments
import akka.actor.{ActorSystem, Props}
import app.runutils.CMDArgs
import logic.Examples.Example
import oled.single_core.Master
import scala.io.Source
object DataCRONDelivRunner {
val dataPath = "/home/nkatz/dev/Brest-data-5-5-2018/rendezVous-batchsize-10"
def main(args: Array[String]) = {
///*
val argsok = CMDArgs.argsOk(args)
if (!argsok._1) {
println(argsok._2)
System.exit(-1)
} else {
val runningOptions = CMDArgs.getOLEDInputArgs(args)
//val trainingDataOptions = new DataOptions(dataPath, take = 15000)
val trainingDataOptions = new DataOptions(dataPath, take = 200000)
//val testingDataOptions = new DataOptions(dataPath, drop = 15000)
//val testingDataOptions = new DataOptions(dataPath, drop = 30000)
val testingDataOptions = new DataOptions(dataPath)
val trainingDataFunction: DataOptions => Iterator[Example] = getData
val testingDataFunction: DataOptions => Iterator[Example] = getData
val system = ActorSystem("HoeffdingLearningSystem")
val startMsg = if (runningOptions.evalth != "None") "eval" else "start"
system.actorOf(Props(new Master(runningOptions, trainingDataOptions, testingDataOptions,
trainingDataFunction, testingDataFunction)), name = "Master-Actor") ! startMsg
}
//*/
/*
val data = getData(new DataOptions(dataPath))
data foreach { x =>
if (x.narrative.exists(p => p.contains("stop_start"))) println(x.time)
if (x.time == "1924") {
val stop = "stop"
}
if (x.annotation.nonEmpty) {
println(x)
println("")
}
}
*/
}
private class DataOptions(val dataFilePath: String, val take: Int = 0, val drop: Int = 0) extends app.runutils.IOHandling.InputSource
def getData(opts: DataOptions) = {
val dataPath = opts.dataFilePath
val data = Source.fromFile(dataPath).getLines
var i = 0
val iter = data map { currentBatch =>
val batch = currentBatch.split(" ")
val (annotation, narrative) = batch.foldLeft(List[String](), List[String]()) { (x, y) =>
val (anot, nar) = (x._1, x._2)
if (y.startsWith("holdsAt")) (anot :+ y, nar) else (anot, nar :+ y)
}
i += 1
      // drop the first annotation atom, because rendezVous is defined with holdsFor...
      val _annotation = if (annotation.length > 1) annotation.drop(1) else annotation
      new Example(annot = _annotation, nar = narrative, _time = i.toString)
}
if (opts.take != 0) {
iter.take(opts.take)
} else if (opts.drop != 0) {
iter.drop(opts.drop)
} else {
iter
}
}
}
OLED | OLED-master/src/main/scala/app/runners/maritime_experiments/LLEsToMongo.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners.maritime_experiments
import com.mongodb.casbah.Imports.MongoDBObject
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.MongoClient
import scala.io.Source
/**
* Created by nkatz on 7/9/17.
*/
/**
*
* IMPORTANT:
*
* TO CREATE THIS MAP YOY NEED LOTS OF MEMORY.
*
* GIVE SOMETHING LIKE -Xmx4G TO THE JVM.
*
*
* THIS CLASS IS TO BE USED ONLY ONCE PER LLE FILE (SEE EXAMPLE BELOW) TO GET THE LLES INTO MONGO.
*
*/
object LLEsToMongo {
// The key is time
var LLEMap = scala.collection.mutable.Map[Int, (scala.collection.mutable.Set[String], scala.collection.mutable.Set[String], scala.collection.mutable.Set[String])]()
def main(args: Array[String]) = {
val dataOpts1 = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets-my-split/dataset88.txt",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition/21/lowSpeed-no-infs.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition/88/close_to_ports.csv",
chunkSize = 10,
targetConcept = "lowSpeed",
limit = 100000.0,
trainingMode = false)
populateLLEsMap(dataOpts1.llePath)
llesToMongo("brest-8-8")
}
def populateLLEsMap(dataPath: String) = {
println("Getting LLEs map")
var counter = 0
val data = Source.fromFile(dataPath).getLines
while (data.hasNext) {
val x = data.next()
if (!x.contains("HoldsFor") && !x.contains("coord")) {
var area = "None"
var predicate = "None"
val info = x.split("HappensAt")(1)
val _info = info.split("\\]")
val time = _info(1).trim.toInt
val rest = _info(0).split("\\[")(1)
val lle = rest.split(" ")(0)
val vessel = rest.split(" ")(1)
lle match {
case "gap_start" =>
//HappensAt [gap_start 271043753] 1451802715
predicate = s"""happensAt(gap_start("$vessel"),"$time")"""
case "velocity" =>
//HappensAt [velocity 240675000 0 270.00005134150797] 1451802711
//the 4th parameter in [velocity 240675000 0 270.00005134150797] is heading, which is not used anywhere
val speed = rest.split(" ")(2)
predicate = s"""happensAt(velocity("$vessel","$speed"),"$time")"""
case "change_in_speed_start" =>
//HappensAt [change_in_speed_start 237955000] 1451802743
predicate = s"""happensAt(change_in_speed_start("$vessel"),"$time")"""
case "stop_start" =>
//HappensAt [stop_start 636013060] 1451802771
predicate = s"""happensAt(stop_start("$vessel"),"$time")"""
case "change_in_heading" =>
//HappensAt [change_in_heading 240096000] 1451802787
predicate = s"""happensAt(change_in_heading("$vessel"),"$time")"""
case "isInArea" =>
//HappensAt [isInArea 239471800 area300240700] 1451802848
area = rest.split(" ")(2)
predicate = s"""happensAt(isInArea("$vessel", "$area"),"$time")"""
case "change_in_speed_end" =>
//HappensAt [change_in_speed_end 237144200] 1451802872
predicate = s"""happensAt(change_in_speed_end("$vessel"),"$time")"""
case "slow_motion_start" =>
//HappensAt [slow_motion_start 240802000] 1451802892
predicate = s"""happensAt(slow_motion_start("$vessel"),"$time")"""
case "stop_end" =>
//HappensAt [stop_end 356460000] 1451802924
predicate = s"""happensAt(stop_end("$vessel"),"$time")"""
case "gap_end" =>
//HappensAt [gap_end 271043772] 1451802920
predicate = s"""happensAt(gap_end("$vessel"),"$time")"""
case "leavesArea" =>
//HappensAt [leavesArea 239371500 area300674000] 1451802925
area = rest.split(" ")(2)
predicate = s"""happensAt(leavesArea("$vessel","$area"),"$time")"""
case "slow_motion_end" =>
predicate = s"""happensAt(slow_motion_end("$vessel"),"$time")"""
//HappensAt [slow_motion_end 271044099] 1451802937
}
if (LLEMap.contains(time)) {
val currentValue = LLEMap(time)
val updatedAtoms = scala.collection.mutable.Set[String]() ++= currentValue._1 += predicate
val updatedVessels = scala.collection.mutable.Set[String]() ++= currentValue._2 += vessel
val updatedAreas = scala.collection.mutable.Set[String]() ++= currentValue._3 += area
LLEMap(time) = (updatedAtoms, updatedVessels, updatedAreas)
} else {
LLEMap(time) = (scala.collection.mutable.Set(predicate), scala.collection.mutable.Set(vessel), scala.collection.mutable.Set(area))
}
}
counter += 1
//println(s"Grouping LLEs by time. Data point: $counter")
}
}
/* Call this method to insert the LLEs from a dataset to mongo. This is supposed to be called once. */
def llesToMongo(dbName: String) = {
val mongoClient = MongoClient()
mongoClient(dbName).dropDatabase()
val collection = mongoClient(dbName)("examples")
collection.createIndex(MongoDBObject("time" -> 1))
val times = LLEMap.keySet.toVector.sorted
var counter = 0
times.foreach { time =>
val record = LLEMap(time)
val (lleAtoms, vessels, areas) = (record._1, record._2, record._3)
val entry = MongoDBObject("lles" -> lleAtoms) ++ ("vessels" -> vessels) ++ ("areas" -> areas) ++ ("time" -> time) ++ ("index" -> counter)
collection.insert(entry)
println(s"inserting $counter to $dbName")
counter += 1
}
}
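  /* Minimal read-back sketch for sanity-checking an insertion; `peek` is a
   * hypothetical helper, not part of the original pipeline. */
  def peek(dbName: String, n: Int = 3): Unit = {
    val collection = MongoClient()(dbName)("examples")
    // print the first n stored documents in time order
    collection.find().sort(MongoDBObject("time" -> 1)).take(n).foreach(println)
  }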
}
OLED | OLED-master/src/main/scala/app/runners/maritime_experiments/MultiCoreDataOptions.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners.maritime_experiments
/**
* Created by nkatz on 7/9/17.
*/
object MultiCoreDataOptions {
def getOptions(hle: String, chunkSize: Int, limit: Int, coresNum: Int) = {
    val prefixes = coresNum match {
      case 2 => List("2-1", "2-2")
      case 4 => List("4-1", "4-2", "4-3", "4-4")
      case 8 => List("8-1", "8-2", "8-3", "8-4", "8-5", "8-6", "8-7", "8-8")
      case _ => throw new IllegalArgumentException(s"Unsupported coresNum: $coresNum (expected 2, 4 or 8)")
    }
val joinPrefix = (p: String) => p.split("-").mkString("")
val llePath = "/home/nkatz/dev/maritime/brest-data/datasets-my-split"
val hlePath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split"
val speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv"
val info = prefixes map (x => (s"$llePath/dataset${joinPrefix(x)}.txt", s"brest-$x", s"$hlePath/${joinPrefix(x)}/$hle.csv", s"$hlePath/${joinPrefix(x)}/close_to_ports.csv"))
info map { x =>
new MaritimeDataOptions(llePath = x._1, db = x._2, hlePath = x._3, speedLimitsPath = speedLimitsPath, closeToPortsPath = x._4, chunkSize, limit.toDouble, hle)
}
}
/*
lazy val highSpeedInDataOptions21 = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets-my-split/dataset21.txt",
db = "brest-2-1",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/21/highSpeedIn.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/21/close_to_ports.csv",
chunkSize = 10,
limit = 5000.0,
targetConcept = "highSpeedIn")
lazy val highSpeedInDataOptions22 = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets-my-split/dataset22.txt",
db = "brest-2-2",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/22/highSpeedIn.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/22/close_to_ports.csv",
chunkSize = 10,
limit = 5000.0,
targetConcept = "highSpeedIn")
lazy val highSpeedInDataOptions41 = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets-my-split/dataset41.txt",
db = "brest-4-1",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/41/highSpeedIn.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/41/close_to_ports.csv",
chunkSize = 10,
limit = 2500.0,
targetConcept = "highSpeedIn")
lazy val highSpeedInDataOptions42 = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets-my-split/dataset42.txt",
db = "brest-4-2",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/42/highSpeedIn.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/42/close_to_ports.csv",
chunkSize = 10,
limit = 2500.0,
targetConcept = "highSpeedIn")
lazy val highSpeedInDataOptions43 = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets-my-split/dataset43.txt",
db = "brest-4-3",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/43/highSpeedIn.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/43/close_to_ports.csv",
chunkSize = 10,
limit = 2500.0,
targetConcept = "highSpeedIn")
lazy val highSpeedInDataOptions44 = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets-my-split/dataset44.txt",
db = "brest-4-4",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/44/highSpeedIn.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/44/close_to_ports.csv",
chunkSize = 10,
limit = 2500.0,
targetConcept = "highSpeedIn")
lazy val highSpeedInDataOptions81 = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets-my-split/dataset81.txt",
db = "brest-8-1",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/81/highSpeedIn.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/81/close_to_ports.csv",
chunkSize = 10,
limit = 1250.0,
targetConcept = "highSpeedIn")
lazy val highSpeedInDataOptions82 = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets-my-split/dataset82.txt",
db = "brest-8-2",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/82/highSpeedIn.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/82/close_to_ports.csv",
chunkSize = 10,
limit = 1250.0,
targetConcept = "highSpeedIn")
lazy val highSpeedInDataOptions83 = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets-my-split/dataset83.txt",
db = "brest-8-3",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/83/highSpeedIn.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/83/close_to_ports.csv",
chunkSize = 10,
limit = 1250.0,
targetConcept = "highSpeedIn")
lazy val highSpeedInDataOptions84 = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets-my-split/dataset84.txt",
db = "brest-8-4",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/84/highSpeedIn.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/84/close_to_ports.csv",
chunkSize = 10,
limit = 1250.0,
targetConcept = "highSpeedIn")
lazy val highSpeedInDataOptions85 = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets-my-split/dataset85.txt",
db = "brest-8-5",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/85/highSpeedIn.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/85/close_to_ports.csv",
chunkSize = 10,
limit = 1250.0,
targetConcept = "highSpeedIn")
lazy val highSpeedInDataOptions86 = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets-my-split/dataset86.txt",
db = "brest-8-6",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/86/highSpeedIn.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/86/close_to_ports.csv",
chunkSize = 10,
limit = 1250.0,
targetConcept = "highSpeedIn")
lazy val highSpeedInDataOptions87 = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets-my-split/dataset87.txt",
db = "brest-8-7",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/87/highSpeedIn.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/87/close_to_ports.csv",
chunkSize = 10,
limit = 1250.0,
targetConcept = "highSpeedIn")
lazy val highSpeedInDataOptions88 = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets-my-split/dataset88.txt",
db = "brest-8-8",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/88/highSpeedIn.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split/88/close_to_ports.csv",
chunkSize = 10,
limit = 1250.0,
targetConcept = "highSpeedIn")
*/
}
OLED | OLED-master/src/main/scala/app/runners/maritime_experiments/MultiCoreMaritimeRunner.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners.maritime_experiments
import akka.actor.{ActorSystem, Props}
import app.runutils.{CMDArgs, RunningOptions}
import logic.Examples.Example
import oled.distributed.Dispatcher
/**
* Created by nkatz on 7/9/17.
*/
object MultiCoreMaritimeRunner {
def main(args: Array[String]) = {
val argsok = CMDArgs.argsOk(args)
if (!argsok._1) {
println(argsok._2)
System.exit(-1)
} else {
val runOpts = CMDArgs.getOLEDInputArgs(args)
val opts = getOptions("lowSpeed", 50, 1250, 8)
val speedLimitsMap = app.runners.maritime_experiments.SingleCoreMaritimeRunner.populateSpeedLimitsMap(opts.head.speedLimitsPath, scala.collection.mutable.Map[String, scala.collection.mutable.Set[String]]())
val p = prepare(runOpts, opts, speedLimitsMap)
val testingFunction: MaritimeDataOptions => Iterator[Example] = p.head._1.getTestingData
val testingOptions = opts.head
val message = "go"
// Start the actor system
val system = ActorSystem("distributed-oled")
system.actorOf(Props(new Dispatcher(opts zip p.map(x => x._2), runOpts, 2, testingOptions, testingFunction)), name = "TopLevelDispatcher") ! message
}
}
def getOptions(hle: String, chunkSize: Int, limit: Int, coresNum: Int) = {
    val prefixes = coresNum match {
      case 2 => List("2-1", "2-2")
      case 4 => List("4-1", "4-2", "4-3", "4-4")
      case 8 => List("8-1", "8-2", "8-3", "8-4", "8-5", "8-6", "8-7", "8-8")
      case _ => throw new IllegalArgumentException(s"Unsupported coresNum: $coresNum (expected 2, 4 or 8)")
    }
val joinPrefix = (p: String) => p.split("-").mkString("")
val llePath = "/home/nkatz/dev/maritime/brest-data/datasets-my-split"
val hlePath = "/home/nkatz/dev/maritime/brest-data/recognition-my-split"
val speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv"
val info = prefixes map (x => (s"$llePath/dataset${joinPrefix(x)}.txt", s"brest-$x", s"$hlePath/${joinPrefix(x)}/$hle.csv", s"$hlePath/${joinPrefix(x)}/close_to_ports.csv"))
info map { x =>
new MaritimeDataOptions(llePath = x._1, db = x._2, hlePath = x._3, speedLimitsPath = speedLimitsPath, closeToPortsPath = x._4, chunkSize, limit.toDouble, hle)
}
}
def prepare(runOpts: RunningOptions, opts: List[MaritimeDataOptions],
speedLimitsMap: scala.collection.mutable.Map[String, scala.collection.mutable.Set[String]]) = {
opts.map { opt =>
val nodeData = new NodeData(opt.hlePath, opt.llePath, opt.closeToPortsPath, opt.targetConcept, speedLimitsMap)
val trainingFunction: MaritimeDataOptions => Iterator[Example] = nodeData.getTrainingData
(nodeData, trainingFunction)
}
//val testingFunction: MaritimeDataOptions => Iterator[Example] = nodeData.getTestingData
}
}
OLED | OLED-master/src/main/scala/app/runners/maritime_experiments/MyDataBreaking.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners.maritime_experiments
import java.io.{File, PrintWriter}
import scala.io.Source
/**
* Created by nkatz on 7/10/17.
*/
object MyDataBreaking {
  // Splits a list into n parts: the first n - 1 parts are singletons and the
  // last part holds all remaining elements.
  def splitN[A](list: List[A], n: Int): List[List[A]] =
    if (n == 1) List(list) else List(list.head) :: splitN(list.tail, n - 1)
def main(args: Array[String]) = {
val vessels = getAllVessels("/home/nkatz/dev/maritime/brest-data/datasets/dataset1.txt")
println(vessels)
println(vessels.size)
val partitioned2 = getVesselSplit(vessels, 2).toList
val partitioned4 = getVesselSplit(vessels, 4).toList
val partitioned8 = getVesselSplit(vessels, 8).toList
//generateSplitData(partitioned2, "/home/nkatz/dev/maritime/brest-data/datasets-my-split", 2)
//generateSplitData(partitioned4, "/home/nkatz/dev/maritime/brest-data/datasets-my-split", 4)
//generateSplitData(partitioned8, "/home/nkatz/dev/maritime/brest-data/datasets-my-split", 8)
geterateSplitData_Annotation(partitioned2,
"/home/nkatz/dev/maritime/brest-data/recognition/1/highSpeedIn.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"highSpeedIn", 2)
geterateSplitData_Annotation(partitioned4,
"/home/nkatz/dev/maritime/brest-data/recognition/1/highSpeedIn.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"highSpeedIn", 4)
geterateSplitData_Annotation(partitioned8,
"/home/nkatz/dev/maritime/brest-data/recognition/1/highSpeedIn.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"highSpeedIn", 8)
geterateSplitData_Annotation(partitioned2,
"/home/nkatz/dev/maritime/brest-data/recognition/1/close_to_ports.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"close_to_ports", 2)
geterateSplitData_Annotation(partitioned4,
"/home/nkatz/dev/maritime/brest-data/recognition/1/close_to_ports.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"close_to_ports", 4)
geterateSplitData_Annotation(partitioned8,
"/home/nkatz/dev/maritime/brest-data/recognition/1/close_to_ports.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"close_to_ports", 8)
geterateSplitData_Annotation(partitioned2,
"/home/nkatz/dev/maritime/brest-data/recognition/1/stopped.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"stopped", 2)
geterateSplitData_Annotation(partitioned4,
"/home/nkatz/dev/maritime/brest-data/recognition/1/stopped.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"stopped", 4)
geterateSplitData_Annotation(partitioned8,
"/home/nkatz/dev/maritime/brest-data/recognition/1/stopped.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"stopped", 8)
geterateSplitData_Annotation(partitioned2,
"/home/nkatz/dev/maritime/brest-data/recognition/1/lowSpeed.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"lowSpeed", 2)
geterateSplitData_Annotation(partitioned4,
"/home/nkatz/dev/maritime/brest-data/recognition/1/lowSpeed.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"lowSpeed", 4)
geterateSplitData_Annotation(partitioned8,
"/home/nkatz/dev/maritime/brest-data/recognition/1/lowSpeed.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"lowSpeed", 8)
geterateSplitData_Annotation(partitioned2,
"/home/nkatz/dev/maritime/brest-data/recognition/1/sailing.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"sailing", 2)
geterateSplitData_Annotation(partitioned4,
"/home/nkatz/dev/maritime/brest-data/recognition/1/sailing.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"sailing", 4)
geterateSplitData_Annotation(partitioned8,
"/home/nkatz/dev/maritime/brest-data/recognition/1/sailing.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"sailing", 8)
geterateSplitData_Annotation(partitioned2,
"/home/nkatz/dev/maritime/brest-data/recognition/1/loitering.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"loitering", 2)
geterateSplitData_Annotation(partitioned4,
"/home/nkatz/dev/maritime/brest-data/recognition/1/loitering.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"loitering", 4)
geterateSplitData_Annotation(partitioned8,
"/home/nkatz/dev/maritime/brest-data/recognition/1/loitering.csv",
"/home/nkatz/dev/maritime/brest-data/recognition-my-split",
"loitering", 8)
}
def geterateSplitData_Annotation(iter: List[Set[String]], originalAnnotationPath: String, pathForNewAnnotation: String, hle: String, coresNum: Int) = {
var count = 1
iter foreach { currentVessels =>
val file = s"$pathForNewAnnotation/$coresNum$count/$hle.csv"
println(file)
val pw = new PrintWriter(new File(file))
val singleAnnotationCoreData = Source.fromFile(originalAnnotationPath).getLines()
singleAnnotationCoreData foreach { line =>
val vessel = line.split("\\|")(1)
if (currentVessels.contains(vessel)) {
pw.write(line + "\n")
}
}
count += 1
pw.close()
}
}
def generateSplitData(iter: Iterator[Set[String]], path: String, coresNum: Int) = {
var count = 1
iter foreach { currentVessels =>
val file = s"$path/dataset$coresNum$count.txt"
println(file)
val pw = new PrintWriter(new File(file))
val singleCoreData = Source.fromFile("/home/nkatz/dev/maritime/brest-data/datasets/dataset1.txt").getLines.filter(p => !p.contains("HoldsFor") && !p.contains("coord"))
singleCoreData foreach { line =>
val info = line.split("HappensAt")(1)
val _info = info.split("\\]")
val rest = _info(0).split("\\[")(1)
//val lle = rest.split(" ")(0)
val vessel = rest.split(" ")(1)
if (currentVessels.contains(vessel)) {
pw.write(line + "\n")
}
}
count += 1
pw.close()
}
}
def getVesselSplit(vessels: Set[String], coresNum: Int) = {
val partitionSize = (vessels.size.toDouble / coresNum).toInt
vessels.grouped(partitionSize).take(coresNum)
}
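  /* Note: partitionSize is the floor of |vessels| / coresNum, so grouped() can
   * produce one extra, smaller group; take(coresNum) then silently drops those
   * leftover vessels from every split. */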
def getAllVessels(dataPath: String) = {
val data = Source.fromFile(dataPath).getLines
data.foldLeft(Set[String]()) { (accum, x) =>
if (!x.contains("HoldsFor") && !x.contains("coord")) {
val info = x.split("HappensAt")(1)
val _info = info.split("\\]")
//val time = _info(1).trim.toInt
val rest = _info(0).split("\\[")(1)
//val lle = rest.split(" ")(0)
val vessel = rest.split(" ")(1)
accum + vessel
} else {
accum
}
}
}
}
OLED | OLED-master/src/main/scala/app/runners/maritime_experiments/NodeData.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners.maritime_experiments
import scala.io.Source
import com.mongodb.{BasicDBList, BasicDBObject}
import com.mongodb.casbah.Imports.MongoDBObject
import com.mongodb.casbah.Imports.DBObject
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.MongoClient
import logic.Examples.Example
/**
* Created by nkatz on 7/9/17.
*/
class MaritimeDataOptions(
val llePath: String = "",
val db: String = "",
val hlePath: String,
val speedLimitsPath: String = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
val closeToPortsPath: String,
val chunkSize: Int = 1,
val limit: Double = Double.PositiveInfinity.toInt,
val targetConcept: String = "None",
val trainingMode: Boolean = true) extends app.runutils.IOHandling.InputSource
case class VesselAnnotationAtom(atom: String, startTime: Int, endTime: Int, var hasBeenChecked: Boolean = false) {
def getActualAtom(time: Int) = this.atom.replaceAll("ReplaceThisByActualTime", time.toString)
}
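/* The atom field is a template over the interval [startTime, endTime]: the
 * "ReplaceThisByActualTime" placeholder is instantiated per time point via
 * getActualAtom, so a single interval atom serves every enclosed time point. */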
class AnnotationPerVessel(val vessel: String = "", var atoms: Vector[VesselAnnotationAtom] = Vector.empty, var currentIndex: Int = 0) {
def updateAtoms(va: VesselAnnotationAtom) = this.atoms = this.atoms :+ va
def updateIndex = this.currentIndex = this.currentIndex + 1
def getCurrentAnnotationInterval = this.atoms(this.currentIndex)
lazy val sortAtoms: Vector[VesselAnnotationAtom] = this.atoms.sortBy(atom => atom.endTime)
}
class NodeData(
val hlePath: String,
val llePath: String,
val closeToPortsPath: String,
val targetConcept: String,
val speedLimitsMap: scala.collection.mutable.Map[String, scala.collection.mutable.Set[String]]) {
// The key is a vessel and the value is the set of all its annotation atoms wrapped in an AnnotationPerVessel instance
var HLEsMap = scala.collection.mutable.Map[String, AnnotationPerVessel]()
// The key is a vessel and the value is the set of all proximity atoms for that vessel.
var proximityMap = scala.collection.mutable.Map[String, AnnotationPerVessel]()
// The key is time
var portsMap = scala.collection.mutable.Map[String, scala.collection.mutable.Set[String]]()
def updateMap(vessel: String, a: VesselAnnotationAtom, map: scala.collection.mutable.Map[String, AnnotationPerVessel]) = {
if (map.contains(vessel)) map(vessel).updateAtoms(a)
else map(vessel) = new AnnotationPerVessel(vessel, Vector(a))
}
populateHLEsMap(hlePath, targetConcept)
populatePortsMap(closeToPortsPath)
populateProximityMap(llePath)
def getTrainingData(opts: MaritimeDataOptions): Iterator[Example] = {
val mongoClient = MongoClient()
val collection = mongoClient(opts.db)("examples")
val times = collection.find().sort(MongoDBObject("time" -> 1)).limit(opts.limit.toInt).grouped(opts.chunkSize)
times map { timeSlice => getDataSlice(timeSlice) }
}
def getTestingData(opts: MaritimeDataOptions): Iterator[Example] = {
val mongoClient = MongoClient()
val collection = mongoClient(opts.db)("examples")
val times = collection.find().sort(MongoDBObject("time" -> 1)).drop(opts.limit.toInt).take(10000).grouped(opts.chunkSize)
times map { timeSlice => getDataSlice(timeSlice) }
}
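  /* Data split convention here: training streams the first `limit` time points in
   * chunks of `chunkSize`; testing skips those and evaluates on the next 10000. */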
def getDataSlice(objects: Seq[DBObject]) = {
def convert(o: DBObject) = {
val obj = o.asInstanceOf[BasicDBObject]
//val time = obj.get("time").toString
val atoms = obj.get("lles").asInstanceOf[BasicDBList].toList.map(_.toString).toSet
val vessels = obj.get("vessels").asInstanceOf[BasicDBList].toList.map(_.toString).toSet
val areas = obj.get("areas").asInstanceOf[BasicDBList].toList.map(_.toString).toSet
(atoms, vessels, areas)
}
def getTimes = objects.map(o => o.asInstanceOf[BasicDBObject].get("time").toString.toInt)
val times = getTimes
val finalExampleStartTime = times.min
val (lleAtoms, vessels, areas) = objects.foldLeft(Set[String](), Set[String](), Set[String]()) { (accum, o) =>
val obj = convert(o)
(accum._1 ++ obj._1, accum._2 ++ obj._2, accum._3 ++ obj._3)
}
val hleAtoms = times.flatMap(t => getCurrentVesselsAnnotation(t, vessels, this.HLEsMap)).filter(x => x != "None")
val proximityAtoms = times.flatMap(t => getCurrentVesselsAnnotation(t, vessels, this.proximityMap)).distinct.filter(x => x != "None")
val (closeToPortsAtoms, speedLimitAtoms) = times.foldLeft(Set[String](), Set[String]()){ (accum, time) =>
val closeToPortsAtoms_ = {
if (this.portsMap.contains(time.toString)) this.portsMap(time.toString)
else scala.collection.mutable.Set[String]()
}
val speedLimitAtoms_ = areas.flatMap { a =>
if (speedLimitsMap.contains(a)) speedLimitsMap(a)
else scala.collection.mutable.Set[String]()
}.filter(p => p != "None")
(accum._1 ++ closeToPortsAtoms_, accum._2 ++ speedLimitAtoms_)
}
val narrative = lleAtoms.toList ++ proximityAtoms.toList ++ closeToPortsAtoms.toList ++ speedLimitAtoms.toList
new Example(annot = hleAtoms.toList, nar = narrative, _time = finalExampleStartTime.toString)
}
def isWithinInterval(i: Int, interval: (Int, Int)) = {
i >= interval._1 && i <= interval._2
}
  def checkInterval(time: Int, interval: VesselAnnotationAtom): String = {
    if (isWithinInterval(time, (interval.startTime, interval.endTime))) {
      interval.getActualAtom(time)
    } else {
      "None"
    }
  }
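  /* Worked example (values are illustrative): for
   *   VesselAnnotationAtom("holdsAt(stopped(\"v1\"),\"ReplaceThisByActualTime\")", 10, 20)
   * checkInterval(12, _) yields holdsAt(stopped("v1"),"12"),
   * while checkInterval(25, _) yields "None". */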
def getCurrentVesselsAnnotation(time: Int, vessels: Set[String], map: scala.collection.mutable.Map[String, AnnotationPerVessel]) = {
vessels.foldLeft(Set[String]()){ (accum, v) =>
if (map.contains(v)) {
val vesselAnnotation = map(v)
val intervals = vesselAnnotation.atoms
accum ++ intervals.map(i => checkInterval(time, i))
} else {
accum + "None"
}
}
}
def populateHLEsMap(dataPath: String, hle: String) = {
println("Generating HLEs map")
val data = Source.fromFile(dataPath).getLines.filter(x => !x.contains("inf"))
hle match {
case "highSpeedIn" | "withinArea" =>
data foreach { x =>
val s = x.split("\\|")
val (startTime, endTime, vessel, area) = (s(4).toInt, s(5).toInt - 1, s(1), s(2))
val atom = s"""holdsAt($hle("$vessel","$area"),"ReplaceThisByActualTime")"""
val a = VesselAnnotationAtom(atom, startTime, endTime)
updateMap(vessel, a, this.HLEsMap)
}
case "loitering" | "stopped" | "lowSpeed" | "sailing" =>
data foreach { x =>
val s = x.split("\\|")
val (startTime, endTime, vessel) = (s(3).toInt, s(4).toInt - 1, s(1))
val atom = s"""holdsAt($hle("$vessel"),"ReplaceThisByActualTime")"""
val a = VesselAnnotationAtom(atom, startTime, endTime)
updateMap(vessel, a, this.HLEsMap)
}
case "rendezVous" =>
data foreach { x =>
val s = x.split("\\|")
val (startTime, endTime, vessel1, vessel2) = (s(4).toInt, s(5).toInt - 1, s(1), s(0))
val atom = s"""holdsAt($hle("$vessel1","$vessel2"),"ReplaceThisByActualTime")"""
val a = VesselAnnotationAtom(atom, startTime, endTime)
updateMap(vessel1, a, this.HLEsMap)
updateMap(vessel2, a, this.HLEsMap)
}
}
println("Sorting the HLEs map values")
//HLEsMap.foreach(x => x._2.atoms.sortBy(z => z.endTime))
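    // sortBy is non-destructive, so the commented attempt above had no effect;
    // the sorted vector must be written back into the map, as below: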
this.HLEsMap foreach { case (k, v) =>
val v1 = v.atoms.sortBy(z => z.endTime)
this.HLEsMap(k).atoms = v1
}
}
def populateProximityMap(dataPath: String) = {
println("Getting proximity map")
val lines = Source.fromFile(dataPath).getLines.filter(x => x.contains("proximity"))
lines foreach { x =>
val s = x.split("=")(0).split(" ")
val vessel1 = s(2)
val vessel2 = s(3)
val z = x.split("=")(1).trim().split(" ")(1).split("\\(")(1).split("\\)")(0).split("-")
val startTime = z(0).toInt
val endTime = z(1).toInt - 1
val atom = s"""close("$vessel1","$vessel2","ReplaceThisByActualTime")"""
val a = VesselAnnotationAtom(atom, startTime, endTime)
updateMap(vessel1, a, this.proximityMap)
updateMap(vessel2, a, this.proximityMap)
}
}
def populatePortsMap(dataPath: String) = {
println("Getting close-to-ports map")
val data = Source.fromFile(dataPath).getLines
data foreach { x =>
val s = x.split("\\|")
val time = s(0)
val vessel = s(1)
val atom = s"""notCloseToPorts("$vessel","$time")"""
if (this.portsMap.contains(time)) this.portsMap(time) = this.portsMap(time) += atom
else this.portsMap(time) = scala.collection.mutable.Set(atom)
}
}
}
OLED | OLED-master/src/main/scala/app/runners/maritime_experiments/NonBlockingMaritimeDistRunner.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners.maritime_experiments
import akka.actor.{ActorSystem, Props}
import app.runners.maritime_experiments.MultiCoreMaritimeRunner.getOptions
import app.runutils.{CMDArgs, RunningOptions}
import logic.Examples.Example
import oled.non_blocking.Dispatcher
/**
* Created by nkatz on 7/10/17.
*/
object NonBlockingMaritimeDistRunner {
def main(args: Array[String]) = {
val argsok = CMDArgs.argsOk(args)
if (!argsok._1) {
println(argsok._2)
System.exit(-1)
} else {
val runOpts = CMDArgs.getOLEDInputArgs(args)
val opts = getOptions("highSpeedIn", 10, 5000, 2)
val speedLimitsMap = app.runners.maritime_experiments.SingleCoreMaritimeRunner.populateSpeedLimitsMap(opts.head.speedLimitsPath, scala.collection.mutable.Map[String, scala.collection.mutable.Set[String]]())
val p = prepare(runOpts, opts, speedLimitsMap)
val testingFunction: MaritimeDataOptions => Iterator[Example] = p.head._1.getTestingData
val testingOptions = opts.head
val message = "go"
// Start the actor system
val system = ActorSystem("distributed-oled")
system.actorOf(Props(new Dispatcher(opts zip p.map(x => x._2), runOpts, 2, testingOptions, testingFunction)), name = "TopLevelDispatcher") ! message
}
}
def prepare(runOpts: RunningOptions, opts: List[MaritimeDataOptions],
speedLimitsMap: scala.collection.mutable.Map[String, scala.collection.mutable.Set[String]]) = {
opts.map { opt =>
val nodeData = new NodeData(opt.hlePath, opt.llePath, opt.closeToPortsPath, opt.targetConcept, speedLimitsMap)
val trainingFunction: MaritimeDataOptions => Iterator[Example] = nodeData.getTrainingData
(nodeData, trainingFunction)
}
//val testingFunction: MaritimeDataOptions => Iterator[Example] = nodeData.getTestingData
}
}
OLED | OLED-master/src/main/scala/app/runners/maritime_experiments/SingleCoreDataOptions.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners.maritime_experiments
/**
* Created by nkatz on 7/9/17.
*/
object SingleCoreDataOptions {
lazy val highSpeedInDataOptionsTraining = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets/dataset1.txt",
db = "brest-1",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition/1/highSpeedIn.csv",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition/1/close_to_ports.csv",
chunkSize = 50,
limit = 10000.0,
targetConcept = "highSpeedIn")
lazy val stoppedDataOptionsTraining = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets/dataset1.txt",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition/1/stopped-no-infs.csv",
db = "brest-1",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition/1/close_to_ports.csv",
chunkSize = 10,
limit = 10000.0,
targetConcept = "stopped")
lazy val sailingDataOptionsTraining = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets/dataset1.txt",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition/1/sailing-no-infs.csv",
db = "brest-1",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition/1/close_to_ports.csv",
chunkSize = 100,
limit = 100000.0,
targetConcept = "sailing")
lazy val lowSpeedDataOptionsTraining = new MaritimeDataOptions(
llePath = "/home/nkatz/dev/maritime/brest-data/datasets/dataset1.txt",
hlePath = "/home/nkatz/dev/maritime/brest-data/recognition/1/lowSpeed-no-infs.csv",
db = "brest-1",
speedLimitsPath = "/home/nkatz/dev/maritime/brest-data/areas_speed_limits.csv",
closeToPortsPath = "/home/nkatz/dev/maritime/brest-data/recognition/1/close_to_ports.csv",
chunkSize = 200,
limit = 10000.0,
targetConcept = "lowSpeed")
}
OLED | OLED-master/src/main/scala/app/runners/maritime_experiments/SingleCoreMaritimeRunner.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runners.maritime_experiments
import akka.actor.{ActorSystem, Props}
import app.runutils.CMDArgs
import logic.Examples.Example
import oled.single_core.Master
import scala.io.Source
/**
* Created by nkatz on 7/9/17.
*/
object SingleCoreMaritimeRunner {
def main(args: Array[String]) = {
val argsok = CMDArgs.argsOk(args)
if (!argsok._1) {
println(argsok._2)
System.exit(-1)
} else {
val runOpts = CMDArgs.getOLEDInputArgs(args)
val trainingDataOptions = SingleCoreDataOptions.sailingDataOptionsTraining
val testingDataOptions = trainingDataOptions
// the key is area
// This speedLimitsMap is common to all cores (there's only one general speed limits file)
val speedLimitsMap = populateSpeedLimitsMap(trainingDataOptions.speedLimitsPath, scala.collection.mutable.Map[String, scala.collection.mutable.Set[String]]())
val nodeData = new NodeData(trainingDataOptions.hlePath, trainingDataOptions.llePath, trainingDataOptions.closeToPortsPath,
trainingDataOptions.targetConcept, speedLimitsMap)
val trainingFunction: MaritimeDataOptions => Iterator[Example] = nodeData.getTrainingData
val testingFunction: MaritimeDataOptions => Iterator[Example] = nodeData.getTestingData
val system = ActorSystem("HoeffdingLearningSystem")
val startMsg = if (runOpts.evalth != "None") "eval" else "start"
system.actorOf(Props(new Master(runOpts, trainingDataOptions, testingDataOptions, trainingFunction, testingFunction)), name = "Master-Actor") ! startMsg
}
}
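  /* Each line of the speed-limits file is assumed to be of the form "area|limit";
   * it is turned into a speedLimit("area","limit") atom and grouped by area. */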
def populateSpeedLimitsMap(dataPath: String, speedLimitsMap: scala.collection.mutable.Map[String, scala.collection.mutable.Set[String]]) = {
println("Getting speed limits map")
val data = Source.fromFile(dataPath).getLines
data foreach { x =>
val s = x.split("\\|")
val area = s(0)
val limit = s(1)
val atom = s"""speedLimit("$area","$limit")"""
if (speedLimitsMap.contains(area)) speedLimitsMap(area) = speedLimitsMap(area) += atom
else speedLimitsMap(area) = scala.collection.mutable.Set(atom)
}
speedLimitsMap
}
}
OLED | OLED-master/src/main/scala/app/runutils/BKHandling.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runutils
import com.typesafe.scalalogging.LazyLogging
import logic.Literal
import logic.Modes.ModeAtom
/**
* Created by nkatz at 18/10/2018
*/
object BKHandling extends LazyLogging {
def getCoverageDirectives(varbedExmplPatterns: List[String]) = {
//val varbedExmplPatterns = for (x <- eps2) yield x.varbed.tostring
val tps = (x: String) => s"\ntps($x):- $x, example($x).\n"
val tpsMarked = (x: String) => s"\ntps(I,$x):- marked(I,$x), example($x), rule(I).\n"
val fps = (x: String) => s"\nfps($x):- $x, not example($x).\n"
val fpsMarked = (x: String) => s"\nfps(I,$x):- marked(I,$x), not example($x), rule(I).\n"
val fns = (x: String) => s"\nfns($x) :- example($x), not $x.\n"
val fnsMarked = (x: String) => s"\nfns(I,$x):- not marked(I,$x), example($x), rule(I).\n"
val coverAllPositivesConstraint = (x: String) => s"\n:- example($x), not $x.\n"
val excludeAllNegativesConstraint = (x: String) => s"\n:- $x, not example($x).\n"
val (tp, fp, fn, tpm, fpm, fnm, allpos, allnegs) =
varbedExmplPatterns.
foldLeft(List[String](), List[String](), List[String](), List[String](),
List[String](), List[String](), List[String](), List[String]()){ (x, y) =>
(x._1 :+ tps(y), x._2 :+ fps(y), x._3 :+ fns(y),
x._4 :+ tpsMarked(y), x._5 :+ fpsMarked(y),
x._6 :+ fnsMarked(y), x._7 :+ coverAllPositivesConstraint(y), x._8 :+ excludeAllNegativesConstraint(y))
}
val mkString = (x: List[String]) => x.mkString("\n")
(mkString(tp), mkString(fp), mkString(fn), mkString(tpm), mkString(fpm), mkString(fnm), mkString(allpos), mkString(allnegs))
}
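  /* For illustration, with the hypothetical example pattern "holdsAt(meeting(X,Y),T)"
   * the tps directive above expands to:
   *   tps(holdsAt(meeting(X,Y),T)):- holdsAt(meeting(X,Y),T), example(holdsAt(meeting(X,Y),T)).
   * and analogously for fps/fns and their marked(I,_) variants. */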
def tpsCountRules(eps: List[Literal]) = {
eps.map{ x =>
s"\ntps(I,X) :- rule(I), X = #count { ${x.getVars.toList.map(_.tostring).mkString(",")}: marked(I,${x.tostring}), " +
s"example(${x.tostring}) }."
}.mkString("\n")
}
def fpsCountRules(eps: List[Literal]) = {
eps.map{ x =>
s"\nfps(I,X) :- rule(I), X = #count { ${x.getVars.toList.map(_.tostring).mkString(",")}: marked(I,${x.tostring}), " +
s"not example(${x.tostring}) }."
}.mkString("\n")
}
def fnsCountRules(eps: List[Literal]) = {
eps.map{ x =>
s"\nfns(I,X) :- rule(I), X = #count { ${x.getVars.toList.map(_.tostring).mkString(",")}: example(${x.tostring}), " +
s"not marked(I,${x.tostring}) }."
}.mkString("\n")
}
/* This method is used to generate the ASP code that scores initiation and termination rules */
def generateScoringBK(modehs: List[ModeAtom]) = {
if (modehs.isEmpty) { logger.error("No head mode declarations found."); System.exit(-1) }
if (Globals.glvalues("with-ec").toBoolean) { // We're learning with the Event Calculus in the BK.
// We can get the fluent from the head modes.
val targetFluent = {
// We can take the first one of the head modes (the target fluent is the same
// regardless of whether the mode atom is an initiation of termination one).
// Then, to get the target fluent, simply retrieve the first one of the 'terms' arg.
val t = modehs.head.varbed.terms.head
// The 'if' is for cases where the target pred is of the form initiatedAt(#fluent, +time), as in
// initiatedAt(#fluent, +time) where fluent(leisure) is in the BK.
// The 'else' is for compound fluents.
if (t.isVariabe) Literal(predSymbol = t._type) else modehs.head.varbed.terms.head.asInstanceOf[Literal]
//modehs.head.varbed.terms.head.asInstanceOf[Literal]
}
val varNamesTostr = targetFluent.getVars.map(x => x.name).mkString(",")
// The 'if' is for cases where the target pred is of the form initiatedAt(#fluent, +time)
// the 'else' is for compound fluents.
val vars = if (varNamesTostr == "") "X0,Te,Ts" else s"$varNamesTostr,Te,Ts"
val fluent = if (varNamesTostr == "") "X0" else s"${targetFluent.tostring}"
val typePreds = if (varNamesTostr == "") s"${targetFluent.tostring}(X0), next(Ts,Te), time(Te), time(Ts)" else "next(Ts,Te), time(Te), time(Ts)"
/* Initiation scoring rules: */
val initScoringRule1 = s"tps(I, X) :- rule(I), X = #count {$vars: example( holdsAt($fluent,Te) ), " +
s"marked(I, initiatedAt($fluent,Ts) ), $typePreds }."
val initScoringRule2 = s"fps(I, X) :- rule(I), X = #count {$vars: not example( holdsAt($fluent,Te) ), " +
s"marked(I, initiatedAt($fluent,Ts) ), $typePreds }."
val initScoringRule3 = s"fns(I, X) :- rule(I), X = #count {$vars: example( holdsAt($fluent,Te) ), " +
s"not marked(I, initiatedAt($fluent,Ts) ), $typePreds }."
/* Termination scoring rules: */
val termScoringRule1 = s"tps(I, X) :- rule(I), X = #count {$vars: example( holdsAt($fluent,Te) ), " +
s"not marked(I, terminatedAt($fluent,Ts) ), $typePreds }."
val termScoringRule2 = s"tps(I, X) :- rule(I), X = #count {$vars: example( holdsAt($fluent,Ts) ), " +
s"not example( holdsAt($fluent,Te) ), marked(I, terminatedAt($fluent,Ts) ), $typePreds }."
val termScoringRule3 = s"fns(I, X) :- rule(I), X = #count {$vars: example( holdsAt($fluent,Te) ), " +
s"marked(I, terminatedAt($fluent,Ts) ), $typePreds }."
val termScoringRule4 = s"fps(I, X) :- rule(I), X = #count {$vars: example( holdsAt($fluent,Ts) ), " +
s"not example( holdsAt($fluent,Te) ), not marked(I, terminatedAt($fluent,Ts) ), $typePreds }."
val initRulesToStr = List(initScoringRule1, initScoringRule2, initScoringRule3).mkString("\n")
val termRulesToStr = List(termScoringRule1, termScoringRule2, termScoringRule3, termScoringRule4).mkString("\n")
(initRulesToStr, termRulesToStr)
} else { // No Event Calculus
val targetPred = modehs.head.varbed
val varNamesTostr = targetPred.getVars.map(x => x.name).mkString(",")
val tpsScoringRule = s"tps(I, X) :- rule(I), X = #count {$varNamesTostr: example(${targetPred.tostring}), marked(I, ${targetPred.tostring}) }."
val fpsScoringRule = s"fps(I, X) :- rule(I), X = #count {$varNamesTostr: not example(${targetPred.tostring}), marked(I, ${targetPred.tostring}) }."
val fnsScoringRule = s"fns(I, X) :- rule(I), X = #count {$varNamesTostr: example(${targetPred.tostring}), not marked(I, ${targetPred.tostring}) }."
val scoringRules = List(tpsScoringRule, fpsScoringRule, fnsScoringRule).mkString("\n")
(scoringRules, scoringRules)
}
}
}
OLED | OLED-master/src/main/scala/app/runutils/CMDArgs.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runutils
import com.mongodb.casbah.MongoClient
import com.typesafe.scalalogging.LazyLogging
/**
* Created by nkatz on 6/20/17.
*/
/*
 * To add a new CMD argument: append an entry to the `arguments` list defined in
 * this file (judging from the usage below, each entry carries a name, a valueType
 * in {"String","Int","Double","Boolean"} and a default), then read the value in
 * getOLEDInputArgs via getMatchingArgumentValue and thread it into RunningOptions.
 */
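/* Hedged sketch; "--my-flag" and myFlag are hypothetical names used for illustration:
 *
 *   // 1. add an entry with name = "--my-flag", valueType = "Boolean", default = "false"
 *   //    to the `arguments` list (the exact constructor shape is assumed);
 *   // 2. inside getOLEDInputArgs:
 *   val myFlag = getMatchingArgumentValue("--my-flag")
 *   // 3. pass myFlag.toString.toBoolean through to RunningOptions.
 */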
object CMDArgs extends LazyLogging {
private val map = scala.collection.mutable.Map[String, String]()
def getOLEDInputArgs(args: Array[String]) = {
val split = args map { x => val z = x.replaceAll("\\s", "").split("="); (z(0), z(1)) }
def getMatchingArgumentValue(argname: String): Any = {
val arg = arguments.find(x => x.name == argname).getOrElse(throw new RuntimeException("Argument not found."))
val value = arg.valueType match {
case "String" => split.find(x => x._1 == arg.name).getOrElse(("", arg.default))._2.toString
case "Int" => split.find(x => x._1 == arg.name).getOrElse(("", arg.default))._2.toInt
case "Double" => split.find(x => x._1 == arg.name).getOrElse(("", arg.default))._2.toDouble
case "Boolean" => split.find(x => x._1 == arg.name).getOrElse(("", arg.default))._2.toBoolean
case _ => throw new RuntimeException("Don't know what to do with these arguments...")
}
map += argname -> value.toString
value
}
val evaluate_existing = getMatchingArgumentValue("--evalth")
map += "--evalth" -> evaluate_existing.toString
val with_jep = getMatchingArgumentValue("--wjep")
val entryPath = getMatchingArgumentValue("--inpath")
val delta = getMatchingArgumentValue("--delta")
val pruningThreshold = getMatchingArgumentValue("--prune")
val minSeenExmpls = getMatchingArgumentValue("--minseen")
val specializationDepth = getMatchingArgumentValue("--spdepth")
val breakTiesThreshold = getMatchingArgumentValue("--ties")
val repeatFor = getMatchingArgumentValue("--repfor")
val chunkSize = getMatchingArgumentValue("--chunksize")
val onlinePruning = getMatchingArgumentValue("--onlineprune")
val withPostPruning = getMatchingArgumentValue("--postprune")
val tryMoreRules = getMatchingArgumentValue("--try-more-rules")
val targetConcept = getMatchingArgumentValue("--target")
val trainSetNum = getMatchingArgumentValue("--trainset")
val randomOrder = getMatchingArgumentValue("--randorder")
val scoringFun = getMatchingArgumentValue("--scorefun")
val minEvaluatedOn = getMatchingArgumentValue("--eval-atleast-on")
val cores = getMatchingArgumentValue("--coresnum")
val compress_new_rules = getMatchingArgumentValue("--compress-new-rules")
val mintps = getMatchingArgumentValue("--min-pos-covered")
val processBatchBeforeMailBox = getMatchingArgumentValue("--mailbox-check")
val shuffleData = getMatchingArgumentValue("--shuffle-data")
val showRefs = getMatchingArgumentValue("--showrefs")
val pruneAfter = getMatchingArgumentValue("--prune-after")
val mongoCol = getMatchingArgumentValue("--mongo-collection")
val dataLimit = getMatchingArgumentValue("--data-limit")
val tpWeight = getMatchingArgumentValue("--tps-weight")
val fpWeight = getMatchingArgumentValue("--fps-weight")
val fnWeight = getMatchingArgumentValue("--fns-weight")
val withInertia = getMatchingArgumentValue("--with-inertia")
val weightLearn = getMatchingArgumentValue("--weight-learning")
val mlnWeightThreshold = getMatchingArgumentValue("--mln-weight-at-least")
val parallelClauseEval = getMatchingArgumentValue("--parallel-clause-eval")
val adagradDelta = getMatchingArgumentValue("--ada-delta")
val adaLearnRate = getMatchingArgumentValue("--ada-learn-rate")
val adaRegularization = getMatchingArgumentValue("--ada-regularization")
val adaLossFunction = getMatchingArgumentValue("--ada-loss-function")
val withEventCalculus = getMatchingArgumentValue("--with-ec")
val showStats = getMatchingArgumentValue("--show-stats")
val saveTheoryTo = getMatchingArgumentValue("--saveto")
val holdout = getMatchingArgumentValue("--holdout")
val prequential = getMatchingArgumentValue("--prequential")
val train = getMatchingArgumentValue("--train")
val test = getMatchingArgumentValue("--test")
val selfTraining = getMatchingArgumentValue("--selftrain")
val preprune = getMatchingArgumentValue("--preprune")
//-------------
// Global sets:
//-------------
Globals.glvalues("with-jep") = with_jep.toString
Globals.glvalues("specializationDepth") = specializationDepth.toString
Globals.scoringFunction = scoringFun.toString
Globals.glvalues("tp-weight") = tpWeight.toString
Globals.glvalues("fp-weight") = fpWeight.toString
Globals.glvalues("fn-weight") = fnWeight.toString
Globals.glvalues("with-inertia") = withInertia.toString
Globals.glvalues("weight-learning") = weightLearn.toString
Globals.glvalues("with-ec") = withEventCalculus.toString
// Define this here so that all values in Globals.glvalues be already set.
val globals = new Globals(entryPath.toString)
// show the params:
logger.info(s"\nRunning with options:\n${map.map{ case (k, v) => s"$k=$v" }.mkString(" ")}\n")
val inps = new RunningOptions(entryPath.toString, delta.toString.toDouble, pruningThreshold.toString.toDouble,
minSeenExmpls.toString.toInt, specializationDepth.toString.toInt, breakTiesThreshold.toString.toDouble,
repeatFor.toString.toInt, chunkSize.toString.toInt, processBatchBeforeMailBox.toString.toInt,
onlinePruning.toString.toBoolean, withPostPruning.toString.toBoolean, targetConcept.toString,
compress_new_rules.toString.toBoolean, mintps.toString.toInt, tryMoreRules.toString.toBoolean,
trainSetNum.toString.toInt, randomOrder.toString.toBoolean, scoringFun.toString, with_jep.toString.toBoolean,
evaluate_existing.toString, train.toString, globals, minEvaluatedOn.toString.toInt, cores.toString.toInt,
shuffleData.toString.toBoolean, showRefs.toString.toBoolean, pruneAfter.toString.toInt, mongoCol.toString,
dataLimit.toString.toInt, tpWeight.toString.toInt, fpWeight.toString.toInt, fnWeight.toString.toInt,
withInertia.toString.toBoolean, weightLearn.toString.toBoolean, mlnWeightThreshold.toString.toDouble,
parallelClauseEval.toString.toBoolean, adagradDelta.toString.toDouble, adaLearnRate.toString.toDouble,
adaRegularization.toString.toDouble, adaLossFunction.toString, withEventCalculus.toString.toBoolean,
showStats.toString.toBoolean, saveTheoryTo.toString, holdout.toString.toInt, prequential.toString.toBoolean,
test.toString, selfTraining.toString.toBoolean, preprune.toString.toDouble)
if (inps.train == "None") {
if (inps.evalth == "None") {
logger.error("No training set provided. Re-run with --train=<db name or path to training set file>.")
System.exit(-1)
} else {
checkData(inps.test, inps.mongoCollection, "test")
}
} else {
checkData(inps.train, inps.mongoCollection, "train")
}
//if (inps.test != "None") {
// checkData(inps.test, inps.mongoCollection, "test")
//}
if (inps.entryPath == "None") {
logger.error("No background knowledge provided. At least a mode declarations file is necessary. Re-run with --inpath=<path to background knowledge.>")
System.exit(-1)
}
inps
}
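  /* A minimal usage sketch (the path/db names below are hypothetical; any option
   * omitted from the array falls back to the defaults declared in `arguments` below):
   *
   *   val inps: RunningOptions =
   *     CMDArgs.getOLEDInputArgs(Array("--inpath=/tmp/caviar-bk", "--train=caviar-train", "--delta=0.005"))
   */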
val arguments = Vector(
Arg(name = "--inpath", valueType = "String", text = "The path to the background knowledge files.", default = "None"),
Arg(name = "--delta", valueType = "Double", text = "Delta for the Hoeffding test.", default = "0.05"),
Arg(name = "--evalth", valueType = "String", text = "If true a hand-crafted theory in a file whose path is given by this parameter is evaluated (no learning).", default = "None"),
Arg(name = "--wjep", valueType = "Boolean", text = "If true the ASP solver is called via the java-embedded-python (jep) interface.", default = "false"),
Arg(name = "--prune", valueType = "Double", text = "Pruning threshold. Clauses with a lower score are removed. Set to 0.0 to disable pruning.", default = "0.0"),
Arg(name = "--minseen", valueType = "Int", text = "Minimum number of examples to evaluate on before breaking ties.", default = "1000"),
Arg(name = "--spdepth", valueType = "Int", text = "Specialization depth. All specializations of a rule up to this length are tried simultaneously.", default = "1"),
Arg(name = "--ties", valueType = "Double", text = "Tie-breaking threshold.", default = "0.05"),
Arg(name = "--repfor", valueType = "Int", text = "Re-see the data this-many times. ", default = "1"),
Arg(name = "--chunksize", valueType = "Int", text = "Mini-batch size. ", default = "1"),
Arg(name = "--onlineprune", valueType = "Boolean", text = "If true bad rules are pruned in an online fashion.", default = "false"),
Arg(name = "--postprune", valueType = "Boolean", text = "If true bad rules are pruned after learning terminates.", default = "true"),
Arg(name = "--try-more-rules", valueType = "Boolean", text = "If true, a larger number of rules will be generated.", default = "false"),
Arg(name = "--target", valueType = "String", text = "The target concept. This is used in case the training data contain more than one target concept", default = "None"),
Arg(name = "--trainset", valueType = "Int", text = "Number of training-testing set pair (this is used in a cross-validation setting).", default = "1"),
Arg(name = "--randorder", valueType = "Boolean", text = "If true the training data are given in random order.", default = "true"),
Arg(name = "--scorefun", valueType = "String", text = "Specify a scoring function. Available values are 'default' (uses precision-recall), 'foilgain', 'fscore'.", default = "default"),
Arg(name = "--eval-atleast-on", valueType = "Int", text = "Minimum number of examples on which a rule must be evaluated in order to be included in an output theory.", default = "1000"),
Arg(name = "--coresnum", valueType = "Int", text = "Number of cores. This is used by the distributed version.", default = "1"),
Arg(name = "--compress-new-rules", valueType = "Boolean", text = "If true new rules originating from bottom clauses that have already been generated previously are dropped.", default = "true"),
Arg(name = "--min-pos-covered", valueType = "Int", text = "Require of a rule to cover a minimum number of positives (set to zero to ignore).", default = "0"),
Arg(name = "--mailbox-check", valueType = "Int", text = "Number of mini batches to check before returning to idle state to check mailbox (for the distributed version).", default = "1"),
Arg(name = "--shuffle-data", valueType = "Boolean", text = "If true the data are shuffled each time they are seen (used for order effects).", default = "false"),
Arg(name = "--showrefs", valueType = "Boolean", text = "If true all candidate refinements are printed out during learning.", default = "false"),
Arg(name = "--prune-after", valueType = "Int", text = "Minimum number of examples after which a bad rule may be pruned.", default = "100000"),
Arg(name = "--mongo-collection", valueType = "String", text = "A mongo collection containing the data.", default = "examples"),
Arg(name = "--data-limit", valueType = "Int", text = "Fetch that-many data from the db to learn from (default is max integer).", default = s"${Double.PositiveInfinity.toInt}"),
Arg(name = "--tps-weight", valueType = "Int", text = "Weight on true positive instances.", default = "1"),
Arg(name = "--fps-weight", valueType = "Int", text = "Weight on false positive instances.", default = "1"),
Arg(name = "--fns-weight", valueType = "Int", text = "Weight on false negative instances.", default = "10"),
Arg(name = "--with-inertia", valueType = "Boolean", text = "If true learns with inertia from edge interval points only.", default = "false"),
Arg(name = "--weight-learning", valueType = "Boolean", text = "If true use AdaGrad to learn weighted clauses.", default = "false"),
Arg(name = "--mln-weight-at-least", valueType = "Double", text = "Remove rules with mln-weight lower than this.", default = "0.1"),
Arg(name = "--parallel-clause-eval", valueType = "Boolean", text = "Evaluate clauses in parallel during weight learning.", default = "true"),
Arg(name = "--ada-delta", valueType = "Double", text = "Delta parameter for AdaGrad (weight learning).", default = "1.0"),
Arg(name = "--ada-learn-rate", valueType = "Double", text = "Learning rate parameter (eta) for AdaGrad (weight learning).", default = "1.0"),
Arg(name = "--ada-regularization", valueType = "Double", text = "Regularization parameter (lambda) for AdaGrad (weight learning).", default = "0.01"),
Arg(name = "--ada-loss-function", valueType = "String", text = "Loss function for AdaGrad. Either 'default' (for predictive loss) or 'custom'", default = "default"),
Arg(name = "--with-ec", valueType = "Boolean", text = "Learning using the Event Calculus in the Background knowledge.", default = "true"),
Arg(name = "--show-stats", valueType = "Boolean", text = "If true performance stats are printed out.", default = "false"),
Arg(name = "--saveto", valueType = "String", text = "Path to a file to wtite the learnt theory to.", default = ""),
Arg(name = "--holdout", valueType = "Int", text = "Perform holdout evaluation on a test set every <Int> time points. Omit if --holdout=0", default = "0"),
Arg(name = "--prequential", valueType = "Boolean", text = "If true perform prequential evaluation on every incoming data batch.", default = "true"),
Arg(name = "--train", valueType = "String", text = "Training set location. May either by a path to a file or a mongodb name", default = "None"),
Arg(name = "--test", valueType = "String", text = "Testing set location. May either by a path to a file or a mongodb name", default = "None"),
Arg(name = "--selftrain", valueType = "Boolean", text = "If true performs simple self-training from unlabeled data (experimental).", default = "false"),
Arg(name = "--preprune", valueType = "Double", text = "Do not specialize a rule if its score is greater than this threshold.", default = "1.0")
)
//--carry-last-inferred
def checkData(dataInput: String, collection: String, trainOrTest: String) = {
val msg = if (trainOrTest == "train") "train" else "test"
// Check if it's a file
val fileExists = new java.io.File(dataInput).exists
if (!fileExists) {
// check if it's a db
val dbok = checkDB(dataInput, collection)
if (!dbok) {
logger.error(s"Running with --$msg=$dataInput but that's neither a database nor a file")
System.exit(-1)
}
}
}
def splitString(s: String, l: Int, chunks: Vector[String]): Vector[String] = {
s.length > l match {
case true =>
val first = s.splitAt(l)
splitString(first._2, l, chunks :+ first._1)
case _ => chunks :+ s
}
}
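  /* For illustration, splitString breaks a string into chunks of length l
   * (the last chunk may be shorter), e.g.
   *
   *   splitString("abcdefg", 3, Vector())  // Vector("abc", "def", "g")
   */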
def helpMesg = {
val msg = (x: Arg) => s"${x.name}=<${x.valueType}> | default=<${x.default}>"
val maxLength = arguments.map(x => msg(x).length).max
val thisLength = (x: Arg) => msg(x).length
val message = (x: Arg) => s" ${msg(x)} ${" " * (maxLength - thisLength(x))} : ${x.text}"
//val message = (x: Arg) => s" ${msg(x)} ${" " * (maxLength - thisLength(x))} : ${splitString(x.text, 30, Vector[String]())}"
(List("\nOLED options:\n") ++ arguments.map(x => message(x))).mkString("\n")
}
  /*Checks if mandatory arguments are in place. Returns (false, msg) if they are not, else (true, "")*/
def argsOk(args: Array[String]): (Boolean, String) = {
if (args.isEmpty) {
(false, "Missing options. Run with --help.")
} else if (args.exists(x => x.contains("--help"))) {
(false, helpMesg)
} else if (!args.exists(x => x.contains("--inpath"))) {
(false, "A mandatory option is missing (e.g. path to bk/mode declarations files or the name of a database with training examples)." +
" Re-run with --help to see options")
} else {
(true, "")
}
}
/* If this returns false either the db does not exist or it is empty. */
def checkDB(dbName: String, colName: String) = {
val mongoClient = MongoClient()
val exists = mongoClient.databaseNames().toSet.contains(dbName)
if (!exists) {
logger.error(s"Database $dbName does not exist")
false
} else {
val collection = mongoClient(dbName)(colName)
val nonEmpty = collection.nonEmpty
mongoClient.close()
if (!nonEmpty) {
logger.error(s"Database $dbName is empty.")
}
nonEmpty
}
}
}
case class Arg(name: String, valueType: String, text: String, default: String)
class RunningOptions(
val entryPath: String,
val delta: Double,
val pruneThreshold: Double,
val minSeenExmpls: Int,
val specializationDepth: Int,
val breakTiesThreshold: Double,
val repeatFor: Int,
val chunkSize: Int,
val processBatchBeforeMailBox: Int,
val onlinePruning: Boolean,
val withPostPruning: Boolean,
val targetHLE: String,
val compressNewRules: Boolean,
val minTpsRequired: Int,
val tryMoreRules: Boolean,
val trainSetNum: Int,
val randomOrder: Boolean,
val scoringFun: String,
val wjep: Boolean,
val evalth: String,
val train: String,
val globals: Globals,
val minEvalOn: Int,
val cores: Int,
val shuffleData: Boolean,
val showRefs: Boolean,
val pruneAfter: Int,
val mongoCollection: String,
val dataLimit: Int,
val tpWeight: Int,
val fpWeight: Int,
val fnWeight: Int,
val withInertia: Boolean,
val weightLean: Boolean,
val mlnWeightThreshold: Double,
val parallelClauseEval: Boolean,
val adaGradDelta: Double,
val adaLearnRate: Double,
val adaRegularization: Double,
val adaLossFunction: String,
val withEventCalculs: Boolean,
val showStats: Boolean,
val saveTheoryTo: String,
val holdout: Int,
val prequential: Boolean,
val test: String,
val selfTraining: Boolean,
val preprune: Double)
| 19,405 | 57.451807 | 202 | scala |
OLED | OLED-master/src/main/scala/app/runutils/Debug.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runutils
object Debug {
/*
*
* Collect total timings for certain operations
*
* */
var newRulesTime: Double = 0
var mapInferenceTime: Double = 0
var adgradTime: Double = 0
var groundNetworkTime: Double = 0
var expandRulesTime: Double = 0
var totalILPSolverTime: Double = 0
}
| 1,009 | 27.055556 | 72 | scala |
OLED | OLED-master/src/main/scala/app/runutils/Globals.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runutils
import java.io.PrintWriter
import logic.Modes.ModeAtom
import logic.{AtomSignature, Clause, Literal, Theory, Variable}
import utils.lookaheads._
import utils.parsers.ModesParser
import BKHandling._
import com.typesafe.scalalogging.LazyLogging
import woled.State
import scala.io.Source
import scala.util.matching.Regex
/**
* Created by nkatz on 9/13/16.
*/
object Globals {
def apply(): Unit = {
new Globals("")
}
//var hedgePredictionThreshold = 0.0 // Quick & dirty, for experiments
var sleepingExpertsLearningRate = 0.0 // Quick & dirty, for experiments
var sleepingExpertsFeedBackBias = 0.0
var hedgeInertia = false
var timeDebug = List[Double]()
var scoringFunction = "default" // precision for initiation, recall for termination
var totalPos = 0
var totalNegs = 0
// This may be set to a different value (e.g. passed from cmd) during the construction of the Globals instance
var MAX_CLAUSE_LENGTH = 15
var LEARNING_WHOLE_THEORIES = false // not really used anywhere
val cwd: String = System.getProperty("user.dir") // Current working dir
val ASPHandler = s"$cwd/asp/ASPHandler.py"
/* Global names */
val FIND_ALL_REFMS = "findAllRefs"
val ABDUCTION = "abduction"
val DEDUCTION = "deduction"
val GET_QUERIES = "getQueries"
val GET_GROUNDINGS = "getGroundings"
val XHAIL = "xhail"
val CHECKSAT = "checksat"
val ILED = "iled"
val INFERENCE = "inference"
val SEARCH_MODELS = "search_models" //used to search alternative abductive explanations with iterative abductive search
val SCORE_RULES = "score_rules"
val GROW_NEW_RULE_TEST = "grow_new_rule_test"
// These values may be set during the construction of the Globals instance
var glvalues =
scala.collection.mutable.Map[String, String](
"cwa" -> "true",
"iter-deepening" -> "false",
"mode" -> "incremental",
"perfect-fit" -> "true",
"iterations" -> "1", //"1000",
"variableDepth" -> "1",
"withWeaks" -> "false",
"withBacktracking" -> "true",
"refinementSearch" -> "setCover", // either setCover or fullSearch
// specializeOnly does not generate new kernel sets on new examples,
// it only tries to refine an initial hypothesis based
// on an initial kernel set, acquired from the first window
"specializeOnly" -> "false",
"compressKernels" -> "true",
"ruleEvaluationFunction" -> "precision", //"mestimate"//"precision"
// specializationDepth is used by OLED only. It specifies how "deep" in the
// specialization lattice of a bottom clause we want to search. For instance
// with specializationDepth=2, OLED generates candidate refinements of a clause
// by using the 1-subsets and the 2-subsets of the corresponding bottom clause.
      // with specializationDepth=3 it uses 1-subsets, 2-subsets and 3-subsets and so on.
"specializationDepth" -> "1",
// if OLEDdownscoreBySimilarity is true then OLED penalizes candidate clauses
// that are too similar to existing clauses in the current hypothesis,
// to allow for exploring the quality of different clauses (that may be never
// selected, because of a tie in score with other clauses)
"OLEDdownscoreBySimilarity" -> "true",
"distributed" -> "false",
"with-jep" -> "false",
"domain" -> "any",
// Use this to get non-empty revisions at any point. This is necessary
// because there are cases where the model corresponding to an empty
// theory may have lower cost (in the optimization) than a model that
// corresponds to a theory (e.g. when including any initiation rule in the theory yields
// many fps). In such cases the solver will opt for an empty theory, which is not
// always desirable. This parameter is used by the MCTS version of OLED.
"smallest-nonempty" -> "false",
// Weights on examples
"tp-weight" -> "1",
"fp-weight" -> "1",
"fn-weight" -> "1",
"with-inertia" -> "false",
"weight-learning" -> "false",
"with-ec" -> "true"
)
// if jep is used "UNSAT" else "UNSATISFIABLE"
def UNSAT = if (glvalues("with-jep").toBoolean) "UNSAT" else "UNSATISFIABLE"
// This is a storage of the current initiation/termination
// parts of the theory. These fields are used by the monolithic version
// of OLED only, when learning with inertia (from edge interval points)
// to get the joint theory and see if it satisfies each new example. Abduction
// and new clauses are generated if not.
//--------------------------------------------------------------------------------------
// UPDATE: Testing for new clause generation using the satisfiability of the
  // current joint theory works, for strongly-initiated fluents with no or very
// small amount of noise, but it takes a lot of time in large learning tasks.
// The reason is that the joint theory is unsatisfiable in most cases, since it
// contains over-general rules that erroneously re-initiate or terminate a target
// fluent. This means that abduction and new kernel set generation takes place
// almost always, in every new mini-batch, which causes great delays in the execution.
  // For this to work we'd need a more ILED-style approach, where clauses are not scored,
  // but corrected at every new mistake. In the absence of noise this makes the joint
  // theory quickly converge to the correct one. On the other hand, if there is a
  // substantial amount of noise in the data, so that the edge interval points are
  // frequently corrupted, there is no hope to learn strongly-initiated fluents, so there
// is no point discussing it or trying to fix it with simple modifications in the BK.
//--------------------------------------------------------------------------------------
var CURRENT_THEORY_INITIATED: Vector[Clause] = Vector[Clause]()
var CURRENT_THEORY_TERMINATED: Vector[Clause] = Vector[Clause]()
def getCurrentJointTheory() = {
Theory((CURRENT_THEORY_INITIATED ++ CURRENT_THEORY_TERMINATED).toList)
}
//var errorProb = Vector.empty[Int]
}
class Globals(val entryPath: String) extends LazyLogging {
/*
* Global values and utils.
*/
val state = new State
val cwd: String = System.getProperty("user.dir") // Current working dir
val inputPath: String = entryPath // Path to bk and modes files
val modesFile: String = s"$inputPath/modes" // Mode Declarations file
//val AUXILIARY_PREDS = "auxiliaryPredicates"
val BK_INITIATED_ONLY = s"$inputPath/bk-initiated-only.lp"
val BK_TERMINATED_ONLY = s"$inputPath/bk-terminated-only.lp"
val ABDUCE_WITH_INERTIA = s"$inputPath/abduce-with-inertia.lp"
val INITIATED_ONLY_INERTIA = s"$inputPath/initiated-only-with-inertia.lp"
val BK_INITIATED_ONLY_MARKDED = s"$inputPath/bk-score-initiated.lp" // BK for scoring initiation rules
val BK_TERMINATED_ONLY_MARKDED = s"$inputPath/bk-score-terminated.lp" // BK for scoring termination rules
val BK_RULE_SCORING_MARKDED = s"$inputPath/bk-score.lp" // BK for rule scoring when learning without the EC.
val USER_BK = s"$inputPath/bk"
val BK_WHOLE_EC = s"$inputPath/bk.lp"
val BK_WHOLE = s"$inputPath/bk.lp" // for learning without the EC, no practical difference
val BK_CROSSVAL = s"$inputPath/bk-for-crossval.lp"
val ILED_NO_INERTIA: String = inputPath + "/bk-no-inertia.lp"
def matches(p: Regex, str: String) = p.pattern.matcher(str).matches
val modesParser = new ModesParser
val MODES: List[String] = Source.fromFile(modesFile).getLines.toList.filter(line => !matches("""""".r, line) && !line.startsWith("%"))
  val MODEHS: List[ModeAtom] = MODES.filter(m => m.contains("modeh") && !m.startsWith("%")).
map(x => modesParser.getParseResult(modesParser.parseModes(modesParser.modeh, x)))
if (MODEHS.isEmpty) logger.error("No head mode declarations found.")
val MODEBS: List[ModeAtom] = MODES.filter(m => m.contains("modeb") && !m.startsWith("%")).
map(x => modesParser.getParseResult(modesParser.parseModes(modesParser.modeb, x)))
if (MODEBS.isEmpty) logger.error("No body mode declarations found.")
/* The input to this method is a Literal representation of mode atoms and example pattern atoms (variabilized). */
def getTypeAxioms(m: Literal): Set[String] = {
val plmrkTerms = m.placeMarkers
val (posPlmrkTerms, negPlrmTerms, grndPlmrkTerms) = (plmrkTerms._1, plmrkTerms._2, plmrkTerms._3)
val allPlmrks = (posPlmrkTerms ++ negPlrmTerms ++ grndPlmrkTerms).map(x => x.asInstanceOf[Variable]).toSet
allPlmrks.foldLeft(Set[String]()) { (accum, y) =>
val allOtherPlmrks = allPlmrks diff Set(y)
if (y.inOrOutVar == "+" || y.inOrOutVar == "-") {
val result_ = s"${y._type}(${{ y.name }}) :- ${m.tostring}."
// the regex below matches variable symbols which do not appear in predicate of function
// names. So it will match X0 in p(X0) but not in pX0(X0), pxX0(X0), pX_0(X0), p_2X0(X0) and so on
val result = allOtherPlmrks.foldLeft(result_) { (x1, y1) => x1.replaceAll(s"(?<![a-zA-Z0-9_]+)${y1.name}", "_") }
accum + result
} else {
accum
}
}
}
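  /* An illustrative sketch of the output, assuming a hypothetical variabilized
   * input atom happensAt(active(X0),X1) where X0 is a +person and X1 a +time
   * placemarker; each placemarker yields one type axiom, with all other
   * placemarkers replaced by "_":
   *
   *   person(X0) :- happensAt(active(X0),_).
   *   time(X1) :- happensAt(active(_),X1).
   */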
  // Example patterns as a List[ModeAtom] (helper)
val eps1: List[ModeAtom] =
MODES.filter(m => m.contains("examplePattern") && !m.startsWith("%")).
map(x => modesParser.getParseResult(modesParser.parseModes(modesParser.exmplPattern, x)))
val eps2: List[ModeAtom] = eps1 match {
case List() => MODEHS // if no example patterns are found, use the head mode declarations for them
case _ => eps1
}
// Auxiliary predicates. These are input predicates which are not part of the target language
// but are necessary for extracting the types of entities in the domain (e.g. think of coords/4 in CAVIAR).
private val inputPreds: List[ModeAtom] = {
MODES.filter(m => m.contains("inputPredicate") && !m.startsWith("%")).
map(x => modesParser.getParseResult(modesParser.parseModes(modesParser.inputPred, x)))
}
if (inputPreds.exists(p => p.isNAF)) {
logger.error(s"NAF is not allowed in input predicates.")
System.exit(-1)
}
// This method generates types axioms for the mode declarations,
// i.e. rules of the form: time(X1) :- happensAt(active(_),X1).
private val typeAxioms = {
val m = inputPreds.filter(x => !x.isNAF).map(x => x.varbed)
val x = m.flatMap(getTypeAxioms).toSet
//x foreach println
x
}
/*
* Comparison predicates compare numerical values to a threshold, e.g:
*
* close(p1, p2, 30, 10)
*
* meaning that the Euclidean distance of p1, p2 at time 10 is less than 30.
*
* Comparison predicates may be declared in the modes file like this:
*
* comparisonPredicate(close(+person,+person,#numvalue,+time), lessThan, comparison_term_position(3))
*
* The #numvalue placemarker indicates the position of the actual numerical threshold
* while the 'lessThan' term (can also be 'greaterThan') declares the intended "semantics"
* of the predicate. Note that numvalue has to be the type of this term in the corresponding body declaration. The
* comparison_term_position(3) indicates the position of the comparison term in the atom. In folded atoms the whole
* "path" to this term needs to be specified e.g.
*
* comparisonPredicate(far(+person,+person,test(+person, p(#threshold_value)),+time), greaterThan, comparison_term_position(3,2,1))
*
* Here to find the comparison term take atom.terms(3).terms(2).terms(1). See also the method getComparisonTerm
* in the Modes class and the getComparisonTerm in the Literal class.
*
* Comparison predicate declarations are used internally to allow for two tasks that simplify the learning process:
*
* 1. Reduce clauses: When a comparison predicate in the lessThan semantics and with numvalue1 is added to a rule,
* then any other similar predicate with numvalue2 such that numvalue2 > numvalue1 is removed from the rule.
* Rules with comparison predicate in the greaterThan semantics are reduced accordingly.
* 2. When generating candidate specializations, rules that consist of comparison predicates only (e.g. close/4
* predicates only) are omitted.
* */
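  /*
   * For instance (a hypothetical rule, using the close/4 declaration sketched above,
   * with lessThan semantics and the threshold at term position 3):
   *
   *   initiatedAt(meeting(X0,X1),X2) :- close(X0,X1,25,X2), close(X0,X1,30,X2).
   *
   * is reduced to
   *
   *   initiatedAt(meeting(X0,X1),X2) :- close(X0,X1,25,X2).
   *
   * since a distance below 25 already entails a distance below 30.
   */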
val comparisonPredicates: List[ModeAtom] = {
MODES.filter(m => m.contains("comparisonPredicate") && !m.startsWith("%")).
map(x => modesParser.getParseResult(modesParser.parseModes(modesParser.compPred, x)))
}
MODEBS foreach { m =>
val x = comparisonPredicates.find(z => z == m).getOrElse(ModeAtom())
if (x != ModeAtom()) {
m.compRelation = x.compRelation
m.comparisonTermPosition = x.comparisonTermPosition
}
}
val headAtomSignatures: List[AtomSignature] = {
MODEHS.map(x => new AtomSignature(x.functor, x.arity))
}
val bodyAtomSignatures: List[AtomSignature] = {
MODEBS.map(x => new AtomSignature(x.functor, x.arity))
}
  /* Reads the user-provided background knowledge from $inputPath/bk and produces helper files
     (e.g. for rule evaluation, bottom clause generation etc.) */
def generateBKFiles_Event_Calculus() = {
//private val PY_LESSTHAN =
// "#script (python)\nfrom gringo import Fun\nimport math\n\ndef less_than(x,y):\n return float(x) < float(y)\n\n#end."
val EC_AXIOM_1 = "holdsAt(F,Te) :- fluent(F), not sdFluent(F), initiatedAt(F,Ts), next(Ts, Te)."
val EC_AXIOM_2 = "holdsAt(F,Te) :- fluent(F), not sdFluent(F), holdsAt(F,Ts), " +
"not terminatedAt(F,Ts), next(Ts, Te)."
//private val RIGHT_BEFORE_DEF = "right_before(X,Z) :- time(X), time(Z), Z = X+40."
///*
/*val RIGHT_BEFORE_DEF ="\n#script (python)\ntimes = []\ndef collect_all(a):\n times.append(a)\n " +
"return 1\ndef sorted():\n times.sort()\n return zip(range(len(times)), times)\n#end.\ncollect_all." +
"\ncollect_all :- time(X), @collect_all(X) == 0.\nsorted_pair(X,N) :- collect_all, " +
"(X,N) = @sorted().\nnext(X, Y) :- sorted_pair(A,X), sorted_pair(A+1,Y).\n"*/
val RIGHT_BEFORE_DEF =
"""
|#script (python)
|times = []
|def collect_all(a):
| times.append(a)
| return 1
|def sorted():
| times.sort()
| return zip(range(len(times)), times)
|def end_time():
| times.sort()
| return times[-1]
|def start_time():
| times.sort()
| return times[0]
|#end.
|collect_all.
|collect_all :- time(X), @collect_all(X) == 0.
|sorted_pair(X,N) :- collect_all, (X,N) = @sorted().
|next(X, Y) :- sorted_pair(A,X), sorted_pair(A+1,Y).
|start_end :- collect_all.
|start_end(X,Y) :- start_end, X = @start_time(), Y = @end_time().
|%endTime(X) :- X = @end_time().
|startTime(X) :- X = @start_time().
|""".stripMargin
//*/
val INIT_TIME_DEF = "initialTime(X) :- time(X), #false : X > Y, time(Y)."
val INIT_HOLDS_DEF = "%THIS SHOULD NOT BE HERE!\nholdsAt(F,T) :- initialTime(T), example(holdsAt(F,T))."
val CORE_EVENT_CALCULUS_BK = List(EC_AXIOM_1, EC_AXIOM_2, RIGHT_BEFORE_DEF, INIT_TIME_DEF, INIT_HOLDS_DEF)
val CROSSVAL_EVENT_CALCULUS_BK = List(EC_AXIOM_1, EC_AXIOM_2, RIGHT_BEFORE_DEF)
val INITIATED_ONLY_EVENT_CALCULUS_BK = List(EC_AXIOM_1, RIGHT_BEFORE_DEF, INIT_TIME_DEF, INIT_HOLDS_DEF)
val TERMINATED_ONLY_EVENT_CALCULUS_BK =
List(EC_AXIOM_1, EC_AXIOM_2, RIGHT_BEFORE_DEF, INIT_TIME_DEF, INIT_HOLDS_DEF,
"holdsAt(F,T) :- fluent(F), not sdFluent(F), examplesInitialTime(T), example(holdsAt(F,T)).",
"examplesInitialTime(X) :- example(holdsAt(_,X)), #false : X > Y, example(holdsAt(_,Y)).")
// Read the user-input BK
val userBK = Source.fromFile(USER_BK).getLines.toList.mkString("\n")
// Generate the ASP scoring rules:
val scoringRules = generateScoringBK(MODEHS)
// Type axioms:
val tas = this.typeAxioms.mkString("\n")
// Generate bk.lp file (it will be used for reasoning)
val bkFile = new java.io.File(BK_WHOLE_EC)
val pw1 = new PrintWriter(bkFile)
pw1.write(userBK + "\n")
pw1.write(CORE_EVENT_CALCULUS_BK.mkString("\n"))
pw1.write("\n" + tas)
pw1.close()
bkFile.deleteOnExit()
// Generate initiation-only BK file
val initOnlyBKFile = new java.io.File(BK_INITIATED_ONLY)
val pw2 = new PrintWriter(initOnlyBKFile)
pw2.write(userBK + "\n")
pw2.write(INITIATED_ONLY_EVENT_CALCULUS_BK.mkString("\n"))
pw2.write("\n" + tas)
pw2.close()
initOnlyBKFile.deleteOnExit()
// Generate termination-only BK file
val termOnlyBKFile = new java.io.File(BK_TERMINATED_ONLY)
val pw3 = new PrintWriter(termOnlyBKFile)
pw3.write(userBK + "\n")
pw3.write(TERMINATED_ONLY_EVENT_CALCULUS_BK.mkString("\n"))
pw3.write("\n" + tas)
pw3.close()
termOnlyBKFile.deleteOnExit()
// Generate initiation-scoring rules
val scoreInitFile = new java.io.File(BK_INITIATED_ONLY_MARKDED)
val pw4 = new PrintWriter(scoreInitFile)
pw4.write(userBK + "\n")
pw4.write("\n" + scoringRules._1 + "\n" + RIGHT_BEFORE_DEF + "\n")
pw4.write("\n" + tas)
pw4.close()
scoreInitFile.deleteOnExit()
// Generate termination-scoring rules
val scoreTermFile = new java.io.File(BK_TERMINATED_ONLY_MARKDED)
val pw5 = new PrintWriter(scoreTermFile)
pw5.write(userBK + "\n")
pw5.write("\n" + scoringRules._2 + "\n" + RIGHT_BEFORE_DEF + "\n")
pw5.write("\n" + tas)
pw5.close()
scoreTermFile.deleteOnExit()
// Generate cross-validation file
val crossValFile = new java.io.File(BK_CROSSVAL)
val pw6 = new PrintWriter(crossValFile)
pw6.write(userBK + "\n")
pw6.write(CROSSVAL_EVENT_CALCULUS_BK.mkString("\n"))
pw6.write("\n" + tas)
pw6.close()
crossValFile.deleteOnExit()
}
def generateBKFiles_No_Event_Calculus() = {
// Read the user-input BK
val userBK = Source.fromFile(USER_BK).getLines.toList.mkString("\n")
// Generate the ASP scoring rules:
val scoringRules = generateScoringBK(MODEHS)
// Type axioms:
val tas = this.typeAxioms.mkString("\n")
// Generate bk.lp file (it will be used for reasoning)
val bkFile = new java.io.File(BK_WHOLE)
val pw1 = new PrintWriter(bkFile)
pw1.write(userBK + "\n")
pw1.write("\n" + tas)
pw1.close()
bkFile.deleteOnExit()
// Generate BK file for rule scoring
val scoreTermFile = new java.io.File(BK_RULE_SCORING_MARKDED)
val pw5 = new PrintWriter(scoreTermFile)
pw5.write(userBK + "\n")
pw5.write("\n" + scoringRules._2 + "\n")
pw5.write("\n" + tas)
pw5.close()
scoreTermFile.deleteOnExit()
// Generate cross-validation file
val crossValFile = new java.io.File(BK_CROSSVAL)
val pw6 = new PrintWriter(crossValFile)
pw6.write(userBK + "\n")
pw6.write("\n" + tas)
pw6.close()
crossValFile.deleteOnExit()
}
if (Globals.glvalues("with-ec").toBoolean) {
generateBKFiles_Event_Calculus()
} else {
generateBKFiles_No_Event_Calculus()
}
val EXAMPLE_PATTERNS: List[Literal] = eps2 map (p => p.varbed)
val EXAMPLE_PATTERNS_AS_STRINGS: List[String] = EXAMPLE_PATTERNS map (_.tostring)
private val coverageDirectives = getCoverageDirectives(EXAMPLE_PATTERNS_AS_STRINGS)
val TPS_RULES: String = coverageDirectives._1
val FPS_RULES: String = coverageDirectives._2
val FNS_RULES: String = coverageDirectives._3
val TPS_RULES_MARKED: String = coverageDirectives._4
val FPS_RULES_MARKED: String = coverageDirectives._5
val FNS_RULES_MARKED: String = coverageDirectives._6
val CONSTRAINT_COVER_ALL_POSITIVES: String = coverageDirectives._7
val CONSTRAINT_EXCLUDE_ALL_NEGATIVES: String = coverageDirectives._8
val SHOW_TPS_ARITY_1 = "\n#show tps/1."
val SHOW_TPS_ARITY_2 = "\n#show tps/2."
val SHOW_FPS_ARITY_1 = "\n#show fps/1."
val SHOW_FPS_ARITY_2 = "\n#show fps/2."
val SHOW_FNS_ARITY_1 = "\n#show fns/1."
val SHOW_FNS_ARITY_2 = "\n#show fns/2."
val SHOW_TIME = "\n#show times/1."
val SHOW_INTERPRETATIONS_COUNT = "\n#show countGroundings/1."
val INCLUDE_BK: String => String = (file: String) => s"\n\n#include " + "\"" + file + "\".\n"
val HIDE = "\n#show.\n"
// if jep is used "UNSAT" else "UNSATISFIABLE"
def UNSAT = if (Globals.glvalues("with-jep").toBoolean) "UNSAT" else "UNSATISFIABLE"
val SAT = "SAT"
val TPS_COUNT_RULE: String = tpsCountRules(EXAMPLE_PATTERNS)
val FPS_COUNT_RULE: String = fpsCountRules(EXAMPLE_PATTERNS)
val FNS_COUNT_RULE: String = fnsCountRules(EXAMPLE_PATTERNS)
// FNs for terminated: not marked(I,exmpl), example(exmpl) (same as FNs for initiated)
// TPs for terminated: marked(I, exmpl), example(exmpl) (same as TPs for initiated)
val TIMES_COUNT_RULE = "\ntimes(X) :- X = #count { Z: time(Z) }.\n"
/*
def EXAMPLE_COUNT_RULE =
this.EXAMPLE_PATTERNS.map{ x =>
s"exampleGrounding(${x.tostring}):-${x.getTypePredicates(this).mkString(",")}.\n"+
s"countGroundings(X) :- X = #count { ${x.getVars.toList.map(_.tostring).mkString(",")}: " +
s"exampleGrounding(${x.tostring}),${x.getTypePredicates(this).mkString(",")} }."
}.mkString("\n")+"\n"
*/
  /* I NEED TO FIND A WAY TO MAKE THIS GENERIC (NON-EVENT-CALCULUS SPECIFIC).
* FOR EXAMPLE, THE USER COULD SPECIFY IT IN THE MODES FILE. */
/*
def EXAMPLE_COUNT_RULE = "exampleGrounding(holdsAt(F,T)):-fluent(F),time(T).\n"+
"countGroundings(X) :- X = #count { F,T: exampleGrounding(holdsAt(F,T)),fluent(F),time(T) }.\n"
*/
def EXAMPLE_COUNT_RULE = {
val targetPred = EXAMPLE_PATTERNS.head
val tpstr = targetPred.tostring
val vars = targetPred.getVars.map(x => x.name).mkString(",")
val typePreds = targetPred.getTypePredicates(this).mkString(",")
s"exampleGrounding($tpstr) :- $typePreds.\ncountGroundings(X) :- X = #count { $vars: exampleGrounding($tpstr),$typePreds }.\n"
}
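  /* With the Event Calculus target pattern holdsAt(F,T) (typed by fluent/1 and
   * time/1), the method above yields the same counting rule as the commented-out
   * version above, i.e.:
   *
   *   exampleGrounding(holdsAt(F,T)) :- fluent(F),time(T).
   *   countGroundings(X) :- X = #count { F,T: exampleGrounding(holdsAt(F,T)),fluent(F),time(T) }.
   */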
/*
val LOOK_AHEADS = {
val f = Source.fromFile(modesFile).getLines.toList.filter(line => line.startsWith("lookahead"))
if (f.nonEmpty) f.map( x => new LookAheadSpecification(x) ) else Nil
}
*/
private val LOOK_AHEADS_TEST = {
val f = Source.fromFile(modesFile).getLines.toList.filter(line => line.startsWith("lookahead"))
if (f.nonEmpty) f.map(x => new LookAheadUtils.LookAheadSpecification(x)) else Nil
}
/*
def getAdditionalLanguageBias(predicateName: String) = {
val f = Source.fromFile(modesFile).getLines.toList.filter(line => line.startsWith(s"$predicateName"))
f.map(x => x.split(s"$predicateName\\(")(1).split("\\)")(0)).filter(p => (MODEHS++MODEBS).exists(q => q.functor == p))
}
val FORCE_PREDS = getAdditionalLanguageBias("force")
val BASIC_PREDS = getAdditionalLanguageBias("basic")
val AUXILIARY_PREDS = getAdditionalLanguageBias("auxiliary")
*/
var EVALUATION_FUNCTION = "precision_recall" // alternative is foil_gain
var MAX_CLAUSE_LENGTH = 15
var LEARNING_WHOLE_THEORIES = false
var TOP_THEORY_SCORE = 0.0
var TOP_THEORY_SCORE_COUNT = 0
/*
* The following are not used anywhere, they are for debugging
*/
/*
private val initHead = "initiatedAt(meeting(X0,X1),X2)"
private val initHead1 = "initiatedAt(meeting(X1,X0),X2)"
private val termHead = "terminatedAt(meeting(X0,X1),X2)"
private val termHead1 = "terminatedAt(meeting(X1,X0),X2)"
private val BCBodyLits =
List("happensAt(inactive(X1),X2)","happensAt(inactive(X0),X2)",
"happensAt(active(X1),X2)","happensAt(active(X0),X2)",
"happensAt(walking(X1),X2)","happensAt(walking(X0),X2)",
"happensAt(running(X1),X2)","happensAt(running(X0),X2)",
"happensAt(appear(X1),X2)","happensAt(appear(X0),X2)",
"happensAt(disappear(X1),X2)","happensAt(disappear(X0),X2)",
"not happensAt(disappear(X1),X2)","not happensAt(disappear(X0),X2)",
"close(X0,X1,24,X2)","close(X1,X0,24,X2)","close(X0,X1,25,X2)","close(X1,X0,25,X2)",
"close(X0,X1,30,X2)","close(X1,X0,30,X2)","close(X0,X1,34,X2)","close(X1,X0,34,X2)",
"far(X0,X1,24,X2)","far(X1,X0,24,X2)","far(X0,X1,25,X2)","far(X1,X0,25,X2)",
"far(X0,X1,30,X2)","far(X1,X0,30,X2)","far(X0,X1,34,X2)","far(X1,X0,34,X2)")
val initBC1 = {
val h = Literal.toLiteral(initHead)
val b = BCBodyLits map (x => Literal.toLiteral(x))
Clause(List(h) ++ b)
}
val initBC2 = {
val h = Literal.toLiteral(initHead1)
val b = BCBodyLits map (x => Literal.toLiteral(x))
Clause(List(h) ++ b)
}
val termBC1 = {
val h = Literal.toLiteral(termHead)
val b = BCBodyLits map (x => Literal.toLiteral(x))
Clause(List(h) ++ b)
}
val termBC2 = {
val h = Literal.toLiteral(termHead1)
val b = BCBodyLits map (x => Literal.toLiteral(x))
Clause(List(h) ++ b)
}
*/
}
| 25,519 | 40.294498 | 136 | scala |
OLED | OLED-master/src/main/scala/app/runutils/IOHandling.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runutils
import com.mongodb.casbah.commons.MongoDBObject
import com.mongodb.casbah.{MongoClient, MongoCollection}
import logic.Examples.Example
import utils.DataUtils.Interval
/**
* Created by nkatz on 6/28/17.
*/
object IOHandling {
// TODO
// Currently used by the maritime runner
trait InputSource
trait MongoSource extends InputSource {
def createIndex(collection: MongoCollection, sort: String = "ascending", sortKey: String = "None"): Unit = {
sortKey match {
case "None" =>
case _ =>
val i = if (sort == "ascending") 1 else -1
collection.createIndex(MongoDBObject(sortKey -> i))
}
}
def allData(collection: MongoCollection, sort: String = "ascending", sortKey: String = "None"): collection.CursorType = {
sortKey match {
case "None" => collection.find()
case _ =>
val i = if (sort == "ascending") 1 else -1
collection.find().sort(MongoDBObject(sortKey -> i))
}
}
}
// TODO
trait FileSource
/*
def getData[T <: Source](opts: T, dataFunc: (T) => Iterator[Example]) = {
dataFunc(opts)
}
def run[T <: Source](opts: T, dataFunc: (T) => Iterator[Example]): Iterator[Example] = {
dataFunc(opts)
}
//getData(new DefaultMongoDataOptions(""), getMongoData)
*/
}
| 2,029 | 28 | 125 | scala |
OLED | OLED-master/src/main/scala/app/runutils/IOUtils.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package app.runutils
import com.mongodb.casbah.MongoClient
import logic.Examples.Example
/**
* Created by nkatz at 25/10/2018
*/
object IOUtils {
def main(args: Array[String]) = {
databaseToFile("caviar-train", "/home/nkatz/Desktop/caviar-train")
}
  /* Helper method for dumping a mongo DB to a file OLED can read. */
  def databaseToFile(dbName: String, fileName: String) = {
    val mongoClient = MongoClient()
    val collection = mongoClient(dbName)("examples")
    // Write each example to the file given by fileName.
    val pw = new java.io.PrintWriter(fileName)
    collection.find().foreach { x =>
      val e = Example(x)
      pw.println(e)
    }
    pw.close()
    mongoClient.close()
  }
/* Read data from a file */
def getDataFromFile(path: String) = {
}
}
| 1,349 | 25.470588 | 72 | scala |
OLED | OLED-master/src/main/scala/experiments/caviar/FullDatasetHoldOut.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.caviar
import akka.actor.{ActorSystem, Props}
import app.runutils.CMDArgs
import app.runutils.IOHandling.MongoSource
import com.mongodb.casbah.{MongoClient, MongoCollection}
import com.typesafe.scalalogging.LazyLogging
import logic.Examples.Example
import oled.mwua.Learner
object FullDatasetHoldOut extends LazyLogging {
def main(args: Array[String]) = {
val argsok = CMDArgs.argsOk(args)
if (!argsok._1) {
logger.error(argsok._2); System.exit(-1)
} else {
val runningOptions = CMDArgs.getOLEDInputArgs(args)
val dataset = MeetingTrainTestSets.meeting7
val trainingDataOptions =
new MongoDataOptions(dbNames = dataset._1,
chunkSize = runningOptions.chunkSize, targetConcept = runningOptions.targetHLE, sortDbByField = "time", what = "training")
val testingDataOptions =
new MongoDataOptions(dbNames = dataset._2,
chunkSize = runningOptions.chunkSize, targetConcept = runningOptions.targetHLE, sortDbByField = "time", what = "testing")
val trainingDataFunction: MongoDataOptions => Iterator[Example] = getMongoData
val testingDataFunction: MongoDataOptions => Iterator[Example] = getMongoData
val system = ActorSystem("HoeffdingLearningSystem")
val startMsg = if (runningOptions.evalth != "None") "eval" else "start"
system.actorOf(Props(new Learner(runningOptions, trainingDataOptions, testingDataOptions, trainingDataFunction,
testingDataFunction)), name = "Learner") ! startMsg
}
}
class MongoDataOptions(val dbNames: Vector[String], val chunkSize: Int = 1,
val limit: Double = Double.PositiveInfinity.toInt,
val targetConcept: String = "None", val sortDbByField: String = "time",
val sort: String = "ascending", val what: String = "training") extends MongoSource
/* "what" is either training or testing */
def getMongoData(opts: MongoDataOptions): Iterator[Example] = {
val mc = MongoClient()
val exmplIters = opts.dbNames map { dbName =>
val collection: MongoCollection = mc(dbName)("examples")
val data = opts.allData(collection, opts.sort, opts.sortDbByField) map { x =>
val e = Example(x)
opts.targetConcept match {
case "None" => new Example(annot = e.annotation, nar = e.narrative, _time = e.time)
case _ => new Example(annot = e.annotation filter (_.contains(opts.targetConcept)), nar = e.narrative, _time = e.time)
}
}
if (opts.what == "training") {
opts.chunkSize > 1 match {
case false => data
case _ =>
data.grouped(opts.chunkSize).map { x =>
//data.sliding(opts.chunkSize).map { x =>
x.foldLeft(Example()) { (z, y) =>
new Example(annot = z.annotation ++ y.annotation, nar = z.narrative ++ y.narrative, _time = x.head.time)
}
}
}
} else { // no chunking for testing data
///*
val firstDataPoint = data.next()
val annotation = firstDataPoint.annotation
val narrative = firstDataPoint.narrative
val time = firstDataPoint.time
val merged = data.foldLeft(annotation, narrative) { (accum, ex) =>
(accum._1 ++ ex.annotation, accum._2 ++ ex.narrative)
}
val e = new Example(annot = merged._1, nar = merged._2, _time = time)
Iterator(e)
//*/
// comment the above and uncomment this to have chunked data
/*
data.grouped(opts.chunkSize).map { x =>
//data.sliding(opts.chunkSize).map { x =>
x.foldLeft(Example()) { (z, y) =>
new Example(annot = z.annotation ++ y.annotation, nar = z.narrative ++ y.narrative, _time = x.head.time)
}
}
*/
}
}
exmplIters.foldLeft(Iterator[Example]())(_ ++ _)
}
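  /* A minimal direct-usage sketch (assuming a mongo db named as one of the CAVIAR
   * videos above has been loaded; all other options keep their defaults):
   *
   *   val opts = new MongoDataOptions(dbNames = Vector("caviar-video-1-meeting-moving"))
   *   getMongoData(opts) foreach println
   */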
}
| 4,663 | 36.312 | 155 | scala |
OLED | OLED-master/src/main/scala/experiments/caviar/TrainTestSets.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.caviar
object MeetingTrainTestSets {
/* Train/test sets are identified by the mongo db name of each video */
/*
private val train1 =
Vector("caviar-video-2-meeting-moving", "caviar-video-5",
"caviar-video-6", "caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
private val test1 = Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4")
val meeting1: (Vector[String], Vector[String]) = (train1, test1)
private val train2 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4",
"caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
private val test2 = Vector("caviar-video-2-meeting-moving", "caviar-video-5", "caviar-video-6")
val meeting2: (Vector[String], Vector[String]) = (train2, test2)
private val train3 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4",
"caviar-video-2-meeting-moving", "caviar-video-5", "caviar-video-6",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
private val test3 = Vector("caviar-video-13-meeting", "caviar-video-7", "caviar-video-8")
val meeting3: (Vector[String], Vector[String]) = (train3, test3)
private val train4 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4",
"caviar-video-2-meeting-moving", "caviar-video-5", "caviar-video-6",
"caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
private val test4 = Vector("caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10")
val meeting4: (Vector[String], Vector[String]) = (train4, test4)
private val train5 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4",
"caviar-video-2-meeting-moving", "caviar-video-5", "caviar-video-6",
"caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
private val test5 = Vector("caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving")
val meeting5: (Vector[String], Vector[String]) = (train5, test5)
private val train6 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4",
"caviar-video-2-meeting-moving", "caviar-video-5", "caviar-video-6",
"caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
private val test6 = Vector("caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16")
val meeting6: (Vector[String], Vector[String]) = (train6, test6)
private val train7 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4",
"caviar-video-2-meeting-moving", "caviar-video-5", "caviar-video-6",
"caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
private val test7 = Vector("caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18")
val meeting7: (Vector[String], Vector[String]) = (train7, test7)
private val train8 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4",
"caviar-video-2-meeting-moving", "caviar-video-5", "caviar-video-6",
"caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
private val test8 = Vector("caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-25")
val meeting8: (Vector[String], Vector[String]) = (train8, test8)
private val train9 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4",
"caviar-video-2-meeting-moving", "caviar-video-5", "caviar-video-6",
"caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
private val test9 = Vector("caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27")
val meeting9: (Vector[String], Vector[String]) = (train9, test9)
private val train10 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4",
"caviar-video-2-meeting-moving", "caviar-video-5", "caviar-video-6",
"caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27")
private val test10 = Vector("caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
val meeting10: (Vector[String], Vector[String]) = (train10, test10)
*/
private val train1 =
Vector("caviar-video-2-meeting-moving", "caviar-video-5",
"caviar-video-6", "caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-4")
private val test1 = Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-30")
val meeting1: (Vector[String], Vector[String]) = (train1, test1)
private val train2 =
/*
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-6",
"caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-5", "caviar-video-30")
*/
// This ordering seems to be giving good results in terms of the rules it learns
Vector("caviar-video-21-meeting-moving", "caviar-video-11", "caviar-video-6", "caviar-video-3", "caviar-video-8", "caviar-video-24-meeting-moving", "caviar-video-10", "caviar-video-25", "caviar-video-1-meeting-moving", "caviar-video-27", "caviar-video-16", "caviar-video-28-meeting", "caviar-video-18", "caviar-video-5", "caviar-video-14-meeting-moving", "caviar-video-13-meeting", "caviar-video-30", "caviar-video-20-meeting-moving", "caviar-video-22-meeting-moving", "caviar-video-9", "caviar-video-17", "caviar-video-12-moving", "caviar-video-15", "caviar-video-26", "caviar-video-7", "caviar-video-19-meeting-moving", "caviar-video-23-moving")
private val test2 = Vector("caviar-video-2-meeting-moving", "caviar-video-29", "caviar-video-4")
val meeting2: (Vector[String], Vector[String]) = (train2, test2)
private val train3 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4",
"caviar-video-2-meeting-moving", "caviar-video-8", "caviar-video-6",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-7",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
private val test3 = Vector("caviar-video-13-meeting", "caviar-video-27", "caviar-video-5")
val meeting3: (Vector[String], Vector[String]) = (train3, test3)
private val train4 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4",
"caviar-video-2-meeting-moving", "caviar-video-5", "caviar-video-10",
"caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-9", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
private val test4 = Vector("caviar-video-14-meeting-moving", "caviar-video-26", "caviar-video-6")
val meeting4: (Vector[String], Vector[String]) = (train4, test4)
private val train5 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4",
"caviar-video-2-meeting-moving", "caviar-video-5", "caviar-video-6",
"caviar-video-13-meeting", "caviar-video-12-moving", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-11",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
private val test5 = Vector("caviar-video-19-meeting-moving", "caviar-video-25", "caviar-video-7")
val meeting5: (Vector[String], Vector[String]) = (train5, test5)
private val train6 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4",
"caviar-video-2-meeting-moving", "caviar-video-5", "caviar-video-6",
"caviar-video-13-meeting", "caviar-video-7", "caviar-video-16",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-15", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
private val test6 = Vector("caviar-video-20-meeting-moving", "caviar-video-23-moving", "caviar-video-8")
val meeting6: (Vector[String], Vector[String]) = (train6, test6)
private val train7 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4",
"caviar-video-2-meeting-moving", "caviar-video-5", "caviar-video-6",
"caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-17", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
private val test7 = Vector("caviar-video-21-meeting-moving", "caviar-video-9", "caviar-video-18")
val meeting7: (Vector[String], Vector[String]) = (train7, test7)
private val train8 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4",
"caviar-video-2-meeting-moving", "caviar-video-5", "caviar-video-6",
"caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-25",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-23-moving", "caviar-video-18",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
private val test8 = Vector("caviar-video-22-meeting-moving", "caviar-video-17", "caviar-video-10")
val meeting8: (Vector[String], Vector[String]) = (train8, test8)
private val train9 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4",
"caviar-video-2-meeting-moving", "caviar-video-5", "caviar-video-6",
"caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-27", "caviar-video-12-moving",
"caviar-video-20-meeting-moving", "caviar-video-15", "caviar-video-26",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
private val test9 = Vector("caviar-video-24-meeting-moving", "caviar-video-16", "caviar-video-11")
val meeting9: (Vector[String], Vector[String]) = (train9, test9)
private val train10 =
Vector("caviar-video-1-meeting-moving", "caviar-video-3", "caviar-video-4",
"caviar-video-2-meeting-moving", "caviar-video-5", "caviar-video-6",
"caviar-video-13-meeting", "caviar-video-7", "caviar-video-8",
"caviar-video-14-meeting-moving", "caviar-video-9", "caviar-video-10",
"caviar-video-19-meeting-moving", "caviar-video-11", "caviar-video-30",
"caviar-video-20-meeting-moving", "caviar-video-29", "caviar-video-16",
"caviar-video-21-meeting-moving", "caviar-video-17", "caviar-video-18",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-25",
"caviar-video-24-meeting-moving", "caviar-video-26", "caviar-video-27")
private val test10 = Vector("caviar-video-28-meeting", "caviar-video-15", "caviar-video-12-moving")
val meeting10: (Vector[String], Vector[String]) = (train10, test10)
}
OLED | OLED-master/src/main/scala/experiments/datautils/caviar_data/CERSiteDisplayVideos.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.caviar_data
import java.io.{BufferedWriter, File, FileWriter, PrintWriter}
import app.runutils.{CMDArgs, Globals, RunningOptions}
import experiments.caviar.FullDatasetHoldOut
import experiments.caviar.FullDatasetHoldOut.MongoDataOptions
import logic.Examples.Example
import logic.{Literal, Theory}
import oled.functions.SingleCoreOLEDFunctions.crossVal
import utils.{ASP, Utils}
import utils.Implicits._
import scala.collection.mutable
object CERSiteDisplayVideos {
private val videos =
Vector("caviar-video-1-meeting-moving", "caviar-video-2-meeting-moving", "caviar-video-3",
"caviar-video-4", "caviar-video-5", "caviar-video-6", "caviar-video-7", "caviar-video-8",
"caviar-video-9", "caviar-video-10", "caviar-video-11", "caviar-video-12-moving", "caviar-video-13-meeting",
"caviar-video-14-meeting-moving", "caviar-video-15", "caviar-video-16", "caviar-video-17", "caviar-video-18",
"caviar-video-19-meeting-moving", "caviar-video-20-meeting-moving", "caviar-video-21-meeting-moving",
"caviar-video-22-meeting-moving", "caviar-video-23-moving", "caviar-video-24-meeting-moving", "caviar-video-25",
"caviar-video-26", "caviar-video-27", "caviar-video-28-meeting", "caviar-video-29", "caviar-video-30")
def main(args: Array[String]) = {
val argsok = CMDArgs.argsOk(args)
if (!argsok._1) {
println(argsok._2)
System.exit(-1)
} else {
val runningOptions = CMDArgs.getOLEDInputArgs(args)
runHandCrafted(runningOptions)
}
}
def runHandCrafted(runOpts: RunningOptions) = {
var (totalTPs, totalFPs, totalFNs) = (0, 0, 0)
videos.filter(x => x.contains("meeting") || x.contains("moving")) foreach { video =>
println(video)
val videoNum = video.split("caviar-video-")(1).split("-")(0)
println(videoNum)
val testingDataOptions = new MongoDataOptions(dbNames = Vector(video), chunkSize = runOpts.chunkSize, sortDbByField = "time", what = "testing")
val testingDataFunction: MongoDataOptions => Iterator[Example] = FullDatasetHoldOut.getMongoData
val data = testingDataFunction(testingDataOptions).next()
      // This returns only the detected CE instances. Comment it out if you only want the tp/fp/fn counts.
val atomsMeeting = evaluate(data, "/home/nkatz/Desktop/hand-crafted-meeting", runOpts.globals, runOpts, "meeting")
val atomsMoving = evaluate(data, "/home/nkatz/Desktop/hand-crafted-moving", runOpts.globals, runOpts, "moving")
val recognizedIntervalsMeeting = findCEIntervals(atomsMeeting)
val recognizedIntervalsMoving = findCEIntervals(atomsMoving)
//println(recognizedIntervalsMeeting)
//println(recognizedIntervalsMoving)
val annotationAtoms = data.annotation
val (meetAnnotAtoms, moveAnnotAtoms) = annotationAtoms.foldLeft(List[String](), List[String]()) { (x, y) =>
if (y.contains("meeting")) {
(x._1 :+ y, x._2)
} else if (y.contains("moving")) {
(x._1, x._2 :+ y)
} else {
x
}
}
val groundTruthIntervalsMeeting = findCEIntervals(meetAnnotAtoms)
val groundTruthIntervalsMoving = findCEIntervals(moveAnnotAtoms)
println(recognizedIntervalsMeeting)
println(groundTruthIntervalsMeeting)
println(recognizedIntervalsMoving)
println(groundTruthIntervalsMoving)
val recognizedFile = new File(s"/home/nkatz/dev/CER-site-caviar-videos/video-$videoNum-OLED")
val bw1 = new BufferedWriter(new FileWriter(recognizedFile))
val groundTruthFile = new File(s"/home/nkatz/dev/CER-site-caviar-videos/video-$videoNum-GroundTruth")
val bw2 = new BufferedWriter(new FileWriter(groundTruthFile))
      //val recognizedFile = new PrintWriter(new File(s"/home/nkatz/dev/CER-site-caviar-videos/video-$videoNum-OLED"))
      //val groundTruthFile = new PrintWriter(new File(s"/home/nkatz/dev/CER-site-caviar-videos/video-$videoNum-GroundTruth.txt"))
writeIntervals(bw1, recognizedIntervalsMeeting)
writeIntervals(bw1, recognizedIntervalsMoving)
writeIntervals(bw2, groundTruthIntervalsMeeting)
writeIntervals(bw2, groundTruthIntervalsMoving)
//pw.write("Hello, world")
bw1.close()
bw2.close()
      // The block below returns only the tp/fp/fn counts; it is currently commented out.
      // Uncomment it (and comment out the instance extraction above) if you only want the counts.
/*
val (tps, fps, fns, _, _, _) =
crossVal(Theory(), data = testingDataFunction(testingDataOptions), handCraftedTheoryFile = runOpts.evalth, globals = runOpts.globals, inps = runOpts)
println(tps, fps, fns)
totalTPs += tps
totalFPs += fps
totalFNs += fns
*/
}
//val precision = totalTPs.toDouble/(totalTPs+totalFPs)
//val recall = totalTPs.toDouble/(totalTPs+totalFNs)
//val f1score = 2*precision*recall/(precision+recall)
//println(s"sMicro F1-score: $f1score")
}
def writeIntervals(file: BufferedWriter, data: Map[String, List[scala.List[Int]]]) = {
data foreach { x =>
val parsed = Literal.parse(x._1)
val ids = parsed.terms.map(_.tostring.split("id")(1)).mkString(",")
val first = s"${parsed.predSymbol.tostring}($ids)"
val second = x._2.map(y => s"(${(y.head / 40.0).toInt}, ${(y.last / 40.0).toInt})").mkString(",")
val msg = s"$first:[$second]"
file.write(msg + "\n")
}
}
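  // Added, illustrative example (not in the original source): for the key "meeting(id1,id2)"
  // with the single recognized interval List(80, 120, 160), the line written above is
  // meeting(1,2):[(2, 4)] -- CAVIAR time points are divided by 40 to obtain frame numbers.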
def evaluate(
data: Example,
handCraftedTheoryFile: String = "",
globals: Globals,
inps: RunningOptions,
CE: String) = {
val e = data // only 1 entry in the iterator (no chunking)
val t = globals.INCLUDE_BK(handCraftedTheoryFile)
val show = s"#show.\n#show holdsAt($CE(X0,X1),X2):holdsAt($CE(X0,X1),X2)."
val ex = e.tostring
val program = ex + globals.INCLUDE_BK(globals.BK_CROSSVAL) + t + show
val f = Utils.getTempFile("isConsistent", ".lp")
Utils.writeLine(program, f, "overwrite")
val answerSet = ASP.solve(task = Globals.INFERENCE, aspInputFile = f)
f.delete()
if (answerSet.nonEmpty) {
val atoms = answerSet.head.atoms
atoms
} else {
Nil
}
}
def findCEIntervals(ceAtoms: List[String]) = {
val parsed = ceAtoms.map(atom => Literal.parse(atom))
val groupedByCEInstance = parsed.groupBy(x => x.terms.head.tostring).map { case (k, v) =>
//val transform = v.sortBy(x => x.terms.tail.head.tostring.toInt)//.map(_.tostring)
val times = v.map(x => x.terms.tail.head.tostring.toInt).sorted
      val timesToIntervals = times.tail.foldLeft(times.head, mutable.Stack[List[Int]]()) { (x, currentTime) =>
        val (prevTimePoint, intervals) = (x._1, x._2)
        if (currentTime == prevTimePoint + 40) {
          // Extend the interval at the top of the stack if it ends at the previous time
          // point; otherwise a new run of consecutive time points has just started, so
          // open a fresh interval beginning at the previous time point.
          if (intervals.nonEmpty && intervals.top.last == prevTimePoint) {
            val last = intervals.pop()
            intervals.push(last :+ currentTime)
          } else {
            intervals.push(List(prevTimePoint, currentTime))
          }
          (currentTime, intervals)
        } else {
          (currentTime, intervals)
        }
      }
(k, timesToIntervals._2.toList)
}
groupedByCEInstance
}
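  // Added, hypothetical helper (not in the original source): a minimal sketch of how the
  // interval extraction above behaves, assuming the 40ms CAVIAR frame step.
  def findCEIntervalsDemo(): Unit = {
    val atoms = List(
      "holdsAt(meeting(id1,id2),80)", "holdsAt(meeting(id1,id2),120)",
      "holdsAt(meeting(id1,id2),160)", "holdsAt(meeting(id1,id2),280)")
    // Expected: Map(meeting(id1,id2) -> List(List(80, 120, 160))); the isolated time
    // point 280 has no consecutive successor, so it opens no interval.
    println(findCEIntervals(atoms))
  }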
}
OLED | OLED-master/src/main/scala/experiments/datautils/caviar_data/CopyCAVIAR.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.caviar_data
import ParseCAVIAR.{lleParser1, parseAll}
import com.mongodb.casbah.MongoClient
import com.mongodb.casbah.commons.MongoDBObject
import com.mongodb.casbah.Imports._
import logic.Examples.Example
import logic.{Constant, Literal}
/**
* Created by nkatz on 6/2/17.
*/
object CopyCAVIAR extends App {
val numOfCopies = 10
val idPattern = "id[0-9]+".r
val originalIds = List("id0", "id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8", "id9").sortBy(x => x.last)
val IDCopies = originalIds.map(x => x -> (1 until numOfCopies).map(y => x + y)).toMap
///*
val mc = MongoClient()
val collection = mc("caviar-whole")("examples")
val newDBName = s"caviarX$numOfCopies"
val newDB = mc(newDBName)("examples")
mc.dropDatabase(newDBName)
for (x <- collection.find()) {
val e = Example(x)
val time = e.time.toInt
println(time)
val extendedNarrative = copyAtoms(e.narrative, "narrative")
val extendedAnnotation = copyAtoms(e.annotation, "annotation")
    // Need to get extra annotation (see generateExtraAnnotation below for what this is about)
val happensAtoms = e.narrative.filter(x => x.contains("happensAt") && (x.contains("walking") || x.contains("active") || x.contains("inactive")))
val extraAnnotation = happensAtoms.flatMap(x => generateExtraAnnotation(x)).distinct
val entry = MongoDBObject("time" -> time) ++ ("annotation" -> (extendedAnnotation ++ extraAnnotation)) ++ ("narrative" -> extendedNarrative)
newDB.insert(entry)
}
//*/
generateExtraAnnotation("happensAt(walking(id1),2347)").foreach(println)
def extractIds(atom: String) = idPattern.findAllIn(atom).toList
def replaceIds(atom: String, replaceWith: Map[String, String]) = {
val ids = idPattern.findAllIn(atom)
val toLit = if (!atom.contains(".")) Literal.parse(atom) else Literal.parse(atom.split("\\.")(0))
// CAVIAR-specific stuff.
// THIS IS FOR ANNOTATION ATOMS ONLY
val first = replaceWith.head._2
val second = replaceWith.tail.head._2
val newLit =
Literal(predSymbol = toLit.predSymbol,
terms = List(Literal(
predSymbol = toLit.terms.head.asInstanceOf[Literal].predSymbol,
terms = List(Constant(first), Constant(second))), toLit.terms.tail.head)).tostring
newLit
}
def copyAtom(atom: String, what: String): List[String] = {
val ids = extractIds(atom)
what match {
case "annotation" =>
if (ids.length != 2) throw new RuntimeException(s"ids length for annotation atom $atom is != 2")
val idCopies = ids.flatMap(x => List(x) ++ IDCopies(x))
        // As an extra test for checking that you are producing the annotation correctly
        // (in addition to just inspecting the outcome), go like this: For n copies of id constants
        // in caviar, you'll have 2n id constants. Then you need to produce C(2n, 2)-many (the binomial
        // coefficient -- 2n choose 2) pairs. Multiply that by 2 (because you have a pair (id1, id2)
        // and its reverse (id2, id1)) and you have the correct number. For example, for X3 caviar,
        // instead of a pair of constants you have 6 constant symbols (id1 -> {id1, id11, id12} and
        // similarly for id2). So you have 2C(6,2) = 30 new pairs of annotation.
val combinations = idCopies.combinations(2).flatMap(x => List(x, x.reverse))
combinations.foldLeft(List[String]()){ (accum, y) =>
val idsMap = (ids zip y).toMap
val newAtom = replaceIds(atom, idsMap)
accum :+ newAtom
}
case "narrative" =>
if (ids.length != 1) throw new RuntimeException(s"ids length for narrative atom $atom is != 1")
        (IDCopies(ids.head).map(x => atom.replace(ids.head, x)) :+ atom).toList
case _ => throw new RuntimeException("Don't know what to do")
}
}
/*
*
* If a person is (e.g.) walking alone (without interacting, according to the annotation)
* with no one else, then that person will be moving with its copies. Similarly when that
* person is active or inactive, it will then be meeting with its copies. We need to generate
* the extra annotation, otherwise we'll have a large number of fps.
*
* The input to this method is a happensAt atom.
* */
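  // Added, illustrative example (assuming numOfCopies = 3, so that id1's copies are id11 and id12):
  // generateExtraAnnotation("happensAt(walking(id1),2347)") produces the moving/2 annotation for
  // the next frame (2347 + 40 = 2387) over all ordered pairs of {id1, id11, id12}, i.e.
  // holdsAt(moving(id1, id11),2387), holdsAt(moving(id11, id1),2387), ... (6 atoms in total).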
def generateExtraAnnotation(atom: String) = {
val parsed = parseAll(lleParser1, atom).getOrElse(throw new RuntimeException("Can't parse this"))
val time = parsed.time
val nextTime = (time.toInt + 40).toString
val id = parsed.id
val lle = parsed.what
val idCopies = List(id) ++ IDCopies(id)
val combinations = idCopies.combinations(2).flatMap(x => List(x, x.reverse))
combinations.map { idPair =>
lle match {
case "walking" => s"holdsAt(moving(${idPair.head}, ${idPair.tail.head}),$nextTime)"
case ("active" | "inactive") => s"holdsAt(meeting(${idPair.head}, ${idPair.tail.head}),$nextTime)"
case _ => ""
}
}.toList.filter(x => x != "")
}
def copyAtoms(atoms: List[String], what: String) = atoms.flatMap(x => copyAtom(x, what)).distinct
//copyAtoms(List("holdsAt(meeting(id1, id2),22400)", "holdsAt(meeting(id2, id1),22400)"), "annotation").foreach(println)
//copyAtoms(List("happensAt(walking(id1),2347)", "coords(id4, 3543, 2342, 75)"), "narrative").foreach(println)
}
OLED | OLED-master/src/main/scala/experiments/datautils/caviar_data/FindPositiveNegativeIntervals.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.caviar_data
import com.mongodb.casbah.commons.MongoDBObject
import logic.Examples._
import utils.DataUtils.Interval
import utils.Database
/**
* Created by nkatz on 3/22/17.
*/
object FindPositiveNegativeIntervals {
def main(args: Array[String]) = {
val intervals = getPositiveNegativeIntervals("caviar", "meeting")
println(intervals)
}
/**
*
    * @param DBName the name of the mongo DB that holds the examples
    * @param HLE the HLE we're currently learning
* @return a tuple containing the list of positive and negative intervals for the HLE respectively
*/
def getPositiveNegativeIntervals(DBName: String, HLE: String): (List[Interval], List[Interval]) = {
import scala.collection.mutable.Stack
val step = 40
var lastTime = 0
val intervals = Stack[Interval]()
val negativeIntervals = Stack[Interval]()
var holdsPreviousely = false
val closeInterval = (i: Stack[Interval], e: Example, what: String) => {
val lastInterval = i.pop()
// give some "air" around the positive intervals (hence +step)
lastInterval.endPoint = if (what == "positive") e.time.toInt + step else e.time.toInt // This is wrong we should have "e.time.toInt-step" here
i.push(lastInterval)
}
val startNewInterval = (i: Stack[Interval], e: Example, what: String) => {
// give some "air" around the positive intervals (hence -step)
if (what == "positive") i.push(Interval(HLE, e.time.toInt - step)) else i.push(Interval("nothing", e.time.toInt))
}
val closeLastInterval = (i: Stack[Interval]) => {
val last = i.pop()
if (last.endPoint == 0) last.endPoint = lastTime
i.push(last)
}
val DB = new Database(DBName, "examples")
val dataIterator = DB.collection.find().sort(MongoDBObject("time" -> 1))
while (dataIterator.hasNext) {
val x = dataIterator.next()
val e = Example(x)
val annotation = e.annotation
if (annotation.exists(x => x.contains(HLE))) {
if (!holdsPreviousely) {
// close the last negative interval
closeInterval(negativeIntervals, e, "negative")
// start a new positive interval
startNewInterval(intervals, e, "positive")
holdsPreviousely = true
}
} else {
if (holdsPreviousely) {
// close the last positive interval
closeInterval(intervals, e, "positive")
// and start a new negative interval
startNewInterval(negativeIntervals, e, "negative")
holdsPreviousely = false
} else {
          // start a new negative interval if one is not already being populated
if (negativeIntervals.isEmpty) {
startNewInterval(negativeIntervals, e, "negative")
}
}
}
lastTime = e.time.toInt // remember the last time to close the last interval in the stacks
}
closeLastInterval(negativeIntervals)
closeLastInterval(intervals)
intervals.toList.reverse foreach (x => println(s"${x.HLE}: (${x.startPoint},${x.endPoint}), length: ${x.length}"))
negativeIntervals.toList.reverse foreach (x => println(s"${x.HLE}: (${x.startPoint},${x.endPoint}), length: ${x.length}"))
// The average length of positive intervals
val averagePositiveLenth = intervals.foldLeft(0.0)(_ + _.length) / intervals.length
println(s"Average positive length: ${averagePositiveLenth.ceil}")
// The total length of negative intervals
val totalNegativeLength = negativeIntervals.foldLeft(0.0)(_ + _.length)
println(s"Total negative length: $totalNegativeLength")
// The total length of positive intervals
val totalPositiveLength = intervals.foldLeft(0.0)(_ + _.length)
println(s"Total positive length: $totalPositiveLength")
// 90% of negatives will be used for training:
val trainingNegativesNumber = ((90.0 / 100) * totalNegativeLength).toInt
println(s"90% of negatives (training set size) is ${(90.0 / 100) * totalNegativeLength}")
println(s"So negatives' testing set size is ${totalNegativeLength - trainingNegativesNumber}")
(intervals.toList.reverse, negativeIntervals.toList.reverse)
}
}
OLED | OLED-master/src/main/scala/experiments/datautils/caviar_data/GenerateCleanCaviarData.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.caviar_data
import app.runutils.Globals
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.MongoClient
import com.mongodb.casbah.commons.MongoDBObject
import logic.Examples.Example
import logic.Literal
import utils.{ASP, Database, Utils}
/**
* Created by nkatz on 6/20/17.
*/
object GenerateCleanCaviarData {
def main(args: Array[String]) = {
generateCleanData("meeting",
"/home/nkatz/dev/iled/datasets/hand-crafted-rules/caviar/meeting-hand-crafted.lp",
"/home/nkatz/dev/iled/datasets/Caviar/meeting")
}
def generateCleanData(HLE: String, handCraftedRulesPath: String, entryPath: String = "", fromDB: String = ""): Unit = {
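    // Added summary: for each CAVIAR example (in temporal order) this runs the hand-crafted
    // rules for the given HLE via ASP over the narrative, threads the annotation inferred for
    // the next time point into the next iteration, and stores the resulting "clean"
    // annotation in a fresh caviar_<HLE>_clean mongo DB.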
val CaviarDB = "caviar"
val newDB = s"caviar_${HLE}_clean"
val mongoClient = MongoClient()
mongoClient.dropDatabase(newDB)
val collection = mongoClient(newDB)("examples")
val gl = new Globals(entryPath)
val (handCraftedRules, show) = HLE match {
case "meeting" =>
(handCraftedRulesPath, s"\n#show.\n#show holdsAt($HLE(X,Y),T):holdsAt($HLE(X,Y),T).\n")
case "moving" =>
(handCraftedRulesPath, s"\n#show.\n#show holdsAt($HLE(X,Y),T):holdsAt($HLE(X,Y),T).\n")
}
val file = Utils.getTempFile("generate", ".lp")
val db = new Database(CaviarDB, "examples")
db.collection.find().sort(MongoDBObject("time" -> 1)).foldLeft(List[String]()){ (priorAnnotation, newExmpl) =>
val e = Example(newExmpl)
if (e.time == "766600") {
val stop = "stop"
}
val narrative = e.narrativeASP
val in = narrative ++ priorAnnotation.map(x => x + ".") ++ List(s"time(${e.time.toInt + 40}).")
val content = in.mkString("\n") + gl.INCLUDE_BK(gl.BK_WHOLE_EC) + gl.INCLUDE_BK(handCraftedRules) + show
Utils.writeLine(content, file.getCanonicalPath, "overwrite")
val out = ASP.solve(task = Globals.INFERENCE, aspInputFile = file)
val prior =
if (out.nonEmpty) {
out.head.atoms.map(x => (Literal.parse(x).terms(1).tostring, x)).filter(z => z._1 == e.time).map(_._2)
} else {
Nil
}
val next =
if (out.nonEmpty) {
out.head.atoms.map(x => (Literal.parse(x).terms(1).tostring, x)).filter(z => z._1 == (e.time.toInt + 40).toString).map(_._2)
} else {
Nil
}
val entry = MongoDBObject("time" -> e.time.toInt) ++ ("annotation" -> prior) ++ ("narrative" -> e.narrative)
println(entry)
collection.insert(entry)
next
}
}
}
OLED | OLED-master/src/main/scala/experiments/datautils/caviar_data/InspectDB.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.caviar_data
import com.mongodb.casbah.MongoClient
import com.mongodb.casbah.commons.MongoDBObject
import com.mongodb.casbah.Imports._
import logic.Examples.Example
/**
* Created by nkatz on 3/13/17.
*
* Utilities to inspect data
*/
object InspectDB extends App {
/*
val dbName = "caviar"
//val dbName = "maritime-brest"
//val dbName = "CAVIAR-MERGED-COPIES-10"
//val dbName = "caviar"
//val dbName = "CAVIAR_Real_original"
val mongoClient = MongoClient()
val event = "meeting"
val collection = mongoClient(dbName)("examples")
collection.find().foreach{ x =>
val e = Example(x)
if (e.annotation.nonEmpty) {
val f = e.annotation.filter(x => x.contains(event))
println(f)
}
}
*/
val dbName = "caviar-train"
val newDbName = "caviar-train-1"
val mongoClient = MongoClient()
val collection = mongoClient(dbName)("examples")
var idCounter = 0
mongoClient.dropDatabase(newDbName)
val newCollection = mongoClient(newDbName)("examples")
collection.find().foreach{ x =>
val e = Example(x)
val entry = MongoDBObject("time" -> e.time) ++ ("annotation" -> e.annotation) ++ ("narrative" -> e.narrative) ++ ("_id" -> idCounter)
newCollection.insert(entry)
idCounter += 1
}
}
OLED | OLED-master/src/main/scala/experiments/datautils/caviar_data/ParseCAVIAR.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.caviar_data
import java.io.File
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.MongoClient
import com.mongodb.casbah.commons.MongoDBObject
import logic.Literal
import utils.parsers.ClausalLogicParser
import scala.collection.immutable.SortedMap
/**
* Created by nkatz on 3/13/17.
*
* Parse the CAVIAR dataset into mongodb, see further notes below.
*
*/
object ParseCAVIAR extends ClausalLogicParser {
/*
Example({ "_id" : { "$oid" : "56d58f08e4b0842fb35754c8"} , "time" : 1077520 , "annotation" : [ ] ,
"narrative" : [ "orientation(id1,128,1077520)" , "happensAt(disappear(id1),1077520)" , "orientation(id3,164,1077520)" , "holdsAt(visible(id3),1077520)" ,
"orientation(id4,166,1077520)" , "holdsAt(visible(id4),1077520)" , "happensAt(running(id1),1077520)" , "coords(id1,77,60,1077520)" , "happensAt(walking(id3),1077520)" ,
"coords(id3,98,201,1077520)" , "happensAt(inactive(id4),1077520)" , "coords(id4,82,206,1077520)"]},,List(),List(),,false,false,List(),List())
*/
val fixedBordersDBName = "CAVIAR_Real_FixedBorders"
val originalDBName = "CAVIAR_Real_original"
def main(args: Array[String]) = {
// with fixed borders
//val dataPath = "/home/nkatz/dev/CAVIAR-abrupt-corrected-borderlines"
//val fixedBorders = true
// original version
//val dataPath = "/home/nkatz/dev/CAVIAR-abrupt-original"
//val dbname = "caviar"
val dataPath = args(0)
val dbname = args(1)
run(dataPath, dbname)
}
/**
* The number of training interpretations for each video is
* the number of distinct time points in that video times
* the 2-combinations of distinct ids.
*
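    * For example (added, illustrative): a video with 1000 distinct time points and 4
    * distinct ids yields 1000 * C(4,2) = 6000 interpretations; with a single id it is
    * just the 1000 time points.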
* @param path
*/
def countInterpretations(path: String) = {
val d = new File(path)
val idPattern = "id[0-9]+".r
val innerFolders = d.listFiles.sortBy(_.getName.split("-")(0).toInt)
var totalSize = 0
for (f <- innerFolders) {
println(s"Video ${f.getCanonicalPath}")
val files = f.listFiles.filter(x => movementOnly.exists(p => x.getName.contains(p)))
val contents =
(for (f <- files)
yield scala.io.Source.fromFile(f).getLines().filter(p => !p.startsWith("%"))).toList.flatten.mkString.replaceAll("\\s", "").split("\\.").toList
val parsed = contents.flatMap(x => parseAll(caviarParser(0), x).getOrElse(List(""))).filter(_ != "").asInstanceOf[List[Atom]]
//println(parsed map (_.atoms))
val allAtoms = parsed flatMap (_.atoms) map (x => Literal.parse(x))
val times = allAtoms.map(_.terms.reverse.head.tostring).distinct.length
//println(times.length)
val ids = parsed.flatMap(_.atoms).flatMap(z => idPattern.findAllIn(z).toList).distinct.length
val size = if (ids > 1) (times * utils.Utils.combinations(ids, 2)).toInt else times
println(s"current video size: $size")
totalSize = totalSize + size
}
println(s"Total size: $totalSize")
}
// We'll maintain two versions of CAVIAR: The first will be the corrected one (where examples at "borderlines"
// where a fluent changes its value, have been corrected -- pushed a frame forward for initiation, add extra
// annotated frame for termination). This has happened everywhere (consulting the narrative at the same time),
// except in cases of more than two persons participating in an HLE (e.g. happensAt( moving( grp_ID0, [ id0, id2, id3 ]), 13440).)
// The corrected version of CAVIAR is under /dev/CAVIAR-abrupt-corrected-borderlines.
// The second version is the original one, nothing has been tweaked. It is located under /dev/CAVIAR-abrupt-original
def run(path: String, dbName: String) = {
//val dbName = if (fixedBorders) fixedBordersDBName else originalDBName
val mongoClient = MongoClient()
mongoClient.dropDatabase(dbName)
val collection = mongoClient(dbName)("examples")
val d = new File(path)
val innerFolders = d.listFiles.sortBy(_.getName.split("-")(0).toInt)
var lastTime = 0
for (f <- innerFolders) {
println(s"Parsing video ${f.getCanonicalPath}")
val files = f.listFiles.filter(x => dataFileNames.exists(p => x.getName.contains(p)))
val contents =
(for (f <- files)
yield scala.io.Source.fromFile(f).getLines().filter(p => !p.startsWith("%"))).toList.flatten.mkString.replaceAll("\\s", "").split("\\.").toList
val parsed = contents.flatMap(x => parseAll(caviarParser(lastTime), x).getOrElse(List(""))).filter(_ != "").asInstanceOf[List[Atom]]
val atoms = SortedMap[Int, List[Atom]]() ++ parsed.groupBy(_.time.toInt)
for ((k, v) <- atoms) {
val narrative = v.filter(x => !x.annotationAtom).flatMap(z => z.atoms)
val annotation = v.filter(x => x.annotationAtom).flatMap(z => z.atoms)
val entry = MongoDBObject("time" -> k) ++ ("annotation" -> annotation) ++ ("narrative" -> narrative)
collection.insert(entry)
//println(s"inserted $entry")
}
lastTime = atoms.keySet.toList.reverse.head + 40 // the last time point
}
}
val hleMapping = Map("moving" -> "moving", "fighting" -> "fighting", "leaving_object" -> "leavingObject", "interacting" -> "meeting")
val correctedCaviarPath = "/home/nkatz/dev/CAVIAR-abrupt-corrected-borderlines"
val originalCaviarPath = "/home/nkatz/dev/CAVIAR-abrupt-original"
val dataFileNames = List("AppearenceIndv", "MovementIndv", "SituationGrp")
val movementOnly = List("MovementIndv")
def word: Parser[String] = """[A-Za-z0-9_]*""".r ^^ { x => x }
def person: Parser[Person] = "id" ~ number ^^ { case x ~ y => new Person(x + y) }
def persons: Parser[List[Person]] = "[" ~> repsep(person, ",") <~ "]"
def time: Parser[String] = number
def orientationValue: Parser[String] = number
def appearanceValue: Parser[String] = word
def coordinates: Parser[(String, String)] = "(" ~ number ~ "," ~ number ~ ")" ^^ { case "(" ~ x ~ "," ~ y ~ ")" => (x, y) }
def meeting: Parser[String] = "interacting"
def moving: Parser[String] = "moving"
def fighting: Parser[String] = "fighting"
def leavingObject: Parser[String] = "leaving_object"
def walking: Parser[String] = "walking"
def active: Parser[String] = "active"
def inactive: Parser[String] = "inactive"
def running: Parser[String] = "running"
def abrupt: Parser[String] = "abrupt"
def happens: Parser[String] = "happensAt"
def holds: Parser[String] = "holdsAt"
def orientation: Parser[String] = "orientation"
def appearance: Parser[String] = "appearance"
def coords: Parser[String] = "coord"
def annotationParser(pastTime: Int): Parser[AnnotationAtom] =
happens ~ "(" ~ (meeting | moving | fighting | leavingObject) ~ "(" ~ word ~ "," ~ persons ~ ")" ~ "," ~ time ~ ")" ^^ {
case _ ~ "(" ~ x ~ "(" ~ _ ~ "," ~ y ~ ")" ~ "," ~ z ~ ")" => new AnnotationAtom(x, y, (z.toInt + pastTime).toString)
}
def lleParser(pastTime: Int): Parser[NarrativeAtom] = happens ~ "(" ~ (walking | active | inactive | running | abrupt) ~ "(" ~ person ~ ")" ~ "," ~ time ~ ")" ^^ {
case _ ~ "(" ~ lle ~ "(" ~ p ~ ")" ~ "," ~ t ~ ")" => new NarrativeAtom(what = lle, id = p.id, time = (t.toInt + pastTime).toString)
}
  /* This is a utility parser used by CopyCAVIAR. It doesn't push time forward. */
def lleParser1: Parser[NarrativeAtom] = happens ~ "(" ~ (walking | active | inactive | running | abrupt) ~ "(" ~ person ~ ")" ~ "," ~ time ~ ")" ^^ {
case _ ~ "(" ~ lle ~ "(" ~ p ~ ")" ~ "," ~ t ~ ")" => new NarrativeAtom(what = lle, id = p.id, time = t)
}
def orientationParser(pastTime: Int): Parser[NarrativeAtom] = holds ~ "(" ~ orientation ~ "(" ~ person ~ ")" ~ "=" ~ number ~ "," ~ time ~ ")" ^^ {
case _ ~ "(" ~ _ ~ "(" ~ p ~ ")" ~ "=" ~ v ~ "," ~ t ~ ")" => new NarrativeAtom(what = "orientation", id = p.id, orientation = v, time = (t.toInt + pastTime).toString)
}
def appearanceParser(pastTime: Int): Parser[NarrativeAtom] = holds ~ "(" ~ appearance ~ "(" ~ person ~ ")" ~ "=" ~ word ~ "," ~ time ~ ")" ^^ {
case _ ~ "(" ~ _ ~ "(" ~ p ~ ")" ~ "=" ~ v ~ "," ~ t ~ ")" => new NarrativeAtom(what = "appearance", id = p.id, appearance = v, time = (t.toInt + pastTime).toString)
}
def coordsParser(pastTime: Int): Parser[NarrativeAtom] = holds ~ "(" ~ coords ~ "(" ~ person ~ ")" ~ "=" ~ coordinates ~ "," ~ time ~ ")" ^^ {
case _ ~ "(" ~ _ ~ "(" ~ p ~ ")" ~ "=" ~ c ~ "," ~ t ~ ")" => new NarrativeAtom(what = "coord", id = p.id, xcoord = c._1, ycoord = c._2, time = (t.toInt + pastTime).toString)
}
def caviarAtomParser(pastTime: Int): Parser[Atom] =
annotationParser(pastTime) | lleParser(pastTime) | orientationParser(pastTime) | appearanceParser(pastTime) | coordsParser(pastTime)
def caviarParser(pastTime: Int): Parser[List[Atom]] = rep(caviarAtomParser(pastTime))
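  // Added, hypothetical helper (not in the original source): a minimal parsing sketch with a
  // zero time offset; the group id 'grp0' and the person ids are made-up inputs, and
  // 'interacting' is renamed to 'meeting' through hleMapping.
  def annotationParserDemo(): Unit = {
    val res = parseAll(annotationParser(0), "happensAt(interacting(grp0,[id1,id4]),680)")
    // Expected output: holdsAt(meeting(id1,id4),680) and holdsAt(meeting(id4,id1),680)
    if (res.successful) res.get.atoms foreach println
  }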
class Person(val id: String)
trait Atom {
val annotationAtom: Boolean
val atoms: List[String]
val time: String
}
class AnnotationAtom(val HLE: String, val persons: List[Person], val time: String) extends Atom {
val annotationAtom = true
val atoms =
if (HLE == "leaving_object") {
List(s"holdsAt(${hleMapping(HLE)}(${persons.head.id},${persons(1).id}),$time)")
} else {
persons.toSet.subsets(2).flatMap(y => for (z <- y.toList.permutations) yield s"holdsAt(${hleMapping(HLE)}(${z.head.id},${z(1).id}),$time)").toList
}
}
// what is either an LLE, or orientation, appearance, coord
class NarrativeAtom(val what: String = "none", val id: String, val xcoord: String = "none",
val ycoord: String = "none", val orientation: String = "none",
val appearance: String = "none", val time: String) extends Atom {
val annotationAtom = false
val atoms = what match {
case ("walking" | "active" | "inactive" | "running" | "abrupt") => List(s"happensAt($what($id),$time)")
case "coord" => List(s"coords($id,$xcoord,$ycoord,$time)")
case "appearance" => appearance match {
case "appear" | "disappear" => List(s"happensAt($appearance($id),$time)")
case _ => List(s"holdsAt($appearance($id),$time)")
}
case "orientation" => List(s"orientation($id,$orientation,$time)")
}
}
}
OLED | OLED-master/src/main/scala/experiments/datautils/caviar_data/ParseCAVIAR_DB_per_video.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.caviar_data
import java.io.File
import ParseCAVIAR._
import com.mongodb.casbah.MongoClient
import com.mongodb.casbah.commons.MongoDBObject
import com.mongodb.casbah.Imports._
import scala.collection.immutable.SortedMap
object ParseCAVIAR_DB_per_video {
def main(args: Array[String]) = {
val dataPath = args(0)
run(dataPath)
}
def run(path: String) = {
//val dbName = if (fixedBorders) fixedBordersDBName else originalDBName
val mongoClient = MongoClient()
val d = new File(path)
val innerFolders = d.listFiles.sortBy(_.getName.split("-")(0).toInt)
//var lastTime = 0
var videoCounter = 0
for (f <- innerFolders) {
videoCounter += 1
println(s"Parsing video ${f.getCanonicalPath}")
val files = f.listFiles.filter(x => dataFileNames.exists(p => x.getName.contains(p)))
      val contents =
        (for (f <- files)
          yield scala.io.Source.fromFile(f).getLines().filter(p => !p.startsWith("%"))).toList.flatten.mkString.replaceAll("\\s", "").split("\\.").toList
val parsed = contents.flatMap(x =>
//parseAll(caviarParser(lastTime),x).getOrElse(List(""))).filter(_!="").asInstanceOf[List[Atom]]
parseAll(caviarParser(0), x).getOrElse(List(""))).filter(_ != "").asInstanceOf[List[Atom]]
val atoms = SortedMap[Int, List[Atom]]() ++ parsed.groupBy(_.time.toInt)
/*
for ( (k,v) <- atoms ) {
val narrative = v.filter(x => !x.annotationAtom).flatMap(z => z.atoms)
val annotation = v.filter(x => x.annotationAtom).flatMap(z => z.atoms)
val entry = MongoDBObject("time" -> k) ++ ("annotation" -> annotation) ++ ("narrative" -> narrative)
collection.insert(entry)
}
*/
var hasMeeting = false
var hasMoving = false
val dbEntries = atoms.foldLeft(Vector[DBObject]()) { (entries, mapRecord) =>
val (time, atoms) = (mapRecord._1, mapRecord._2)
val narrative = atoms.filter(x => !x.annotationAtom).flatMap(z => z.atoms)
val annotation = atoms.filter(x => x.annotationAtom).flatMap(z => z.atoms)
if (annotation.exists(p => p.contains("meeting"))) hasMeeting = true
if (annotation.exists(p => p.contains("moving"))) hasMoving = true
val entry = MongoDBObject("time" -> time) ++ ("annotation" -> annotation) ++ ("narrative" -> narrative)
entries :+ entry
}
val dbName =
if (hasMeeting && hasMoving) s"caviar-video-$videoCounter-meeting-moving"
else if (hasMeeting) s"caviar-video-$videoCounter-meeting"
else if (hasMoving) s"caviar-video-$videoCounter-moving"
else s"caviar-video-$videoCounter"
mongoClient.dropDatabase(dbName)
val collection = mongoClient(dbName)("examples")
println(s"Inserting data in $dbName")
dbEntries foreach (entry => collection.insert(entry))
//lastTime = atoms.keySet.toList.reverse.head+40 // the last time point
}
}
}
OLED | OLED-master/src/main/scala/experiments/datautils/caviar_intervals/MeetingCleanTrainingData.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.caviar_intervals
import utils.DataUtils.{DataAsIntervals, Interval}
import scala.util.Random
/**
* Created by nkatz on 3/23/16.
*/
object MeetingCleanTrainingData {
/**
* To find the intervals call:
*
* val intervals = iled.utils.CaviarUtils.getPositiveNegativeIntervals("meeting")
    * val positiveIntervals = intervals._1
* val negativeIntervals = intervals._2
*
*
*/
/*
Average positive length: 234.0
Total negative length: 23249.0
Total positive length: 3739.0
90% of negatives (training set size) is 20924.100000000002
So negatives' testing set size is 2325.0
*/
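  // Added note (not in the original): the interval lengths quoted below are counted in
  // 40ms time points, i.e. for an interval (s, e) the length is (e - s) / 40 + 1.
  // For instance, meetPos1: (24480 - 6200) / 40 + 1 = 458.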
val meetPos1 = Interval("meeting", 6200, 24480) // length:458
val meetPos2 = Interval("meeting", 27000, 63160) // length:905
val meetPos3 = Interval("meeting", 585200, 585960) // length:20
val meetPos4 = Interval("meeting", 600520, 601640) // length:29
val meetPos5 = Interval("meeting", 622440, 624840) // length: 61
val meetPos6 = Interval("meeting", 638720, 641320) // length: 66
val meetPos7 = Interval("meeting", 714360, 720040) // length: 143
val meetPos8 = Interval("meeting", 746160, 747600) // length: 37
val meetPos9 = Interval("meeting", 765280, 766720) // length: 37
val meetPos10 = Interval("meeting", 785440, 791880) // length: 162
val meetPos11 = Interval("meeting", 812520, 835520) // length: 576
val meetPos12 = Interval("meeting", 842320, 850120) // length: 196
val meetPos13 = Interval("meeting", 892000, 896240) // length: 107
val meetPos14 = Interval("meeting", 1001320, 1003200) // length: 48
val meetPos15 = Interval("meeting", 1008640, 1010480) // length: 47
val meetPos16 = Interval("meeting", 1045200, 1048280) // length: 78
val meetPos17 = Interval("meeting", 1072360, 1077680) //length: 134
val allPosIntervals = List(meetPos1, meetPos2, meetPos3, meetPos4, meetPos5, meetPos6, meetPos7, meetPos8,
meetPos9, meetPos10, meetPos11, meetPos12, meetPos13, meetPos14, meetPos15, meetPos16, meetPos17)
val testingPos1 = List(meetPos1)
val testingPos2 = List(meetPos2)
val testingPos3 = List(meetPos3, meetPos4, meetPos17)
val testingPos4 = List(meetPos5, meetPos6)
val testingPos5 = List(meetPos7, meetPos8)
val testingPos6 = List(meetPos9, meetPos10)
val testingPos7 = List(meetPos11, meetPos12)
val testingPos8 = List(meetPos13, meetPos14)
val testingPos9 = List(meetPos15)
val testingPos10 = List(meetPos16)
val meetNeg1 = Interval("meeting", 680, 6240) //length: 140
val meetNeg2 = Interval("meeting", 24440, 27040) //length: 66
val meetNeg3 = Interval("meeting", 63120, 143080) //2000
val meetNeg4 = Interval("meeting", 143120, 223080) //2000
val meetNeg5 = Interval("meeting", 223120, 303080) //2000
val meetNeg6 = Interval("meeting", 303120, 383080) //2000
val meetNeg7 = Interval("meeting", 383120, 463080) //2000
val meetNeg8 = Interval("meeting", 463120, 543080) //2000
val meetNeg9 = Interval("meeting", 543120, 585200) //2000
val meetNeg10 = Interval("meeting", 585920, 600560) //367
val meetNeg11 = Interval("meeting", 601600, 622480) //523
val meetNeg12 = Interval("meeting", 624800, 638760) //350
val meetNeg13 = Interval("meeting", 641280, 714400) //1829
val meetNeg14 = Interval("meeting", 720000, 746200) //656
val meetNeg15 = Interval("meeting", 747560, 765320) //445
val meetNeg16 = Interval("meeting", 766680, 785480) //471
val meetNeg17 = Interval("meeting", 791840, 812560) //519
val meetNeg18 = Interval("meeting", 835480, 842360) //173
val meetNeg19 = Interval("meeting", 850080, 892040) //1050
val meetNeg20 = Interval("meeting", 896200, 1001360) //2630
val meetNeg21 = Interval("meeting", 1003160, 1008680) //139
val meetNeg22 = Interval("meeting", 1010440, 1045240) //871
val meetNeg23 = Interval("meeting", 1048240, 1072400) //605
val allNegIntervals =
List(meetNeg1, meetNeg2, meetNeg3, meetNeg4, meetNeg5, meetNeg6, meetNeg7, meetNeg8, meetNeg9, meetNeg10, meetNeg11, meetNeg12, meetNeg13, meetNeg14, meetNeg15, meetNeg16,
meetNeg17, meetNeg18, meetNeg19, meetNeg20, meetNeg21, meetNeg22, meetNeg23)
//allNegIntervals.foreach(x => println(x.length))
val testingNeg1 = List(meetNeg1, meetNeg2, meetNeg8)
val testingNeg2 = List(meetNeg3, meetNeg4, meetNeg5)
val testingNeg3 = List(meetNeg6, meetNeg7, meetNeg9)
val testingNeg4 = List(meetNeg10, meetNeg11, meetNeg10)
val testingNeg5 = List(meetNeg13, meetNeg14, meetNeg15)
val testingNeg6 = List(meetNeg16, meetNeg17, meetNeg18)
val testingNeg7 = List(meetNeg12, meetNeg19)
val testingNeg8 = List(meetNeg20, meetNeg21)
val testingNeg9 = List(meetNeg22)
val testingNeg10 = List(meetNeg23)
// Training set 1. All but meetPos1
//----------------------------------
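  // Added note: in each training set below, the first interval of the concatenation (a
  // positive one, since positives precede negatives) is pinned to the front and only the
  // tail is shuffled -- presumably so that every training stream opens with positive
  // examples of the target fluent.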
val meetTrainingSet1 = {
val training = allPosIntervals.filter(x => !testingPos1.contains(x)) ++ allNegIntervals.filter(z => !testingNeg1.contains(z))
val testing = testingPos1 ++ testingNeg1
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val meetTrainingSet2 = {
val training = allPosIntervals.filter(x => !testingPos2.contains(x)) ++ allNegIntervals.filter(z => !testingNeg2.contains(z))
val testing = testingPos2 ++ testingNeg2
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val meetTrainingSet3 = {
val training = allPosIntervals.filter(x => !testingPos3.contains(x)) ++ allNegIntervals.filter(z => !testingNeg3.contains(z))
val testing = testingPos3 ++ testingNeg3
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val meetTrainingSet4 = {
val training = allPosIntervals.filter(x => !testingPos4.contains(x)) ++ allNegIntervals.filter(z => !testingNeg4.contains(z))
val testing = testingPos4 ++ testingNeg4
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val meetTrainingSet5 = {
val training = allPosIntervals.filter(x => !testingPos5.contains(x)) ++ allNegIntervals.filter(z => !testingNeg5.contains(z))
val testing = testingPos5 ++ testingNeg5
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val meetTrainingSet6 = {
val training = allPosIntervals.filter(x => !testingPos6.contains(x)) ++ allNegIntervals.filter(z => !testingNeg6.contains(z))
val testing = testingPos6 ++ testingNeg6
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val meetTrainingSet7 = {
val training = allPosIntervals.filter(x => !testingPos7.contains(x)) ++ allNegIntervals.filter(z => !testingNeg7.contains(z))
val testing = testingPos7 ++ testingNeg7
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val meetTrainingSet8 = {
val training = allPosIntervals.filter(x => !testingPos8.contains(x)) ++ allNegIntervals.filter(z => !testingNeg8.contains(z))
val testing = testingPos8 ++ testingNeg8
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val meetTrainingSet9 = {
val training = allPosIntervals.filter(x => !testingPos9.contains(x)) ++ allNegIntervals.filter(z => !testingNeg9.contains(z))
val testing = testingPos9 ++ testingNeg9
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val meetTrainingSet10 = {
val training = allPosIntervals.filter(x => !testingPos10.contains(x)) ++ allNegIntervals.filter(z => !testingNeg10.contains(z))
val testing = testingPos10 ++ testingNeg10
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val allTrainingSets = List(meetTrainingSet1, meetTrainingSet2, meetTrainingSet3, meetTrainingSet4, meetTrainingSet5, meetTrainingSet6, meetTrainingSet7,
meetTrainingSet8, meetTrainingSet9, meetTrainingSet10)
val wholeCAVIARForManualRules = {
new DataAsIntervals(trainingSet = List(), testingSet = allPosIntervals ++ allNegIntervals)
}
val wholeCAVIARForTraining = {
new DataAsIntervals(trainingSet = allPosIntervals ++ allNegIntervals, testingSet = allPosIntervals ++ allNegIntervals)
}
}
OLED | OLED-master/src/main/scala/experiments/datautils/caviar_intervals/MeetingTrainingData.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.caviar_intervals
import utils.DataUtils.{DataAsIntervals, Interval}
import scala.util.Random
/**
* Created by nkatz on 3/22/16.
*/
object MeetingTrainingData {
/**
* To find the intervals call:
*
* val intervals = iled.utils.CaviarUtils.getPositiveNegativeIntervals("meeting")
    * val positiveIntervals = intervals._1
* val negativeIntervals = intervals._2
*
*
*/
/*
meeting:
Average positive length: 170.0
Total negative length: 25103.0
Total positive length: 1867.0
90% of negatives (training set size) is 22592.7
So negatives' testing set size is 2511.0
*/
//val meetPos1 = Interval("meeting",5720,24480)
val meetPos1 = Interval("meeting", 5680, 24480)
//val meetPos2 = Interval("meeting",27280,61560)
val meetPos2 = Interval("meeting", 27240, 61520)
//val meetPos3 = Interval("meeting",507120,509800)
val meetPos3 = Interval("meeting", 507080, 509760)
//val meetPos4 = Interval("meeting",559200,564200)
val meetPos4 = Interval("meeting", 559160, 564160)
//val meetPos5 = Interval("meeting",785240,786320)
val meetPos5 = Interval("meeting", 785200, 786280)
//val meetPos6 = Interval("meeting",813080,814960)
val meetPos6 = Interval("meeting", 813040, 814920)
//val meetPos7 = Interval("meeting",829400,835520)
val meetPos7 = Interval("meeting", 829360, 833520)
//val meetPos8 = Interval("meeting",842680,843640)
val meetPos8 = Interval("meeting", 840200, 841320)
//val meetPos9 = Interval("meeting",892440,894920)
val meetPos9 = Interval("meeting", 867560, 868520)
//val meetPos10 = Interval("meeting",1009000,1009880)
val meetPos10 = Interval("meeting", 917320, 919800)
val meetPos11 = Interval("meeting", 1033880, 1034760)
  // To break large intervals into smaller ones of a fixed number of data points (40 is the time step), use e.g.:
// List.range(919760,1030680,40).grouped(500).map(x => (x.head,x.tail.reverse.head)) foreach println
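  // Added note: with grouped(500) as above, the first printed pairs are (919760,939720),
  // (939760,959720), (959760,979720), ... -- the 500-point negative intervals
  // meetNeg26-meetNeg30 below were taken from this output.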
val meetNeg1 = Interval("meeting", 680, 5720) // length: 128
val meetNeg2 = Interval("meeting", 24440, 27280) // length: 73
val meetNeg3 = Interval("meeting", 61480, 101440) // length: 1000
val meetNeg4 = Interval("meeting", 101480, 141440) // length: 1000
val meetNeg5 = Interval("meeting", 141480, 181440) // length: 1000
val meetNeg6 = Interval("meeting", 181480, 221440) // length: 1000
val meetNeg7 = Interval("meeting", 221480, 261440) // length: 1000
val meetNeg8 = Interval("meeting", 261480, 301440) // length: 1000
val meetNeg9 = Interval("meeting", 301480, 341440) // length: 1000
val meetNeg10 = Interval("meeting", 341480, 381440) // length: 1000
val meetNeg11 = Interval("meeting", 381480, 421440) // length: 1000
val meetNeg12 = Interval("meeting", 421480, 461440) // length: 1000
val meetNeg13 = Interval("meeting", 461480, 501440) // length: 1000
val meetNeg14 = Interval("meeting", 509720, 559200) // length: 1238
val meetNeg15 = Interval("meeting", 564120, 604080) // length: 1000
val meetNeg16 = Interval("meeting", 604120, 644080) // length: 1000
val meetNeg17 = Interval("meeting", 644120, 684080) // length: 1000
val meetNeg18 = Interval("meeting", 684120, 724080) // length: 1000
val meetNeg19 = Interval("meeting", 724120, 764080) // length: 1000
val meetNeg20 = Interval("meeting", 764120, 785200) // length: 528
val meetNeg21 = Interval("meeting", 786240, 813080) // length: 672
val meetNeg22 = Interval("meeting", 814880, 829400) // length: 364
val meetNeg23 = Interval("meeting", 833480, 840240) // length: 182
val meetNeg24 = Interval("meeting", 841280, 867600) // length: 659
val meetNeg25 = Interval("meeting", 868480, 917360) // length: 1223
val meetNeg26 = Interval("meeting", 919760, 939720) // length: 500
val meetNeg27 = Interval("meeting", 939760, 959720) // length: 500
val meetNeg28 = Interval("meeting", 959760, 979720) // length: 500
val meetNeg29 = Interval("meeting", 999760, 1019720) // length: 500
val meetNeg30 = Interval("meeting", 1019760, 1030640) // length: 500
val meetNeg31 = Interval("meeting", 1030720, 1033920) // length: 81
val meetNeg32 = Interval("meeting", 1009840, 1077680) // length: 1697
val meetNeg33 = Interval("meeting", 1034720, 1102600) // length: 1000
val allNegIntervals = List(meetNeg1, meetNeg2, meetNeg3, meetNeg4, meetNeg5, meetNeg6, meetNeg7, meetNeg8, meetNeg9, meetNeg10, meetNeg11, meetNeg12, meetNeg13, meetNeg14,
meetNeg15, meetNeg16, meetNeg17, meetNeg18, meetNeg19, meetNeg20, meetNeg21, meetNeg22, meetNeg23, meetNeg24, meetNeg25, meetNeg26, meetNeg27,
meetNeg28, meetNeg29, meetNeg30, meetNeg31, meetNeg32, meetNeg33)
val allPosIntervals = List(meetPos1, meetPos2, meetPos3, meetPos4, meetPos5, meetPos6, meetPos7, meetPos8, meetPos9, meetPos10, meetPos11)
// Negative intervals for the testing sets
val testingNeg1 = List(meetNeg1, meetNeg2, meetNeg3, meetNeg33)
val testingNeg2 = List(meetNeg4, meetNeg5, meetNeg6)
val testingNeg3 = List(meetNeg7, meetNeg8, meetNeg9)
val testingNeg4 = List(meetNeg10, meetNeg11, meetNeg12)
val testingNeg5 = List(meetNeg13, meetNeg14, meetNeg15)
val testingNeg6 = List(meetNeg16, meetNeg17, meetNeg18)
val testingNeg7 = List(meetNeg19, meetNeg20, meetNeg21)
val testingNeg8 = List(meetNeg22, meetNeg23, meetNeg24)
val testingNeg9 = List(meetNeg25, meetNeg26, meetNeg27)
val testingNeg10 = List(meetNeg28, meetNeg29, meetNeg30, meetNeg31, meetNeg32)
val allNegativeTestingSetIntervals = List(testingNeg1, testingNeg2, testingNeg3, testingNeg4, testingNeg5, testingNeg6, testingNeg7, testingNeg8, testingNeg9, testingNeg10)
def getMeetingTrainingData(fold: Int, randomOrder: Boolean) = {
val training = fold match {
case 1 =>
randomOrder match {
// Training set 1. All but meetPos1 & meetPos11
case true => allPosIntervals.filter(x => x != meetPos1 && x != meetPos11) ++ allNegIntervals.filter(z => !testingNeg1.contains(z))
case _ => List(Interval("meeting", 27240, 61520), Interval("meeting", 301480, 341440), Interval("meeting", 939760, 959720), Interval("meeting", 813040, 814920), Interval("meeting", 684120, 724080), Interval("meeting", 644120, 684080), Interval("meeting", 1009840, 1077680), Interval("meeting", 833480, 840240), Interval("meeting", 867560, 868520), Interval("meeting", 559160, 564160), Interval("meeting", 786240, 813080), Interval("meeting", 917320, 919800), Interval("meeting", 840200, 841320), Interval("meeting", 785200, 786280), Interval("meeting", 919760, 939720), Interval("meeting", 181480, 221440), Interval("meeting", 829360, 833520), Interval("meeting", 564120, 604080), Interval("meeting", 421480, 461440), Interval("meeting", 141480, 181440), Interval("meeting", 341480, 381440), Interval("meeting", 959760, 979720), Interval("meeting", 999760, 1019720), Interval("meeting", 509720, 559200), Interval("meeting", 1019760, 1030640), Interval("meeting", 261480, 301440), Interval("meeting", 381480, 421440), Interval("meeting", 724120, 764080), Interval("meeting", 461480, 501440), Interval("meeting", 814880, 829400), Interval("meeting", 221480, 261440), Interval("meeting", 604120, 644080), Interval("meeting", 841280, 867600), Interval("meeting", 507080, 509760), Interval("meeting", 868480, 917360), Interval("meeting", 101480, 141440), Interval("meeting", 1030720, 1033920), Interval("meeting", 764120, 785200))
}
case 2 =>
randomOrder match {
// Training set 2. All but meetPos2
case true => allPosIntervals.filter(x => x != meetPos2) ++ allNegIntervals.filter(z => !testingNeg2.contains(z))
case _ => List(Interval("meeting", 5680, 24480), Interval("meeting", 680, 5720), Interval("meeting", 1033880, 1034760), Interval("meeting", 814880, 829400), Interval("meeting", 917320, 919800), Interval("meeting", 381480, 421440), Interval("meeting", 999760, 1019720), Interval("meeting", 840200, 841320), Interval("meeting", 261480, 301440), Interval("meeting", 461480, 501440), Interval("meeting", 507080, 509760), Interval("meeting", 868480, 917360), Interval("meeting", 684120, 724080), Interval("meeting", 221480, 261440), Interval("meeting", 301480, 341440), Interval("meeting", 959760, 979720), Interval("meeting", 813040, 814920), Interval("meeting", 1034720, 1102600), Interval("meeting", 341480, 381440), Interval("meeting", 604120, 644080), Interval("meeting", 867560, 868520), Interval("meeting", 509720, 559200), Interval("meeting", 1009840, 1077680), Interval("meeting", 421480, 461440), Interval("meeting", 833480, 840240), Interval("meeting", 841280, 867600), Interval("meeting", 785200, 786280), Interval("meeting", 786240, 813080), Interval("meeting", 564120, 604080), Interval("meeting", 1019760, 1030640), Interval("meeting", 61480, 101440), Interval("meeting", 24440, 27280), Interval("meeting", 919760, 939720), Interval("meeting", 829360, 833520), Interval("meeting", 559160, 564160), Interval("meeting", 764120, 785200), Interval("meeting", 1030720, 1033920), Interval("meeting", 644120, 684080), Interval("meeting", 724120, 764080), Interval("meeting", 939760, 959720))
}
case 3 =>
randomOrder match {
// Training set 3. All but meetPos3
case true => allPosIntervals.filter(x => x != meetPos3) ++ allNegIntervals.filter(z => !testingNeg3.contains(z))
case _ => List(Interval("meeting", 5680, 24480), Interval("meeting", 1009840, 1077680), Interval("meeting", 684120, 724080), Interval("meeting", 724120, 764080), Interval("meeting", 680, 5720), Interval("meeting", 868480, 917360), Interval("meeting", 841280, 867600), Interval("meeting", 27240, 61520), Interval("meeting", 461480, 501440), Interval("meeting", 785200, 786280), Interval("meeting", 559160, 564160), Interval("meeting", 181480, 221440), Interval("meeting", 1019760, 1030640), Interval("meeting", 509720, 559200), Interval("meeting", 341480, 381440), Interval("meeting", 867560, 868520), Interval("meeting", 1033880, 1034760), Interval("meeting", 833480, 840240), Interval("meeting", 829360, 833520), Interval("meeting", 999760, 1019720), Interval("meeting", 564120, 604080), Interval("meeting", 939760, 959720), Interval("meeting", 959760, 979720), Interval("meeting", 1030720, 1033920), Interval("meeting", 644120, 684080), Interval("meeting", 61480, 101440), Interval("meeting", 840200, 841320), Interval("meeting", 919760, 939720), Interval("meeting", 813040, 814920), Interval("meeting", 917320, 919800), Interval("meeting", 764120, 785200), Interval("meeting", 24440, 27280), Interval("meeting", 101480, 141440), Interval("meeting", 421480, 461440), Interval("meeting", 814880, 829400), Interval("meeting", 786240, 813080), Interval("meeting", 1034720, 1102600), Interval("meeting", 604120, 644080), Interval("meeting", 141480, 181440), Interval("meeting", 381480, 421440))
}
case 4 =>
randomOrder match {
// Training set 4. All but meetPos4
case true => allPosIntervals.filter(x => x != meetPos4) ++ allNegIntervals.filter(z => !testingNeg4.contains(z))
case _ => List(Interval("meeting", 5680, 24480), Interval("meeting", 840200, 841320), Interval("meeting", 101480, 141440), Interval("meeting", 829360, 833520), Interval("meeting", 1019760, 1030640), Interval("meeting", 461480, 501440), Interval("meeting", 868480, 917360), Interval("meeting", 680, 5720), Interval("meeting", 61480, 101440), Interval("meeting", 261480, 301440), Interval("meeting", 724120, 764080), Interval("meeting", 604120, 644080), Interval("meeting", 785200, 786280), Interval("meeting", 999760, 1019720), Interval("meeting", 301480, 341440), Interval("meeting", 841280, 867600), Interval("meeting", 181480, 221440), Interval("meeting", 141480, 181440), Interval("meeting", 939760, 959720), Interval("meeting", 24440, 27280), Interval("meeting", 644120, 684080), Interval("meeting", 221480, 261440), Interval("meeting", 786240, 813080), Interval("meeting", 919760, 939720), Interval("meeting", 813040, 814920), Interval("meeting", 959760, 979720), Interval("meeting", 684120, 724080), Interval("meeting", 27240, 61520), Interval("meeting", 1034720, 1102600), Interval("meeting", 509720, 559200), Interval("meeting", 1030720, 1033920), Interval("meeting", 564120, 604080), Interval("meeting", 833480, 840240), Interval("meeting", 867560, 868520), Interval("meeting", 814880, 829400), Interval("meeting", 1033880, 1034760), Interval("meeting", 507080, 509760), Interval("meeting", 764120, 785200), Interval("meeting", 1009840, 1077680), Interval("meeting", 917320, 919800))
}
case 5 =>
randomOrder match {
// Training set 5. All but meetPos5
case true => allPosIntervals.filter(x => x != meetPos5) ++ allNegIntervals.filter(z => !testingNeg5.contains(z))
case _ => List(Interval("meeting", 5680, 24480), Interval("meeting", 1019760, 1030640), Interval("meeting", 829360, 833520), Interval("meeting", 421480, 461440), Interval("meeting", 999760, 1019720), Interval("meeting", 680, 5720), Interval("meeting", 814880, 829400), Interval("meeting", 1033880, 1034760), Interval("meeting", 559160, 564160), Interval("meeting", 1009840, 1077680), Interval("meeting", 917320, 919800), Interval("meeting", 840200, 841320), Interval("meeting", 61480, 101440), Interval("meeting", 1034720, 1102600), Interval("meeting", 684120, 724080), Interval("meeting", 764120, 785200), Interval("meeting", 101480, 141440), Interval("meeting", 833480, 840240), Interval("meeting", 381480, 421440), Interval("meeting", 221480, 261440), Interval("meeting", 841280, 867600), Interval("meeting", 724120, 764080), Interval("meeting", 507080, 509760), Interval("meeting", 919760, 939720), Interval("meeting", 1030720, 1033920), Interval("meeting", 261480, 301440), Interval("meeting", 813040, 814920), Interval("meeting", 301480, 341440), Interval("meeting", 341480, 381440), Interval("meeting", 939760, 959720), Interval("meeting", 604120, 644080), Interval("meeting", 867560, 868520), Interval("meeting", 959760, 979720), Interval("meeting", 27240, 61520), Interval("meeting", 181480, 221440), Interval("meeting", 141480, 181440), Interval("meeting", 868480, 917360), Interval("meeting", 24440, 27280), Interval("meeting", 786240, 813080), Interval("meeting", 644120, 684080))
}
case 6 =>
randomOrder match {
// Training set 6. All but meetPos6
case true => allPosIntervals.filter(x => x != meetPos6) ++ allNegIntervals.filter(z => !testingNeg6.contains(z))
case _ => List(Interval("meeting", 5680, 24480), Interval("meeting", 786240, 813080), Interval("meeting", 507080, 509760), Interval("meeting", 840200, 841320), Interval("meeting", 833480, 840240), Interval("meeting", 829360, 833520), Interval("meeting", 785200, 786280), Interval("meeting", 917320, 919800), Interval("meeting", 261480, 301440), Interval("meeting", 341480, 381440), Interval("meeting", 27240, 61520), Interval("meeting", 939760, 959720), Interval("meeting", 724120, 764080), Interval("meeting", 461480, 501440), Interval("meeting", 1033880, 1034760), Interval("meeting", 301480, 341440), Interval("meeting", 221480, 261440), Interval("meeting", 764120, 785200), Interval("meeting", 61480, 101440), Interval("meeting", 1034720, 1102600), Interval("meeting", 999760, 1019720), Interval("meeting", 381480, 421440), Interval("meeting", 841280, 867600), Interval("meeting", 509720, 559200), Interval("meeting", 141480, 181440), Interval("meeting", 564120, 604080), Interval("meeting", 680, 5720), Interval("meeting", 101480, 141440), Interval("meeting", 181480, 221440), Interval("meeting", 1019760, 1030640), Interval("meeting", 959760, 979720), Interval("meeting", 868480, 917360), Interval("meeting", 867560, 868520), Interval("meeting", 814880, 829400), Interval("meeting", 1030720, 1033920), Interval("meeting", 421480, 461440), Interval("meeting", 559160, 564160), Interval("meeting", 24440, 27280), Interval("meeting", 1009840, 1077680), Interval("meeting", 919760, 939720))
}
case 7 =>
randomOrder match {
// Training set 7. All but meetPos7
case true => allPosIntervals.filter(x => x != meetPos7) ++ allNegIntervals.filter(z => !testingNeg7.contains(z))
case _ => List(Interval("meeting", 5680, 24480), Interval("meeting", 27240, 61520), Interval("meeting", 221480, 261440), Interval("meeting", 814880, 829400), Interval("meeting", 301480, 341440), Interval("meeting", 684120, 724080), Interval("meeting", 840200, 841320), Interval("meeting", 509720, 559200), Interval("meeting", 1033880, 1034760), Interval("meeting", 1034720, 1102600), Interval("meeting", 61480, 101440), Interval("meeting", 841280, 867600), Interval("meeting", 461480, 501440), Interval("meeting", 999760, 1019720), Interval("meeting", 507080, 509760), Interval("meeting", 381480, 421440), Interval("meeting", 559160, 564160), Interval("meeting", 785200, 786280), Interval("meeting", 181480, 221440), Interval("meeting", 141480, 181440), Interval("meeting", 261480, 301440), Interval("meeting", 1009840, 1077680), Interval("meeting", 813040, 814920), Interval("meeting", 341480, 381440), Interval("meeting", 644120, 684080), Interval("meeting", 101480, 141440), Interval("meeting", 919760, 939720), Interval("meeting", 421480, 461440), Interval("meeting", 1019760, 1030640), Interval("meeting", 917320, 919800), Interval("meeting", 24440, 27280), Interval("meeting", 564120, 604080), Interval("meeting", 867560, 868520), Interval("meeting", 604120, 644080), Interval("meeting", 959760, 979720), Interval("meeting", 833480, 840240), Interval("meeting", 1030720, 1033920), Interval("meeting", 680, 5720), Interval("meeting", 868480, 917360), Interval("meeting", 939760, 959720))
}
case 8 =>
randomOrder match {
// Training set 8. All but meetPos8
case true => allPosIntervals.filter(x => x != meetPos8) ++ allNegIntervals.filter(z => !testingNeg8.contains(z))
case _ => allPosIntervals.filter(x => x != meetPos8) ++ allNegIntervals.filter(z => !testingNeg8.contains(z))
}
case 9 =>
randomOrder match {
// Training set 9. All but meetPos9
case true => allPosIntervals.filter(x => x != meetPos9) ++ allNegIntervals.filter(z => !testingNeg9.contains(z))
case _ => List(Interval("meeting", 5680, 24480), Interval("meeting", 341480, 381440), Interval("meeting", 813040, 814920), Interval("meeting", 786240, 813080), Interval("meeting", 917320, 919800), Interval("meeting", 1009840, 1077680), Interval("meeting", 724120, 764080), Interval("meeting", 101480, 141440), Interval("meeting", 840200, 841320), Interval("meeting", 1030720, 1033920), Interval("meeting", 604120, 644080), Interval("meeting", 814880, 829400), Interval("meeting", 461480, 501440), Interval("meeting", 27240, 61520), Interval("meeting", 559160, 564160), Interval("meeting", 1019760, 1030640), Interval("meeting", 24440, 27280), Interval("meeting", 1033880, 1034760), Interval("meeting", 381480, 421440), Interval("meeting", 141480, 181440), Interval("meeting", 841280, 867600), Interval("meeting", 61480, 101440), Interval("meeting", 680, 5720), Interval("meeting", 509720, 559200), Interval("meeting", 221480, 261440), Interval("meeting", 959760, 979720), Interval("meeting", 829360, 833520), Interval("meeting", 507080, 509760), Interval("meeting", 564120, 604080), Interval("meeting", 301480, 341440), Interval("meeting", 833480, 840240), Interval("meeting", 764120, 785200), Interval("meeting", 181480, 221440), Interval("meeting", 684120, 724080), Interval("meeting", 785200, 786280), Interval("meeting", 644120, 684080), Interval("meeting", 421480, 461440), Interval("meeting", 261480, 301440), Interval("meeting", 1034720, 1102600), Interval("meeting", 999760, 1019720))
}
case 10 =>
randomOrder match {
// Training set 10. All but meetPos10
case true => allPosIntervals.filter(x => x != meetPos10) ++ allNegIntervals.filter(z => !testingNeg10.contains(z))
case _ => List(Interval("meeting", 5680, 24480), Interval("meeting", 684120, 724080), Interval("meeting", 764120, 785200), Interval("meeting", 813040, 814920), Interval("meeting", 919760, 939720), Interval("meeting", 644120, 684080), Interval("meeting", 181480, 221440), Interval("meeting", 1034720, 1102600), Interval("meeting", 1033880, 1034760), Interval("meeting", 814880, 829400), Interval("meeting", 559160, 564160), Interval("meeting", 867560, 868520), Interval("meeting", 841280, 867600), Interval("meeting", 604120, 644080), Interval("meeting", 680, 5720), Interval("meeting", 868480, 917360), Interval("meeting", 786240, 813080), Interval("meeting", 221480, 261440), Interval("meeting", 509720, 559200), Interval("meeting", 564120, 604080), Interval("meeting", 785200, 786280), Interval("meeting", 261480, 301440), Interval("meeting", 939760, 959720), Interval("meeting", 724120, 764080), Interval("meeting", 61480, 101440), Interval("meeting", 840200, 841320), Interval("meeting", 341480, 381440), Interval("meeting", 461480, 501440), Interval("meeting", 829360, 833520), Interval("meeting", 381480, 421440), Interval("meeting", 507080, 509760), Interval("meeting", 141480, 181440), Interval("meeting", 24440, 27280), Interval("meeting", 27240, 61520), Interval("meeting", 421480, 461440), Interval("meeting", 833480, 840240), Interval("meeting", 101480, 141440), Interval("meeting", 301480, 341440))
}
case _ => throw new RuntimeException("No such training set exists (use 1..10).")
}
val testing = fold match {
case 1 => List(meetPos1) ++ List(meetPos11) ++ testingNeg1
case 2 => List(meetPos2) ++ testingNeg2
case 3 => List(meetPos3) ++ testingNeg3
case 4 => List(meetPos4) ++ testingNeg4
case 5 => List(meetPos5) ++ testingNeg5
case 6 => List(meetPos6) ++ testingNeg6
case 7 => List(meetPos7) ++ testingNeg7
case 8 => List(meetPos8) ++ testingNeg8
case 9 => List(meetPos9) ++ testingNeg9
case 10 => List(meetPos10) ++ testingNeg10
}
if (randomOrder) new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
else new DataAsIntervals(trainingSet = training, testingSet = testing)
}
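// Hedged usage sketch (assumes DataAsIntervals exposes its trainingSet/testingSet
// constructor arguments):
// val fold1 = getMeetingTrainingData(fold = 1, randomOrder = true)
// fold1.trainingSet foreach println // shuffled training intervals of fold 1
// fold1.testingSet foreach println // meetPos1, meetPos11 and the testingNeg1 intervals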
val wholeCAVIARForManualRules = {
new DataAsIntervals(trainingSet = List(), testingSet = allPosIntervals ++ allNegIntervals)
}
val wholeCAVIAR1 = {
//new TrainingSet(trainingSet = List(allPosIntervals.head) ++ Random.shuffle(allPosIntervals++allNegIntervals), testingSet = allPosIntervals++allNegIntervals)
// that's the one used normally
new DataAsIntervals(trainingSet = List(allPosIntervals.head) ++ Random.shuffle(allPosIntervals ++ allNegIntervals), testingSet = List(Interval("meeting", 680, 1077680)))
// Interval("meeting", 24440, 27320) // stray expression, commented out: left active, it would be the block's value instead of the DataAsIntervals above
// that's for giving intervals explicitly to reproduce results. For instance, the one ordered as below should yield a theory with no example coverage at all
/*
new DataAsIntervals(trainingSet = List(meetPos1, meetNeg18, meetNeg14, meetNeg28, meetNeg17, meetPos7, meetNeg32,
meetNeg33, meetNeg20, meetNeg4, meetNeg21, meetNeg25, meetNeg23, meetNeg9, meetNeg19, meetNeg22, meetNeg7,
meetNeg15, meetPos9, meetNeg31, meetPos5, meetNeg2, meetPos8, meetNeg5, meetPos1, meetNeg3, meetPos4,
meetNeg11, meetNeg12, meetNeg29, meetNeg16, meetNeg6, meetNeg27, meetPos6, meetPos2, meetNeg1, meetNeg13,
meetNeg8, meetPos3, meetNeg30, meetNeg10, meetNeg24, meetPos10, meetNeg26
), testingSet = List(Interval("meeting",680,1077680)))
*/
}
}
| 24,377 | 103.626609 | 1,504 | scala |
OLED | OLED-master/src/main/scala/experiments/datautils/caviar_intervals/MeetingTrainingDistributed.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.caviar_intervals
import experiments.datautils.caviar_intervals.MeetingTrainingData._
import utils.DataUtils.{DataAsIntervals, Interval}
import scala.util.Random
/**
* Created by nkatz on 4/11/17.
*/
object MeetingTrainingDistributed {
def splitN[A](xs: List[A], n: Int) = {
val (quot, rem) = (xs.size / n, xs.size % n)
val (smaller, bigger) = xs.splitAt(xs.size - rem * (quot + 1))
smaller.grouped(quot) ++ bigger.grouped(quot + 1)
}
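// splitN partitions a list into n nearly even groups: the last (xs.size % n) groups get
// one extra element. e.g. splitN(List(1, 2, 3, 4, 5, 6, 7), 3) yields
// List(1, 2), List(3, 4), List(5, 6, 7). Note that n must not exceed xs.size,
// otherwise grouped(0) throws an IllegalArgumentException.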
/* Split large positive intervals into smaller ones to evenly distribute the data */
def splitInterval(x: Interval, byN: Int) = {
splitN((x.startPoint to x.endPoint by 40).toList, byN).toList.map(z => Interval(x.HLE, z.head, z.tail.reverse.head))
}
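// e.g. splitInterval(Interval("meeting", 0, 400), 2) gives
// List(Interval("meeting", 0, 160), Interval("meeting", 200, 400)): the eleven
// timepoints 0, 40, ..., 400 are split 5/6 and each chunk keeps its first and last
// point, so consecutive sub-intervals do not overlap or share a boundary timepoint.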
def getData(testingPos: Interval, testingNegs: List[Interval], coresNum: Int) = {
/*
* meetPos1 (size=470) and meetPos2 (size=858) are large positive intervals.
* Therefore, if they are in the training set, we break them into smaller
* intervals in order to evenly distribute the data across nodes.
* */
///*
def isInTrainingSet(x: Interval) = x != testingPos
/*
var allowedPos = allPosIntervals.filter(x => x!= testingPos)
if (isInTrainingSet(meetPos1)) {
allowedPos = allowedPos.filter(x => x!= meetPos1)
// split meetPos1
val subIntervals = splitInterval(meetPos1, 4)
allowedPos = subIntervals ++ allowedPos
}
if (isInTrainingSet(meetPos2)) {
allowedPos = allowedPos.filter(x => x!= meetPos2)
// split meetPos2
val subIntervals = splitInterval(meetPos2, 8)
allowedPos = subIntervals ++ allowedPos
}
*/
//*/
val allowedPos = allPosIntervals.filter(x => x != testingPos)
val allowedNegs = allNegIntervals.filter(z => !testingNegs.contains(z))
val positives = splitN(allowedPos, coresNum).toList
val negatives = splitN(allowedNegs, coresNum).toList
if (positives.length != negatives.length) throw new RuntimeException(s"Uneven split: ${positives.length} positive vs ${negatives.length} negative partitions.")
val zipped = positives zip negatives
val testing = List(testingPos) ++ testingNegs
val out = zipped.map(x => new DataAsIntervals(trainingSet = List(x._1.head) ++ Random.shuffle(x._1.tail ++ x._2), testingSet = testing))
out
}
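// Hedged usage sketch: one DataAsIntervals per worker node for fold 1 on four nodes
// (meetPos1 and testingNeg1 are fold 1's held-out test intervals, as in MeetingTrainingData):
// val perNodeData = getData(meetPos1, testingNeg1, coresNum = 4)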
def main(args: Array[String]) = {
/*
* Just for testing-debugging
*
* */
///*
val eight = EightFoldSplit.meetTrainingSet1
val four = FourFoldSplit.meetTrainingSet1
val two = TwoFoldSplit.meetTrainingSet1
println(eight)
println("")
println(four)
println("")
println(two)
//*/
//println(splitInterval(meetPos1, 4))
//println(Interval("meeting",5720,24480) == meetPos1)
}
object TwoFoldSplit {
val cores = 2
val meetTrainingSet1 = getData(meetPos1, testingNeg1, cores)
val meetTrainingSet2 = getData(meetPos2, testingNeg2, cores)
val meetTrainingSet3 = getData(meetPos3, testingNeg3, cores)
val meetTrainingSet4 = getData(meetPos4, testingNeg4, cores)
val meetTrainingSet5 = getData(meetPos5, testingNeg5, cores)
val meetTrainingSet6 = getData(meetPos6, testingNeg6, cores)
val meetTrainingSet7 = getData(meetPos7, testingNeg7, cores)
val meetTrainingSet8 = getData(meetPos8, testingNeg8, cores)
val meetTrainingSet9 = getData(meetPos9, testingNeg9, cores)
val meetTrainingSet10 = getData(meetPos10, testingNeg10, cores)
}
object FourFoldSplit {
val cores = 4
val meetTrainingSet1 = getData(meetPos1, testingNeg1, cores)
val meetTrainingSet2 = getData(meetPos2, testingNeg2, cores)
val meetTrainingSet3 = getData(meetPos3, testingNeg3, cores)
val meetTrainingSet4 = getData(meetPos4, testingNeg4, cores)
val meetTrainingSet5 = getData(meetPos5, testingNeg5, cores)
val meetTrainingSet6 = getData(meetPos6, testingNeg6, cores)
val meetTrainingSet7 = getData(meetPos7, testingNeg7, cores)
val meetTrainingSet8 = getData(meetPos8, testingNeg8, cores)
val meetTrainingSet9 = getData(meetPos9, testingNeg9, cores)
val meetTrainingSet10 = getData(meetPos10, testingNeg10, cores)
}
object EightFoldSplit {
val cores = 8
val meetTrainingSet1 = getData(meetPos1, testingNeg1, cores)
val meetTrainingSet2 = getData(meetPos2, testingNeg2, cores)
val meetTrainingSet3 = getData(meetPos3, testingNeg3, cores)
val meetTrainingSet4 = getData(meetPos4, testingNeg4, cores)
val meetTrainingSet5 = getData(meetPos5, testingNeg5, cores)
val meetTrainingSet6 = getData(meetPos6, testingNeg6, cores)
val meetTrainingSet7 = getData(meetPos7, testingNeg7, cores)
val meetTrainingSet8 = getData(meetPos8, testingNeg8, cores)
val meetTrainingSet9 = getData(meetPos9, testingNeg9, cores)
val meetTrainingSet10 = getData(meetPos10, testingNeg10, cores)
}
}
| 5,527 | 34.435897 | 140 | scala |
OLED | OLED-master/src/main/scala/experiments/datautils/caviar_intervals/MovingCleanTrainingData.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.caviar_intervals
import utils.DataUtils.{DataAsIntervals, Interval}
import scala.util.Random
/**
* Created by nkatz on 4/18/16.
*/
object MovingCleanTrainingData {
val movePos1 = Interval("moving", 2520, 6880) //length: 110
val movePos2 = Interval("moving", 24440, 27080) //length: 67
val movePos3 = Interval("moving", 224040, 225880) //length: 47
val movePos4 = Interval("moving", 547400, 551680) //length: 108
val movePos5 = Interval("moving", 551640, 551960) //length: 9
val movePos6 = Interval("moving", 551920, 552600) //length: 18
val movePos7 = Interval("moving", 552920, 553040) //length: 4
val movePos8 = Interval("moving", 553080, 553880) //length: 21
val movePos9 = Interval("moving", 557240, 559120) //length: 48
val movePos10 = Interval("moving", 604240, 606720) //length: 63
val movePos11 = Interval("moving", 785080, 785480) //length: 11
val movePos12 = Interval("moving", 786360, 791880) //length: 139
val movePos13 = Interval("moving", 797120, 800440) //length: 84
val movePos14 = Interval("moving", 814840, 829480) //length: 367
val movePos15 = Interval("moving", 841120, 841240) //length: 4
val movePos16 = Interval("moving", 841240, 850120) //length: 223
val movePos17 = Interval("moving", 850200, 852520) //length: 59
val movePos18 = Interval("moving", 872000, 881640) //length: 242
val movePos19 = Interval("moving", 881640, 882680) //length: 27
val movePos20 = Interval("moving", 882840, 883360) //length: 14
val movePos21 = Interval("moving", 884160, 884280) //length: 4
val movePos22 = Interval("moving", 884240, 884560) //length: 9
val movePos23 = Interval("moving", 885480, 892360) //length: 173
val movePos24 = Interval("moving", 895480, 896240) //length: 20
val movePos25 = Interval("moving", 919920, 922560) //length: 67
val movePos26 = Interval("moving", 950600, 951680) //length: 28
val movePos27 = Interval("moving", 960840, 961160) // length: 9
val movePos28 = Interval("moving", 1010840, 1013720) //length: 73
val movePos29 = Interval("moving", 1037560, 1038600) //length: 27
val movePos30 = Interval("moving", 1045200, 1045320) //length: 4
val movePos31 = Interval("moving", 1071840, 1075240) //length: 86
val allPosIntervals = List(movePos1, movePos2, movePos3, movePos4, movePos5, movePos6, movePos7, movePos8, movePos9, movePos10, movePos11, movePos12, movePos13, movePos14,
movePos15, movePos16, movePos17, movePos18, movePos19, movePos20, movePos21, movePos22, movePos23, movePos24, movePos25, movePos26, movePos27, movePos28, movePos29,
movePos30, movePos31)
val testingPos1 = List(movePos1, movePos2, movePos3)
val testingPos2 = List(movePos4, movePos5, movePos6)
val testingPos3 = List(movePos7, movePos8, movePos9)
val testingPos4 = List(movePos10, movePos11, movePos12)
val testingPos5 = List(movePos13, movePos14, movePos15)
val testingPos6 = List(movePos16, movePos17, movePos18)
val testingPos7 = List(movePos19, movePos20, movePos21)
val testingPos8 = List(movePos22, movePos23, movePos24)
val testingPos9 = List(movePos25, movePos26, movePos27)
val testingPos10 = List(movePos28, movePos29, movePos30, movePos31)
val moveNeg1 = Interval("moving", 680, 2560) //length: 48
val moveNeg2 = Interval("moving", 6840, 24480) //length: 442
val moveNeg3 = Interval("moving", 27040, 224080) //length: 4927
val moveNeg4 = Interval("moving", 225840, 547440) //length: 8041
val moveNeg5 = Interval("moving", 551640, 551680) //length: 2
val moveNeg6 = Interval("moving", 551920, 551960) //length: 2
val moveNeg7 = Interval("moving", 552560, 552960) //length: 11
val moveNeg8 = Interval("moving", 553000, 553120) //length: 4
val moveNeg9 = Interval("moving", 553840, 557280) //length: 87
val moveNeg10 = Interval("moving", 559080, 604280) //length: 1131
val moveNeg11 = Interval("moving", 606680, 785120) //length: 4462
val moveNeg12 = Interval("moving", 785440, 786400) //length: 25
val moveNeg13 = Interval("moving", 791840, 797160) //length: 134
val moveNeg14 = Interval("moving", 800400, 814880) //length: 363
val moveNeg15 = Interval("moving", 829440, 841160) // length: 294
val moveNeg16 = Interval("moving", 841200, 841280) // length: 3
val moveNeg17 = Interval("moving", 850080, 850240) //length: 5
val moveNeg18 = Interval("moving", 852480, 872040) //length: 490
val moveNeg19 = Interval("moving", 881600, 881680) //length: 3
val moveNeg20 = Interval("moving", 882640, 882880) //length: 7
val moveNeg21 = Interval("moving", 883320, 884200) // length: 23
val moveNeg22 = Interval("moving", 884240, 884280) //length: 2
val moveNeg23 = Interval("moving", 884520, 885520) //length: 26
val moveNeg24 = Interval("moving", 892320, 895520) //length: 81
val moveNeg25 = Interval("moving", 896200, 919960) //length: 595
val moveNeg26 = Interval("moving", 922520, 950640) //length: 704
val moveNeg27 = Interval("moving", 951640, 960880) //length: 232
val moveNeg28 = Interval("moving", 961120, 1010880) //length: 1245
val moveNeg29 = Interval("moving", 1013680, 1037600) //length: 599
val moveNeg30 = Interval("moving", 1038560, 1045240) //length: 168
val moveNeg31 = Interval("moving", 1045280, 1071880) //length: 666
val moveNeg32 = Interval("moving", 1075200, 1077680) //length: 63
val allNegIntervals = List(moveNeg1, moveNeg2, moveNeg3, moveNeg4, moveNeg5, moveNeg6, moveNeg7, moveNeg8, moveNeg9, moveNeg10, moveNeg11, moveNeg12, moveNeg13,
moveNeg14, moveNeg15, moveNeg16, moveNeg17, moveNeg18, moveNeg19, moveNeg20, moveNeg21, moveNeg22, moveNeg23, moveNeg24, moveNeg25, moveNeg26, moveNeg27,
moveNeg28, moveNeg29, moveNeg30, moveNeg31, moveNeg32)
val testingNeg1 = List(moveNeg3)
val testingNeg2 = List(moveNeg4)
val testingNeg3 = List(moveNeg1, moveNeg2, moveNeg5, moveNeg6)
val testingNeg4 = List(moveNeg7, moveNeg8, moveNeg9, moveNeg18, moveNeg19, moveNeg20)
val testingNeg5 = List(moveNeg10)
val testingNeg6 = List(moveNeg11)
val testingNeg7 = List(moveNeg12, moveNeg13, moveNeg14, moveNeg15, moveNeg16, moveNeg17)
val testingNeg8 = List(moveNeg21, moveNeg22, moveNeg23, moveNeg24, moveNeg25)
val testingNeg9 = List(moveNeg26, moveNeg27, moveNeg29, moveNeg30, moveNeg32)
val testingNeg10 = List(moveNeg28, moveNeg31)
val moveTrainingSet1 = {
val training = allPosIntervals.filter(x => !testingPos1.contains(x)) ++ allNegIntervals.filter(z => !testingNeg1.contains(z))
val testing = testingPos1 ++ testingNeg1
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val moveTrainingSet2 = {
val training = allPosIntervals.filter(x => !testingPos2.contains(x)) ++ allNegIntervals.filter(z => !testingNeg2.contains(z))
val testing = testingPos2 ++ testingNeg2
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val moveTrainingSet3 = {
val training = allPosIntervals.filter(x => !testingPos3.contains(x)) ++ allNegIntervals.filter(z => !testingNeg3.contains(z))
val testing = testingPos3 ++ testingNeg3
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val moveTrainingSet4 = {
val training = allPosIntervals.filter(x => !testingPos4.contains(x)) ++ allNegIntervals.filter(z => !testingNeg4.contains(z))
val testing = testingPos4 ++ testingNeg4
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val moveTrainingSet5 = {
val training = allPosIntervals.filter(x => !testingPos5.contains(x)) ++ allNegIntervals.filter(z => !testingNeg5.contains(z))
val testing = testingPos5 ++ testingNeg5
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val moveTrainingSet6 = {
val training = allPosIntervals.filter(x => !testingPos6.contains(x)) ++ allNegIntervals.filter(z => !testingNeg6.contains(z))
val testing = testingPos6 ++ testingNeg6
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val moveTrainingSet7 = {
val training = allPosIntervals.filter(x => !testingPos7.contains(x)) ++ allNegIntervals.filter(z => !testingNeg7.contains(z))
val testing = testingPos7 ++ testingNeg7
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val moveTrainingSet8 = {
val training = allPosIntervals.filter(x => !testingPos8.contains(x)) ++ allNegIntervals.filter(z => !testingNeg8.contains(z))
val testing = testingPos8 ++ testingNeg8
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val moveTrainingSet9 = {
val training = allPosIntervals.filter(x => !testingPos9.contains(x)) ++ allNegIntervals.filter(z => !testingNeg9.contains(z))
val testing = testingPos9 ++ testingNeg9
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
val moveTrainingSet10 = {
val training = allPosIntervals.filter(x => !testingPos10.contains(x)) ++ allNegIntervals.filter(z => !testingNeg10.contains(z))
val testing = testingPos10 ++ testingNeg10
new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
}
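// The ten folds above all follow the same pattern; a hedged sketch of an equivalent
// fold builder (buildFold is not part of the original code):
// def buildFold(testPos: List[Interval], testNeg: List[Interval]): DataAsIntervals = {
//   val training = allPosIntervals.filterNot(testPos.contains) ++
//     allNegIntervals.filterNot(testNeg.contains)
//   new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail),
//     testingSet = testPos ++ testNeg)
// }
// e.g. moveTrainingSet1 is equivalent to buildFold(testingPos1, testingNeg1).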
val allTrainingSets = List(moveTrainingSet1, moveTrainingSet2, moveTrainingSet3, moveTrainingSet4, moveTrainingSet5, moveTrainingSet6, moveTrainingSet7, moveTrainingSet8,
moveTrainingSet9, moveTrainingSet10)
}
| 10,550 | 53.953125 | 193 | scala |
OLED | OLED-master/src/main/scala/experiments/datautils/caviar_intervals/MovingTrainingData.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.caviar_intervals
import utils.DataUtils.{DataAsIntervals, Interval}
import scala.util.Random
/**
* Created by nkatz on 3/22/16.
*/
object MovingTrainingData {
/**
* To find the intervals call:
*
* val intervals = iled.utils.CaviarUtils.getPositiveNegativeIntervals("meeting")
* val positiveIntervals = intervals._1
* val negativeIntervals = intervals._2
*
*
*/
val movePos1 = Interval("moving", 2480, 5760) //length: 83
val movePos2 = Interval("moving", 24400, 27320) // length: 74
val movePos3 = Interval("moving", 460600, 464160) // length: 90
val movePos4 = Interval("moving", 547360, 559240) // length: 298
val movePos5 = Interval("moving", 565760, 568120) // length: 60
val movePos6 = Interval("moving", 786200, 791880) // length: 143
val movePos7 = Interval("moving", 797080, 800440) // length: 85
val movePos8 = Interval("moving", 814840, 829440) // length: 366
val movePos9 = Interval("moving", 841240, 844760) // length: 89
val movePos10 = Interval("moving", 868440, 874320) // length: 148
val movePos11 = Interval("moving", 896880, 909480) // length: 316
val movePos12 = Interval("moving", 910360, 917400) // length: 177
// To break large intervals into smaller ones of 1000 data points, use this (40 is the step):
// List.range(568080,786280,40).grouped(1000).map(x => (x.head,x.tail.reverse.head)) foreach println
val moveNeg1 = Interval("moving", 680, 2520) // 47
val moveNeg2 = Interval("moving", 5720, 24440) // 469
val moveNeg3 = Interval("moving", 27280, 67240) // 1000
val moveNeg4 = Interval("moving", 67280, 107240) //1000
val moveNeg5 = Interval("moving", 107280, 147240) //1000
val moveNeg6 = Interval("moving", 147280, 187240) //1000
val moveNeg7 = Interval("moving", 187280, 227240) //1000
val moveNeg8 = Interval("moving", 227280, 267240) //1000
val moveNeg9 = Interval("moving", 267280, 307240) //1000
val moveNeg10 = Interval("moving", 307280, 347240) //1000
val moveNeg11 = Interval("moving", 347280, 387240) //1000
val moveNeg12 = Interval("moving", 387280, 427240) //1000
val moveNeg13 = Interval("moving", 427280, 460600) //1000
val moveNeg14 = Interval("moving", 464120, 547400) //2083
val moveNeg15 = Interval("moving", 559200, 565800) //166
val moveNeg16 = Interval("moving", 568080, 608040) //1000
val moveNeg17 = Interval("moving", 608080, 648040) //1000
val moveNeg18 = Interval("moving", 648080, 688040) //1000
val moveNeg19 = Interval("moving", 688080, 728040) //1000
val moveNeg20 = Interval("moving", 728080, 768040) //1000
val moveNeg21 = Interval("moving", 768080, 786200) //1000
val moveNeg22 = Interval("moving", 791840, 797120) //133
val moveNeg23 = Interval("moving", 800400, 814880) //363
val moveNeg24 = Interval("moving", 829400, 841280) //298
val moveNeg25 = Interval("moving", 844720, 852680) //200
val moveNeg26 = Interval("moving", 852720, 860680) //200
val moveNeg27 = Interval("moving", 860720, 868440) //195
val moveNeg28 = Interval("moving", 874280, 882240) //200
val moveNeg29 = Interval("moving", 882280, 890240) //200
val moveNeg30 = Interval("moving", 890280, 896880) //167
val moveNeg31 = Interval("moving", 909440, 910400) //25
val moveNeg32 = Interval("moving", 917360, 957320) //1000
val moveNeg33 = Interval("moving", 957360, 997320) //1000
val moveNeg34 = Interval("moving", 997360, 1037320) //1000
val moveNeg35 = Interval("moving", 1037360, 1045320) //200
val moveNeg36 = Interval("moving", 1045360, 1053320) //200
val moveNeg37 = Interval("moving", 1053360, 1061320) //200
val moveNeg38 = Interval("moving", 1061360, 1069320) //200
val moveNeg39 = Interval("moving", 1069360, 1077280) //200
val allNegIntervals = List(moveNeg1, moveNeg2, moveNeg3, moveNeg4, moveNeg5, moveNeg6, moveNeg7, moveNeg8, moveNeg9,
moveNeg10, moveNeg11, moveNeg12, moveNeg13, moveNeg14, moveNeg15, moveNeg16, moveNeg17, moveNeg18, moveNeg19, moveNeg20,
moveNeg21, moveNeg22, moveNeg23, moveNeg24, moveNeg25, moveNeg26, moveNeg27,
moveNeg28, moveNeg29, moveNeg30, moveNeg31, moveNeg32, moveNeg33, moveNeg34, moveNeg35, moveNeg36, moveNeg37, moveNeg38, moveNeg39)
val allPosIntervals = List(movePos1, movePos2, movePos3, movePos4, movePos5, movePos6, movePos7,
movePos8, movePos9, movePos10, movePos11, movePos12)
val testingNeg1 = List(moveNeg1, moveNeg2, moveNeg3, moveNeg32)
val testingNeg2 = List(moveNeg4, moveNeg5, moveNeg6, moveNeg26, moveNeg33)
val testingNeg3 = List(moveNeg7, moveNeg8, moveNeg9, moveNeg27)
val testingNeg4 = List(moveNeg10, moveNeg11, moveNeg12, moveNeg28)
val testingNeg5 = List(moveNeg13, moveNeg14, moveNeg15, moveNeg35)
val testingNeg6 = List(moveNeg16, moveNeg17, moveNeg18, moveNeg36)
val testingNeg7 = List(moveNeg19, moveNeg20, moveNeg21, moveNeg37)
val testingNeg8 = List(moveNeg22, moveNeg23, moveNeg24, moveNeg38)
val testingNeg9 = List(moveNeg39, moveNeg29, moveNeg30, moveNeg31)
val testingNeg10 = List(moveNeg28, moveNeg29, moveNeg30, moveNeg34)
val allNegativeTestingSetIntervals = List(testingNeg1, testingNeg2, testingNeg3, testingNeg4, testingNeg5, testingNeg6, testingNeg7, testingNeg8, testingNeg9, testingNeg10)
def getMovingTrainingData(fold: Int, randomOrder: Boolean) = {
val training = fold match {
case 1 =>
randomOrder match {
// Training set 1. All but movePos1 & movePos12
//----------------------------------------------
case true => allPosIntervals.filter(x => x != movePos1 && x != movePos12) ++ allNegIntervals.filter(z => !testingNeg1.contains(z))
case _ =>
List(Interval("moving", 24400, 27320), Interval("moving", 267280, 307240), Interval("moving", 829400, 841280), Interval("moving", 547360, 559240), Interval("moving", 909440, 910400), Interval("moving", 997360, 1037320), Interval("moving", 786200, 791880), Interval("moving", 814840, 829440), Interval("moving", 460600, 464160), Interval("moving", 227280, 267240), Interval("moving", 896880, 909480), Interval("moving", 1045360, 1053320), Interval("moving", 464120, 547400), Interval("moving", 107280, 147240), Interval("moving", 565760, 568120), Interval("moving", 957360, 997320), Interval("moving", 882280, 890240), Interval("moving", 147280, 187240), Interval("moving", 387280, 427240), Interval("moving", 890280, 896880), Interval("moving", 559200, 565800), Interval("moving", 852720, 860680), Interval("moving", 648080, 688040), Interval("moving", 768080, 786200), Interval("moving", 800400, 814880), Interval("moving", 791840, 797120), Interval("moving", 187280, 227240), Interval("moving", 728080, 768040), Interval("moving", 1061360, 1069320), Interval("moving", 67280, 107240), Interval("moving", 608080, 648040), Interval("moving", 1069360, 1077280), Interval("moving", 1037360, 1045320), Interval("moving", 568080, 608040), Interval("moving", 874280, 882240), Interval("moving", 427280, 460600), Interval("moving", 868440, 874320), Interval("moving", 1053360, 1061320), Interval("moving", 860720, 868440), Interval("moving", 841240, 844760), Interval("moving", 797080, 800440), Interval("moving", 688080, 728040), Interval("moving", 844720, 852680), Interval("moving", 347280, 387240), Interval("moving", 307280, 347240))
}
case 2 =>
randomOrder match {
// Training set 2. All but movePos2
//----------------------------------
case true => allPosIntervals.filter(x => x != movePos2) ++ allNegIntervals.filter(z => !testingNeg2.contains(z))
case _ =>
List(Interval("moving", 2480, 5760), Interval("moving", 910360, 917400), Interval("moving", 227280, 267240), Interval("moving", 917360, 957320), Interval("moving", 791840, 797120), Interval("moving", 768080, 786200), Interval("moving", 5720, 24440), Interval("moving", 1069360, 1077280), Interval("moving", 464120, 547400), Interval("moving", 568080, 608040), Interval("moving", 844720, 852680), Interval("moving", 680, 2520), Interval("moving", 347280, 387240), Interval("moving", 559200, 565800), Interval("moving", 868440, 874320), Interval("moving", 307280, 347240), Interval("moving", 786200, 791880), Interval("moving", 997360, 1037320), Interval("moving", 896880, 909480), Interval("moving", 874280, 882240), Interval("moving", 1061360, 1069320), Interval("moving", 547360, 559240), Interval("moving", 829400, 841280), Interval("moving", 800400, 814880), Interval("moving", 427280, 460600), Interval("moving", 267280, 307240), Interval("moving", 882280, 890240), Interval("moving", 565760, 568120), Interval("moving", 27280, 67240), Interval("moving", 860720, 868440), Interval("moving", 1053360, 1061320), Interval("moving", 387280, 427240), Interval("moving", 187280, 227240), Interval("moving", 797080, 800440), Interval("moving", 460600, 464160), Interval("moving", 841240, 844760), Interval("moving", 648080, 688040), Interval("moving", 688080, 728040), Interval("moving", 890280, 896880), Interval("moving", 1037360, 1045320), Interval("moving", 1045360, 1053320), Interval("moving", 909440, 910400), Interval("moving", 814840, 829440), Interval("moving", 608080, 648040), Interval("moving", 728080, 768040))
}
case 3 =>
randomOrder match {
// Training set 3. All but movePos3
//----------------------------------
case true => allPosIntervals.filter(x => x != movePos3) ++ allNegIntervals.filter(z => !testingNeg3.contains(z))
case _ =>
List(Interval("moving", 2480, 5760), Interval("moving", 874280, 882240), Interval("moving", 559200, 565800), Interval("moving", 680, 2520), Interval("moving", 841240, 844760), Interval("moving", 387280, 427240), Interval("moving", 852720, 860680), Interval("moving", 347280, 387240), Interval("moving", 24400, 27320), Interval("moving", 307280, 347240), Interval("moving", 814840, 829440), Interval("moving", 5720, 24440), Interval("moving", 427280, 460600), Interval("moving", 565760, 568120), Interval("moving", 147280, 187240), Interval("moving", 1045360, 1053320), Interval("moving", 829400, 841280), Interval("moving", 1053360, 1061320), Interval("moving", 844720, 852680), Interval("moving", 1037360, 1045320), Interval("moving", 997360, 1037320), Interval("moving", 791840, 797120), Interval("moving", 1061360, 1069320), Interval("moving", 688080, 728040), Interval("moving", 868440, 874320), Interval("moving", 800400, 814880), Interval("moving", 882280, 890240), Interval("moving", 568080, 608040), Interval("moving", 917360, 957320), Interval("moving", 890280, 896880), Interval("moving", 107280, 147240), Interval("moving", 464120, 547400), Interval("moving", 547360, 559240), Interval("moving", 896880, 909480), Interval("moving", 957360, 997320), Interval("moving", 728080, 768040), Interval("moving", 909440, 910400), Interval("moving", 27280, 67240), Interval("moving", 608080, 648040), Interval("moving", 768080, 786200), Interval("moving", 1069360, 1077280), Interval("moving", 648080, 688040), Interval("moving", 786200, 791880), Interval("moving", 910360, 917400), Interval("moving", 67280, 107240), Interval("moving", 797080, 800440))
}
case 4 =>
randomOrder match {
// Training set 4. All but movePos4
//----------------------------------
case true => allPosIntervals.filter(x => x != movePos4) ++ allNegIntervals.filter(z => !testingNeg4.contains(z))
case _ =>
List(Interval("moving", 2480, 5760), Interval("moving", 910360, 917400), Interval("moving", 917360, 957320), Interval("moving", 267280, 307240), Interval("moving", 464120, 547400), Interval("moving", 565760, 568120), Interval("moving", 1069360, 1077280), Interval("moving", 800400, 814880), Interval("moving", 868440, 874320), Interval("moving", 797080, 800440), Interval("moving", 67280, 107240), Interval("moving", 187280, 227240), Interval("moving", 227280, 267240), Interval("moving", 841240, 844760), Interval("moving", 147280, 187240), Interval("moving", 786200, 791880), Interval("moving", 688080, 728040), Interval("moving", 997360, 1037320), Interval("moving", 460600, 464160), Interval("moving", 957360, 997320), Interval("moving", 1045360, 1053320), Interval("moving", 107280, 147240), Interval("moving", 814840, 829440), Interval("moving", 909440, 910400), Interval("moving", 24400, 27320), Interval("moving", 844720, 852680), Interval("moving", 27280, 67240), Interval("moving", 427280, 460600), Interval("moving", 559200, 565800), Interval("moving", 896880, 909480), Interval("moving", 791840, 797120), Interval("moving", 852720, 860680), Interval("moving", 5720, 24440), Interval("moving", 1061360, 1069320), Interval("moving", 890280, 896880), Interval("moving", 608080, 648040), Interval("moving", 860720, 868440), Interval("moving", 768080, 786200), Interval("moving", 648080, 688040), Interval("moving", 568080, 608040), Interval("moving", 680, 2520), Interval("moving", 829400, 841280), Interval("moving", 728080, 768040), Interval("moving", 882280, 890240), Interval("moving", 1037360, 1045320), Interval("moving", 1053360, 1061320))
}
case 5 =>
randomOrder match {
// Training set 5. All but movePos5
//----------------------------------
case true => allPosIntervals.filter(x => x != movePos5) ++ allNegIntervals.filter(z => !testingNeg5.contains(z))
case _ =>
List(Interval("moving", 2480, 5760), Interval("moving", 909440, 910400), Interval("moving", 1045360, 1053320), Interval("moving", 860720, 868440), Interval("moving", 814840, 829440), Interval("moving", 797080, 800440), Interval("moving", 844720, 852680), Interval("moving", 460600, 464160), Interval("moving", 387280, 427240), Interval("moving", 890280, 896880), Interval("moving", 1069360, 1077280), Interval("moving", 874280, 882240), Interval("moving", 267280, 307240), Interval("moving", 882280, 890240), Interval("moving", 910360, 917400), Interval("moving", 608080, 648040), Interval("moving", 917360, 957320), Interval("moving", 24400, 27320), Interval("moving", 5720, 24440), Interval("moving", 187280, 227240), Interval("moving", 786200, 791880), Interval("moving", 147280, 187240), Interval("moving", 227280, 267240), Interval("moving", 852720, 860680), Interval("moving", 648080, 688040), Interval("moving", 791840, 797120), Interval("moving", 107280, 147240), Interval("moving", 680, 2520), Interval("moving", 67280, 107240), Interval("moving", 688080, 728040), Interval("moving", 997360, 1037320), Interval("moving", 27280, 67240), Interval("moving", 829400, 841280), Interval("moving", 896880, 909480), Interval("moving", 868440, 874320), Interval("moving", 841240, 844760), Interval("moving", 347280, 387240), Interval("moving", 1061360, 1069320), Interval("moving", 1053360, 1061320), Interval("moving", 957360, 997320), Interval("moving", 768080, 786200), Interval("moving", 800400, 814880), Interval("moving", 568080, 608040), Interval("moving", 728080, 768040), Interval("moving", 307280, 347240), Interval("moving", 547360, 559240))
}
case 6 =>
randomOrder match {
// Training set 6. All but movePos6
//----------------------------------
case true => allPosIntervals.filter(x => x != movePos6) ++ allNegIntervals.filter(z => !testingNeg6.contains(z))
case _ =>
List(Interval("moving", 2480, 5760), Interval("moving", 5720, 24440), Interval("moving", 464120, 547400), Interval("moving", 844720, 852680), Interval("moving", 852720, 860680), Interval("moving", 882280, 890240), Interval("moving", 829400, 841280), Interval("moving", 680, 2520), Interval("moving", 860720, 868440), Interval("moving", 791840, 797120), Interval("moving", 460600, 464160), Interval("moving", 868440, 874320), Interval("moving", 347280, 387240), Interval("moving", 841240, 844760), Interval("moving", 227280, 267240), Interval("moving", 147280, 187240), Interval("moving", 728080, 768040), Interval("moving", 957360, 997320), Interval("moving", 559200, 565800), Interval("moving", 24400, 27320), Interval("moving", 1037360, 1045320), Interval("moving", 800400, 814880), Interval("moving", 874280, 882240), Interval("moving", 768080, 786200), Interval("moving", 909440, 910400), Interval("moving", 67280, 107240), Interval("moving", 910360, 917400), Interval("moving", 565760, 568120), Interval("moving", 1061360, 1069320), Interval("moving", 997360, 1037320), Interval("moving", 1069360, 1077280), Interval("moving", 387280, 427240), Interval("moving", 688080, 728040), Interval("moving", 896880, 909480), Interval("moving", 814840, 829440), Interval("moving", 547360, 559240), Interval("moving", 307280, 347240), Interval("moving", 267280, 307240), Interval("moving", 187280, 227240), Interval("moving", 1053360, 1061320), Interval("moving", 107280, 147240), Interval("moving", 797080, 800440), Interval("moving", 427280, 460600), Interval("moving", 27280, 67240), Interval("moving", 917360, 957320), Interval("moving", 890280, 896880))
}
case 7 =>
randomOrder match {
// Training set 7. All but movePos7
//----------------------------------
case true => allPosIntervals.filter(x => x != movePos7) ++ allNegIntervals.filter(z => !testingNeg7.contains(z))
case _ =>
List(Interval("moving", 2480, 5760), Interval("moving", 107280, 147240), Interval("moving", 547360, 559240), Interval("moving", 648080, 688040), Interval("moving", 427280, 460600), Interval("moving", 464120, 547400), Interval("moving", 559200, 565800), Interval("moving", 307280, 347240), Interval("moving", 860720, 868440), Interval("moving", 868440, 874320), Interval("moving", 568080, 608040), Interval("moving", 680, 2520), Interval("moving", 1069360, 1077280), Interval("moving", 909440, 910400), Interval("moving", 800400, 814880), Interval("moving", 565760, 568120), Interval("moving", 786200, 791880), Interval("moving", 814840, 829440), Interval("moving", 829400, 841280), Interval("moving", 267280, 307240), Interval("moving", 608080, 648040), Interval("moving", 890280, 896880), Interval("moving", 27280, 67240), Interval("moving", 844720, 852680), Interval("moving", 5720, 24440), Interval("moving", 1037360, 1045320), Interval("moving", 917360, 957320), Interval("moving", 147280, 187240), Interval("moving", 227280, 267240), Interval("moving", 852720, 860680), Interval("moving", 187280, 227240), Interval("moving", 841240, 844760), Interval("moving", 957360, 997320), Interval("moving", 882280, 890240), Interval("moving", 1045360, 1053320), Interval("moving", 997360, 1037320), Interval("moving", 874280, 882240), Interval("moving", 910360, 917400), Interval("moving", 896880, 909480), Interval("moving", 24400, 27320), Interval("moving", 67280, 107240), Interval("moving", 387280, 427240), Interval("moving", 460600, 464160), Interval("moving", 791840, 797120), Interval("moving", 1061360, 1069320), Interval("moving", 347280, 387240))
}
case 8 =>
randomOrder match {
// Training set 8. All but movePos8
//----------------------------------
case true => allPosIntervals.filter(x => x != movePos8) ++ allNegIntervals.filter(z => !testingNeg8.contains(z))
case _ =>
List(Interval("moving", 2480, 5760), Interval("moving", 797080, 800440), Interval("moving", 688080, 728040), Interval("moving", 648080, 688040), Interval("moving", 227280, 267240), Interval("moving", 957360, 997320), Interval("moving", 852720, 860680), Interval("moving", 147280, 187240), Interval("moving", 24400, 27320), Interval("moving", 27280, 67240), Interval("moving", 187280, 227240), Interval("moving", 786200, 791880), Interval("moving", 844720, 852680), Interval("moving", 5720, 24440), Interval("moving", 347280, 387240), Interval("moving", 568080, 608040), Interval("moving", 387280, 427240), Interval("moving", 307280, 347240), Interval("moving", 460600, 464160), Interval("moving", 267280, 307240), Interval("moving", 565760, 568120), Interval("moving", 1053360, 1061320), Interval("moving", 464120, 547400), Interval("moving", 874280, 882240), Interval("moving", 917360, 957320), Interval("moving", 841240, 844760), Interval("moving", 1069360, 1077280), Interval("moving", 547360, 559240), Interval("moving", 890280, 896880), Interval("moving", 882280, 890240), Interval("moving", 67280, 107240), Interval("moving", 868440, 874320), Interval("moving", 1045360, 1053320), Interval("moving", 896880, 909480), Interval("moving", 909440, 910400), Interval("moving", 1037360, 1045320), Interval("moving", 107280, 147240), Interval("moving", 728080, 768040), Interval("moving", 910360, 917400), Interval("moving", 860720, 868440), Interval("moving", 680, 2520), Interval("moving", 997360, 1037320), Interval("moving", 608080, 648040), Interval("moving", 559200, 565800), Interval("moving", 427280, 460600), Interval("moving", 768080, 786200))
}
case 9 =>
randomOrder match {
// Training set 9. All but movePos9 & movePos11
//----------------------------------------------
case true => allPosIntervals.filter(x => x != movePos9 && x != movePos11) ++ allNegIntervals.filter(z => !testingNeg9.contains(z))
case _ =>
List(Interval("moving", 2480, 5760), Interval("moving", 547360, 559240), Interval("moving", 460600, 464160), Interval("moving", 107280, 147240), Interval("moving", 768080, 786200), Interval("moving", 917360, 957320), Interval("moving", 5720, 24440), Interval("moving", 24400, 27320), Interval("moving", 427280, 460600), Interval("moving", 1045360, 1053320), Interval("moving", 27280, 67240), Interval("moving", 791840, 797120), Interval("moving", 565760, 568120), Interval("moving", 648080, 688040), Interval("moving", 786200, 791880), Interval("moving", 1037360, 1045320), Interval("moving", 1061360, 1069320), Interval("moving", 829400, 841280), Interval("moving", 67280, 107240), Interval("moving", 147280, 187240), Interval("moving", 568080, 608040), Interval("moving", 852720, 860680), Interval("moving", 728080, 768040), Interval("moving", 307280, 347240), Interval("moving", 464120, 547400), Interval("moving", 347280, 387240), Interval("moving", 860720, 868440), Interval("moving", 868440, 874320), Interval("moving", 559200, 565800), Interval("moving", 910360, 917400), Interval("moving", 227280, 267240), Interval("moving", 797080, 800440), Interval("moving", 957360, 997320), Interval("moving", 387280, 427240), Interval("moving", 997360, 1037320), Interval("moving", 1053360, 1061320), Interval("moving", 688080, 728040), Interval("moving", 800400, 814880), Interval("moving", 608080, 648040), Interval("moving", 187280, 227240), Interval("moving", 267280, 307240), Interval("moving", 680, 2520), Interval("moving", 814840, 829440), Interval("moving", 874280, 882240))
}
case 10 =>
randomOrder match {
// Training set 10. All but movePos10
//----------------------------------
case true => allPosIntervals.filter(x => x != movePos10) ++ allNegIntervals.filter(z => !testingNeg10.contains(z))
case _ =>
List(Interval("moving", 2480, 5760), Interval("moving", 728080, 768040), Interval("moving", 427280, 460600), Interval("moving", 917360, 957320), Interval("moving", 347280, 387240), Interval("moving", 559200, 565800), Interval("moving", 680, 2520), Interval("moving", 829400, 841280), Interval("moving", 568080, 608040), Interval("moving", 267280, 307240), Interval("moving", 909440, 910400), Interval("moving", 814840, 829440), Interval("moving", 460600, 464160), Interval("moving", 860720, 868440), Interval("moving", 227280, 267240), Interval("moving", 910360, 917400), Interval("moving", 1037360, 1045320), Interval("moving", 688080, 728040), Interval("moving", 800400, 814880), Interval("moving", 768080, 786200), Interval("moving", 24400, 27320), Interval("moving", 608080, 648040), Interval("moving", 565760, 568120), Interval("moving", 1053360, 1061320), Interval("moving", 1061360, 1069320), Interval("moving", 797080, 800440), Interval("moving", 1069360, 1077280), Interval("moving", 896880, 909480), Interval("moving", 1045360, 1053320), Interval("moving", 786200, 791880), Interval("moving", 852720, 860680), Interval("moving", 27280, 67240), Interval("moving", 547360, 559240), Interval("moving", 387280, 427240), Interval("moving", 464120, 547400), Interval("moving", 107280, 147240), Interval("moving", 147280, 187240), Interval("moving", 957360, 997320), Interval("moving", 67280, 107240), Interval("moving", 187280, 227240), Interval("moving", 841240, 844760), Interval("moving", 5720, 24440), Interval("moving", 307280, 347240), Interval("moving", 844720, 852680), Interval("moving", 648080, 688040), Interval("moving", 791840, 797120))
}
case _ => throw new RuntimeException("No such training set exists (use 1..10).")
}
val testing = fold match {
case 1 => List(movePos1, movePos12) ++ testingNeg1
case 2 => List(movePos2) ++ testingNeg2
case 3 => List(movePos3) ++ testingNeg3
case 4 => List(movePos4) ++ testingNeg4
case 5 => List(movePos5) ++ testingNeg5
case 6 => List(movePos6) ++ testingNeg6
case 7 => List(movePos7) ++ testingNeg7
case 8 => List(movePos8) ++ testingNeg8
case 9 => List(movePos9, movePos11) ++ testingNeg9
      case 10 => List(movePos10) ++ testingNeg10
      case _ => throw new RuntimeException("No such test set exists (use 1..10).")
    }
if (randomOrder) new DataAsIntervals(trainingSet = List(training.head) ++ Random.shuffle(training.tail), testingSet = testing)
else new DataAsIntervals(trainingSet = training, testingSet = testing)
}
val wholeCAVIAR1 = {
new DataAsIntervals(trainingSet = List(allPosIntervals.head) ++ Random.shuffle(allPosIntervals ++ allNegIntervals), testingSet = allPosIntervals ++ allNegIntervals)
}
val wholeCAVIAR = {
new DataAsIntervals(trainingSet = List(), testingSet = allPosIntervals ++ allNegIntervals)
}
}
| 26,795 | 115.504348 | 1,666 | scala |
OLED | OLED-master/src/main/scala/experiments/datautils/caviar_intervals/MovingTrainingDataDistributed.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.caviar_intervals
/**
* Created by nkatz on 4/11/17.
*/
object MovingTrainingDataDistributed {
}
| 829 | 29.740741 | 72 | scala |
OLED | OLED-master/src/main/scala/experiments/datautils/datacron_5_2018_deliv/EventHandler.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.datacron_5_2018_deliv
object EventHandler {
//--------------------------------
// These are the low-level events:
//--------------------------------
// change_in_heading|1443694493|1443694493|245257000
// change_in_speed_start|1443890320|1443890320|259019000
// change_in_speed_end|1443916603|1443916603|311018500
// coord|1443686670|1443686670|228041600|-4.47298500000000043286|48.38163999999999731472
// entersArea|1443875806|1443875806|228017700|18515
// leavesArea|1443887789|1443887789|235113366|21501
// gap_start|1444316024|1444316024|246043000
// gap_end|1445063602|1445063602|269103970
// proximity|1445357304|1445354425|1445357304|true|227519920|227521960
// velocity|1443665064|1443665064|228374000|19.30468943370810563920|31.39999999999999857891|35.54927442329611153582
// slow_motion_start|1444262324|1444262324|228167900
// slow_motion_end|1444281397|1444281397|258088080
// stop_start|1444031990|1444031990|227705102
// stop_end|1444187303|1444187303|259019000
//----------------------------------------------------------------------------------------
// For: change_in_heading, change_in_speed_start, change_in_speed_end, gap_start, gap_end,
// slow_motion_start, slow_motion_end, stop_start, stop_end
// we have that:
// lle = split(0)
// time = split(1)
// vessel = split(3)
// For these the generated event instance should be:
// happensAt(lle(vessel),time) and
// HappensAt(lle_vessel, time) (for MLN)
//----------------------------------------------------------------------------------------
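  //----------------------------------------------------------------------------------------
  // For illustration (input line taken from the examples above), a line like
  //   change_in_heading|1443694493|1443694493|245257000
  // should therefore yield
  //   happensAt(change_in_heading(245257000),1443694493) in ASP mode, and
  //   HappensAt(Change_in_heading_245257000,1443694493) in MLN mode.
  //----------------------------------------------------------------------------------------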
// mode is either "asp" or "mln"
def generateLLEInstances(line: String, mode: String) = {
// These have a different schema
val abnormalLLEs = Set[String]("coord", "entersArea", "leavesArea", "proximity", "velocity")
val split = line.split("\\|")
if (!abnormalLLEs.contains(split(0))) {
// These have a common simple schema:
// change_in_heading, change_in_speed_start, change_in_speed_end,
// gap_start, gap_end, slow_motion_start, slow_motion_end, stop_start, stop_end
      // Per the schema documented above: lle = split(0), time = split(1), vessel = split(3).
      val lle = split(0)
      val time = split(1)
      val vessel = split(3)
      if (mode == "asp") s"happensAt($lle($vessel),$time)" else s"HappensAt(${lle.capitalize}_$vessel,$time)"
} else {
if (split(0) == "coord") {
//coord|1443686670|1443686670|228041600|-4.47298500000000043286|48.38163999999999731472
/*
val lle = split(0)
val time = split(1)
val vessel = split(3)
val lon = split(4)
val lat = split(5)
// Don't return nothing in the MLN case (can't handle the coords syntax)
if ("mode" == "asp") s"happensAt($lle($vessel,$lon,$lat),$time)" else ""
*/
        // do nothing (we won't use coord); return "None" so the method always yields a String.
        "None"
} else if (split(0) == "entersArea" || split(0) == "leavesArea") {
//entersArea|1443875806|1443875806|228017700|18515
        // entersArea|time|time|vessel|area, per the schema documented above.
        val lle = split(0)
        val time = split(1)
        val vessel = split(3)
        val area = split(4)
        if (mode == "asp") s"happensAt($lle($vessel,$area),$time)"
else s"HappensAt(${lle.capitalize}_${vessel}_$area,$time)"
} else if (split(0) == "velocity") {
        // do nothing (we won't use velocity); return "None" so the method always yields a String.
        "None"
} else if (split(0) == "proximity") {
        // proximity|time|startTime|endTime|true|vessel1|vessel2, per the schema documented above.
        val vessel1 = split(5)
        val vessel2 = split(6)
        val time = split(1)
        if (mode == "asp") s"happensAt(close($vessel1,$vessel2),$time)"
        else s"HappensAt(Close_${vessel1}_$vessel2,$time)"
} else {
throw new RuntimeException(s"Unexpected event: $line")
}
}
}
}
| 4,353 | 40.466667 | 117 | scala |
OLED | OLED-master/src/main/scala/experiments/datautils/datacron_5_2018_deliv/IntervalHandler.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.datacron_5_2018_deliv
import java.io.{BufferedWriter, File, FileWriter}
import intervalTree.IntervalTree
import scala.collection.JavaConversions._
import data._
import scala.collection.mutable.ListBuffer
import scala.io.Source
/*
change_in_heading|1443694493|1443694493|245257000
change_in_speed_start|1443890320|1443890320|259019000
change_in_speed_end|1443916603|1443916603|311018500
coord|1443686670|1443686670|228041600|-4.47298500000000043286|48.38163999999999731472
entersArea|1443875806|1443875806|228017700|18515
leavesArea|1443887789|1443887789|235113366|21501
gap_start|1444316024|1444316024|246043000
gap_end|1445063602|1445063602|269103970
proximity|1445357304|1445354425|1445357304|true|227519920|227521960
velocity|1443665064|1443665064|228374000|19.30468943370810563920|31.39999999999999857891|35.54927442329611153582
slow_motion_start|1444262324|1444262324|228167900
slow_motion_end|1444281397|1444281397|258088080
stop_start|1444031990|1444031990|227705102
stop_end|1444187303|1444187303|259019000
*/
object Haversine {
import math._
val R = 6372.8 //radius in km
def haversine(lat1: Double, lon1: Double, lat2: Double, lon2: Double) = {
val dLat = (lat2 - lat1).toRadians
val dLon = (lon2 - lon1).toRadians
val a = pow(sin(dLat / 2), 2) + pow(sin(dLon / 2), 2) * cos(lat1.toRadians) * cos(lat2.toRadians)
val c = 2 * asin(sqrt(a))
R * c
}
def main(args: Array[String]): Unit = {
println(haversine(36.12, -86.67, 33.94, -118.40))
}
}
object IntervalHandler extends App {
val pathToHLEIntervals = "/home/nkatz/dev/Brest-data-5-5-2018/Brest-data/results_critical"
val pathToLLEs = "/home/nkatz/dev/Brest-data-5-5-2018/Brest-data/Datasets/dataset_RTEC_critical.csv"
println("Generating intervals tree...")
val intervalTree =
generateIntervalTree(
pathToHLEIntervals,
List("rendezVous", "stopped", "lowSpeed")
)
readDataIntoMiniBatches(pathToLLEs, 10, "rendezVous", "asp")
// mode is either "asp" or "mln"
/**
* Parses the input data into logical syntax and generates data mini-batches for training.
*
* A data batch is a chunk of input data of given size. Size is measured by temporal duration,
* so given batchSize = n, a mini-batch consists of input data with time stamps t to t+n.
*
*/
def readDataIntoMiniBatches(dataPath: String, batchSize: Int, targetEvent: String, mode: String) = {
val f = new File("/home/nkatz/dev/Brest-data-5-5-2018/rendezVous-batchsize-10")
val writeToFile = new BufferedWriter(new FileWriter(f))
val data = Source.fromFile(dataPath).getLines.filter(x =>
!x.startsWith("coord") && !x.startsWith("velocity") && !x.startsWith("entersArea") && !x.startsWith("leavesArea")
)
val currentBatch = new ListBuffer[String]
var timesAccum = scala.collection.mutable.SortedSet[Long]()
var llesAccum = scala.collection.mutable.SortedSet[String]()
var batchCount = 0
while (data.hasNext) {
val newLine = data.next()
val split = newLine.split("\\|")
println(split.mkString(" "))
      // Per the schema documented at the top of this file, the timestamp is the
      // second field (split(1)); the last field is the vessel id.
      val time = split(1)
      val lle = split.head
      if (!timesAccum.contains(time.toLong)) timesAccum += time.toLong
if (!llesAccum.contains(lle)) llesAccum += lle
if (timesAccum.size <= batchSize) {
currentBatch += generateLLEInstances(newLine, mode)
} else {
batchCount += 1
        // In MLN mode the predicate is capitalized, consistently with the other predicates.
        val nexts = timesAccum.sliding(2).map(x => if (mode == "asp") s"next(${x.last},${x.head})" else s"Next(${x.last},${x.head})")
val intervals = intervalTree.range(timesAccum.head, timesAccum.last)
val extras = timesAccum.flatMap{ timeStamp =>
val containedIn = intervals.filter(interval => interval._1 <= timeStamp && timeStamp <= interval._2)
containedIn.map(x => HLEIntervalToAtom(x._3, timeStamp.toString, targetEvent))
}
        for (x <- extras) currentBatch += x
        for (x <- nexts) currentBatch += x
        writeToFile.write(currentBatch.filter(x => x != "None").mkString(" ") + "\n")
        println(batchCount)
        currentBatch.clear()
        timesAccum.clear()
        // Start the next batch from the line that triggered the flush, so no data point is lost.
        timesAccum += time.toLong
        currentBatch += generateLLEInstances(newLine, mode)
      }
}
println(s"All batches: $batchCount")
println(s"LLEs: $llesAccum")
writeToFile.close()
}
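  // A minimal sketch of what a single emitted batch line looks like (hypothetical
  // atoms, assuming mode = "asp" and two distinct time stamps in the batch):
  //   happensAt(stop_start(245257000),1443694493) holdsAt(rendezVous(227519920,227521960),1443694495) next(1443694495,1443694493)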
// mode is either "asp" or "mln"
def generateLLEInstances(line: String, mode: String) = {
// These have a different schema
val abnormalLLEs = Set[String]("coord", "entersArea", "leavesArea", "proximity", "velocity")
val split = line.split("\\|")
if (!abnormalLLEs.contains(split(0))) {
// These have a common simple schema:
// change_in_heading, change_in_speed_start, change_in_speed_end,
// gap_start, gap_end, slow_motion_start, slow_motion_end, stop_start, stop_end
      // Per the schema documented at the top of this file: lle = split(0), time = split(1), vessel = split(3).
      val lle = split(0)
      val time = split(1)
      val vessel = split(3)
      if (mode == "asp") s"happensAt($lle($vessel),$time)" else s"HappensAt(${lle.capitalize}_$vessel,$time)"
} else {
if (split(0) == "coord") {
//coord|1443686670|1443686670|228041600|-4.47298500000000043286|48.38163999999999731472
/*
val lle = split(0)
val time = split(1)
val vessel = split(3)
val lon = split(4)
val lat = split(5)
// Don't return nothing in the MLN case (can't handle the coords syntax)
if ("mode" == "asp") s"happensAt($lle($vessel,$lon,$lat),$time)" else ""
*/
// do nothing (we won't use coord).
"None"
} else if (split(0) == "entersArea" || split(0) == "leavesArea") {
//entersArea|1443875806|1443875806|228017700|18515
        val lle = split(0)
        val time = split(1)
        val vessel = split(3)
        val area = split(4)
if (mode == "asp") s"happensAt($lle($vessel,$area),$time)"
else s"HappensAt(${lle.capitalize}_${vessel}_$area,$time)"
} else if (split(0) == "velocity") {
// do nothing (we won't use velocity)
"None"
} else if (split(0) == "proximity") {
        //proximity|1445357304|1445354425|1445357304|true|227519920|227521960
        val vessel1 = split(5)
        val vessel2 = split(6)
        val time = split(1)
if (mode == "asp") s"happensAt(close($vessel1,$vessel2),$time)"
else s"HappensAt(Close_${vessel1}_$vessel2,$time)"
} else {
throw new RuntimeException(s"Unexpected event: $line")
}
}
}
object HLEInterval {
def apply(hleLine: String) = {
val split = hleLine.split("\\|")
val hle = split(0)
// rendezVous, tugging
if (Set("adrift", "aground", "atAnchor", "atAnchorOrMoored", "gap",
"highSpeedNearCoast", "loitering", "lowSpeed", "maa", "moored", "speedGrThanMax",
"speedLessThanMin", "stopped", "travelSpeed", "trawling", "trawlSpeed", "underWay", "unusualSpeed").contains(hle)) {
val vessels = List(split(1))
val value = split(2)
val stime = split(3).toLong
val etime = split(4).toLong
new HLEInterval(hle, vessels, value, stime, etime)
} else if (hle == "rendezVous" || hle == "tugging") {
val vessels = List(split(1), split(2))
val value = split(3)
val stime = split(4).toLong
val etime = split(5).toLong
new HLEInterval(hle, vessels, value, stime, etime)
} else if (hle == "withinArea") {
//withinArea|923166|fishing|true|1448977130|1448977242
val vessels = List(split(1))
val value = split(2)
val stime = split(4).toLong
val etime = split(5).toLong
new HLEInterval(hle, vessels, value, stime, etime)
} else throw new RuntimeException(s"Don't know what to do with $hleLine")
}
}
class HLEInterval(val hle: String, val vessels: List[String], val value: String, val stime: Long, val etime: Long)
/* Generate an HLE logical atom. The i var carries all the info, the t var is the particular
* time point of the generated atom. "target" is the name of the target complex event. The
* target event is turned into a holdsAt predicate, while all others are turned into happensAt predicates. */
def HLEIntervalToAtom(i: HLEInterval, t: String, target: String) = {
val functor = if (i.hle == target) "holdsAt" else "happensAt"
val fluentTerm =
if (i.value != "true") s"${i.hle}(${(i.vessels :+ i.value).mkString(",")})"
else s"${i.hle}(${i.vessels.mkString(",")})"
s"$functor($fluentTerm,$t)"
}
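  // For illustration (hypothetical interval): with target = "rendezVous",
  //   HLEIntervalToAtom(HLEInterval("stopped|227519920|true|100|200"), "150", "rendezVous")
  // yields happensAt(stopped(227519920),150), whereas a rendezVous interval at the
  // same time point would yield a holdsAt(rendezVous(...),150) atom instead.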
def generateIntervalTree(pathToHLEs: String, interestedIn: List[String]) = {
def getListOfFiles(dir: String): List[File] = {
val d = new File(dir)
if (d.exists && d.isDirectory) {
d.listFiles.filter(f => f.isFile && interestedIn.exists(eventName => f.getName.contains(eventName))).toList
} else {
List[File]()
}
}
val files = getListOfFiles(pathToHLEs).map(_.getCanonicalPath)
val intervalTree = new IntervalTree[HLEInterval]()
files foreach { file =>
println(s"Updating interval tree from $file")
val data = Source.fromFile(file).getLines
while (data.hasNext) {
val newLine = data.next()
val i = HLEInterval(newLine)
intervalTree.addInterval(i.stime, i.etime, i)
}
}
intervalTree
}
}
package object data {
implicit class ITree[T](val tree: IntervalTree[T]) {
def +=(from: Long, to: Long, data: T): Unit = tree.addInterval(from, to, data)
def range(from: Long, to: Long): List[(Long, Long, T)] = {
tree.getIntervals(from, to).toList.map { i =>
        // Clip each stored interval to the query range, i.e. return the
        // intersection [max(from, start), min(to, end)] of the two intervals.
        (math.max(from, i.getStart), math.min(to, i.getEnd), i.getData)
}
}
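    // For illustration (hypothetical numbers): if the tree holds an interval (5, 20, d),
    // then range(10, 30) returns List((10, 20, d)), i.e. the stored interval clipped
    // to the query range.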
}
}
| 10,484 | 35.155172 | 133 | scala |
OLED | OLED-master/src/main/scala/experiments/datautils/maritime_data/CheckOutInfs.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.maritime_data
import java.io.{File, PrintWriter}
import experiments.datautils.maritime_data.FinalVersionWithTimeFlattening.getAllTimes
import scala.collection.mutable.ListBuffer
import scala.io._
/**
* Created by nkatz on 6/27/17.
*/
object CheckOutInfs {
def main(args: Array[String]) = {
val path = args(0)
val hle = args(1)
// e.g:
// path = "/home/nkatz/dev/maritime/nkatz_brest/1-core/recognition/sailing.csv"
// hle = "sailing
handleInfs(path, hle)
}
def handleInfs(path: String, hle: String) = {
def getStartTime(splittedLine: Array[String], hle: String) = {
if (List("highSpeedIn", "withinArea", "rendezVouz").contains(hle)) {
splittedLine(4).toInt
} else {
splittedLine(3).toInt
}
}
def getVessel(splittedLine: Array[String]) = splittedLine(1)
val newPath = path.split("\\.")(0) + "-no-infs.csv"
utils.Utils.clearFile(newPath)
val pw = new PrintWriter(new File(newPath))
val times = getAllTimes()
val lastTimePoint = times.last
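    // For illustration (hypothetical line): "sailing|227519920|true|12345|inf" never
    // closes, so its 'inf' endpoint is replaced by lastTimePoint below.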
val infs = Source.fromFile(path).getLines.filter { x => x.split("\\|").last == "inf" }.toVector.distinct
val nonInfs = Source.fromFile(path).getLines.filter { x => x.split("\\|").last != "inf" }.toVector.distinct
val buffer = ListBuffer[String]()
nonInfs.foreach(x => pw.write(x + "\n"))
for (line <- infs) {
val splitted = line.split("\\|")
val vessel = getVessel(splitted)
val startTime = getStartTime(splitted, hle)
      // Check whether the inf interval closes at some point. Reuse the already-computed
      // nonInfs instead of re-reading the whole file on every iteration.
      val infClosingLine = nonInfs.find { otherLine =>
val otherLineSplitted = otherLine.split("\\|")
val (v, t) = (getVessel(otherLineSplitted), getStartTime(otherLineSplitted, hle))
v == vessel && t == startTime
}.getOrElse("None")
if (infClosingLine == "None") {
// If it's not "None" th inf interval closes.
// We don't have to copy this inf line in this case, we just throw it away.
// But is it is "None", then the inf interval does not close, so we have to
// replace the 'inf' in the interval with the last time point in the data.
// The same line with the same inf interval may appear over and over again.
// We use a buffer to avoid re-writting it multiple times.
if (!buffer.contains(vessel)) {
val lineToReplace = splitted.take(splitted.length - 1).mkString("|") + s"|$lastTimePoint"
pw.write(lineToReplace + "\n")
println(vessel)
buffer += vessel
}
}
}
pw.close()
}
}
| 3,455 | 34.628866 | 115 | scala |
OLED | OLED-master/src/main/scala/experiments/datautils/maritime_data/ConvertTime.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.maritime_data
import scala.io.Source
import java.io._
import com.mongodb.casbah.Imports.MongoCollection
import com.mongodb.casbah.commons.MongoDBObject
import experiments.datautils.maritime_data.Structures.{BDEntry, Entry, HLE, LLE}
import scala.collection.mutable.ListBuffer
/**
* Created by nkatz on 6/22/17.
*/
/**
*
* This is a (half-finished) version that tries to pre-process the data with in-memory maps
* THIS DOESN'T WORK (GOT AN OUT OF HEAP ERROR AFTER A LOOOONG TIME OF PROCESSING LLES)
*
*/
object ConvertTime {
// This stores the data with time as the key
var dbEntries = scala.collection.mutable.Map[Int, BDEntry]()
val path = "/home/nkatz/dev/maritime/nkatz_brest_0.6/1-core"
val datasetFile = path + "/dataset.txt"
val speedLimitsFile = path + "/static_data/all_areas/areas_speed_limits.csv"
val closeToPortsFile = new File(path + "/recognition/close_to_ports.csv") // this has a different schema than the other hles
val highSpeedFile = new File(path + "/recognition/highSpeedIn.csv")
val loiteringFile = new File(path + "/recognition/loitering.csv")
val lowSpeedFile = new File(path + "/recognition/lowSpeed.csv")
val sailingFile = new File(path + "/recognition/sailing.csv")
val stoppedFile = new File(path + "/recognition/stopped.csv")
val withinAreaFile = new File(path + "/recognition/withinArea.csv")
//val hleFiles = Vector(closeToPortsFile, highSpeedFile, loiteringFile, lowSpeedFile, sailingFile, stoppedFile, withinAreaFile)
val hleFiles = Vector(highSpeedFile)
def main(args: Array[String]) = {
val tt = utils.Utils.time {
println("Creating times map")
val times = getAllTimes()
// The map of unix times -> regular numbers
      val timesMap = (times zip times.indices).toMap
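      // For illustration: if times = Vector(1451802711, 1451802715, ...), the map is
      // 1451802711 -> 0, 1451802715 -> 1, ..., i.e. unix times become consecutive indices.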
println("Processing LLEs")
//val lles = Source.fromFile(datasetFile).getLines.filter(x => !x.contains("HoldsFor") && !x.contains("coord")).foreach(x => handleLLEs(x, timesMap))
val lles = Source.fromFile(datasetFile).getLines.filter(x => !x.contains("HoldsFor") && !x.contains("coord")).map(x => handleLLEs(x, timesMap))
println("Grouping lles")
val llesGrouped = lles.toList.groupBy(x => x.time)
llesGrouped.foreach(x => println(x))
//println("Processing High Speed")
//similarHLEstoPredicateForm(highSpeedFile.getCanonicalPath, timesMap)
}
println(tt._2)
}
def updateDBEntriesMap(entry: Entry) = {
entry match {
case x: LLE =>
if (dbEntries.contains(x.time)) {
val prev = dbEntries.getOrElse(x.time, throw new RuntimeException(s"Key ${x.time} not found in dbEntries map"))
val newEntry = BDEntry(x.time, (List() ++ prev.lles) :+ x.atom, prev.hles, (List() ++ prev.vessels) :+ x.vessel, (List() ++ prev.areas) :+ x.area)
dbEntries(x.time) = newEntry
} else {
dbEntries(x.time) = BDEntry(x.time, List(x.atom), Nil, List(x.vessel), List(x.area))
}
case x: HLE =>
if (dbEntries.contains(x.time)) {
val prev = dbEntries.getOrElse(x.time, throw new RuntimeException(s"Key ${x.time} not found in dbEntries map"))
val newEntry = BDEntry(x.time, prev.lles, (List() ++ prev.hles) :+ x.atom, List() ++ prev.vessels ++ x.vessels, (List() ++ prev.areas) :+ x.area)
dbEntries(x.time) = newEntry
println(dbEntries(x.time))
} else {
// If this happens something is wrong
throw new RuntimeException(s"Time key for atom ${x.atom} is missing from the entries map.")
}
case _ =>
}
}
def getAllTimes() = {
val lleTimes = Source.fromFile(datasetFile).getLines.map(x => getTimeLLEs(x)).filter(_ != "None").toVector.distinct
val hleTimes = hleFiles.foldLeft(Vector[String]()){ (x, y) =>
val data = Source.fromFile(y).getLines.flatMap(x => getTimeHLEs(x)).toVector.distinct
x ++ data
}
(lleTimes ++ hleTimes).distinct.map(_.toInt).sorted
}
/* Get the time stamp from an LLE line. Proximity is declared with HoldsFor
(and we don't need proximity), so we don't use it.*/
def getTimeLLEs(dataLine: String) = {
// "HoldsFor" is used for vessel proximity we won't use it now.
if (!dataLine.contains("HoldsFor")) {
val info = dataLine.split("HappensAt")(1)
val _info = info.split("\\]")
_info(1).trim
} else {
"None"
}
}
def getTimeHLEs(dataLine: String) = {
try {
if (dataLine.contains("not_near")) { // this is for close_to_ports that has a different schema
Vector(dataLine.split("\\|")(0))
} else if (dataLine.contains("highSpeedIn")) {
val split = dataLine.split("\\|")
Vector(split(4), split(5))
} else {
val split = dataLine.split("\\|")
Vector(split(3), split(4))
}
} catch {
case e: ArrayIndexOutOfBoundsException =>
println(dataLine)
Vector("0")
}
}
/*
* Handles: highSpeedIn withinArea that have the same schema
*
* */
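  // For illustration (hypothetical line): "highSpeedIn|237029400|area159431200|true|t1|t2"
  // yields one holdsAt(highSpeedIn("237029400","area159431200"),"t") atom for every
  // mapped time point t falling within [t1, t2].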
def areaHLEsToPredicateForm(path: String, timesMap: Map[Int, Int]) = {
val reversedMap = timesMap.map{ case (k, v) => (v, k) }
val data = Source.fromFile(path).getLines
data.foldLeft(List[HLE]()) { (accum, x) =>
//println()
val s = x.split("\\|")
val startTime = timesMap(s(4).toInt)
val endTime = timesMap(s(5).toInt)
val hle = s(0)
val vessel = s(1)
val area = s(2)
var count = startTime
var accum_ = ListBuffer[Int]()
while (count <= endTime) {
if (reversedMap.contains(count)) {
accum_ += count
}
count += 1
}
val intermediateTimes = accum_
val intermediateHles = intermediateTimes.map(time => HLE(hle, s"""holdsAt($hle("$vessel","$area"),"$time")""", time, List(vessel), area))
accum ++ intermediateHles.toList
}
}
/*
*
* Handles: loitering, low-speed, sailing, stopped (they have the same schema)
*
*
* */
def similarHLEstoPredicateForm(path: String, timesMap: Map[Int, Int]) = {
val reversedMap = timesMap.map{ case (k, v) => (v, k) }
val data = Source.fromFile(path).getLines
data.foreach{ x =>
val s = x.split("\\|")
val startTime = timesMap(s(3).toInt)
val endTime = timesMap(s(4).toInt)
val hle = s(0)
val vessel = s(1)
var count = startTime
var accum_ = ListBuffer[Int]()
while (count <= endTime) {
if (reversedMap.contains(count)) {
accum_ += count
}
count += 1
}
val intermediateTimes = accum_
val intermediateHles = intermediateTimes.map(time => HLE(hle, s"""holdsAt($hle("$vessel"),"$time")""", time, List(vessel), "None"))
intermediateHles.foreach(x => updateDBEntriesMap(x))
}
}
/* Convert a data point to predicate form. This does not work with proximity for now.
* This returns the predicate itself, the vessel and the area (if an area is involved).
*
* */
def handleLLEs(x: String, timesMap: Map[Int, Int]) = {
var area = "None"
var predicate = "None"
val info = x.split("HappensAt")(1)
val _info = info.split("\\]")
val time = timesMap(_info(1).trim.toInt).toString
val rest = _info(0).split("\\[")(1)
val lle = rest.split(" ")(0)
val vessel = rest.split(" ")(1)
lle match {
case "gap_start" =>
//HappensAt [gap_start 271043753] 1451802715
predicate = s"""happensAt(gap_start("$vessel"),"$time")"""
case "velocity" =>
//HappensAt [velocity 240675000 0 270.00005134150797] 1451802711
//the 4th parameter in [velocity 240675000 0 270.00005134150797] is heading, which is not used anywhere
val speed = rest.split(" ")(2)
predicate = s"""happensAt(velocity("$vessel","$speed"),"$time")"""
case "change_in_speed_start" =>
//HappensAt [change_in_speed_start 237955000] 1451802743
predicate = s"""happensAt(change_in_speed_start("$vessel"),"$time")"""
case "stop_start" =>
//HappensAt [stop_start 636013060] 1451802771
predicate = s"""happensAt(stop_start("$vessel"),"$time")"""
case "change_in_heading" =>
//HappensAt [change_in_heading 240096000] 1451802787
predicate = s"""happensAt(change_in_heading("$vessel"),"$time")"""
case "isInArea" =>
//HappensAt [isInArea 239471800 area300240700] 1451802848
area = rest.split(" ")(2)
predicate = s"""happensAt(isInArea("$vessel", "$area"),"$time")"""
case "change_in_speed_end" =>
//HappensAt [change_in_speed_end 237144200] 1451802872
predicate = s"""happensAt(change_in_speed_end("$vessel"),"$time")"""
case "slow_motion_start" =>
//HappensAt [slow_motion_start 240802000] 1451802892
predicate = s"""happensAt(slow_motion_start("$vessel"),"$time")"""
case "stop_end" =>
//HappensAt [stop_end 356460000] 1451802924
predicate = s"""happensAt(stop_end("$vessel"),"$time")"""
case "gap_end" =>
//HappensAt [gap_end 271043772] 1451802920
predicate = s"""happensAt(gap_end("$vessel"),"$time")"""
case "leavesArea" =>
//HappensAt [leavesArea 239371500 area300674000] 1451802925
area = rest.split(" ")(2)
predicate = s"""happensAt(leavesArea("$vessel","$area"),"$time")"""
case "slow_motion_end" =>
predicate = s"""happensAt(slow_motion_end("$vessel"),"$time")"""
//HappensAt [slow_motion_end 271044099] 1451802937
      case _ => // unknown LLE: leave predicate as "None"
    }
val entry = LLE(lle, predicate, time.toInt, vessel, area)
//updateDBEntriesMap(entry)
entry
}
}
| 10,365 | 37.250923 | 156 | scala |
OLED | OLED-master/src/main/scala/experiments/datautils/maritime_data/FinalVersionWithTimeFlattening.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.maritime_data
import java.io.File
import scala.io.Source
import com.mongodb.casbah.MongoClient
import com.mongodb.casbah.Imports._
import com.typesafe.scalalogging.LazyLogging
import scala.collection.mutable.ListBuffer
/**
* Created by nkatz on 6/23/17.
*/
/**
* HERE WE TRY TO STORE EVERYTHING IN A MONGO DB AS IS. TRAINING EXAMPLES WILL
* BE GENERATED AT LEARNING TIME WITH PROPER QUERIES THAN FETCH THINGS WITH THE
* SAME TIME STAMP.
*
* PERHAPS THIS WILL TAKE A BIT MORE TIME THAN HAVING ALL PRE-PROCESSED AND INPLACE,
* BUT I GET OUT OF HEAP ERRORS WHEN I TRY TO PRE-PROCESS THE DATA WITH IN-MEMORY MAPS.
*
*/
object FinalVersionWithTimeFlattening extends LazyLogging {
val path = "/home/nkatz/dev/maritime/nkatz_brest/1-core"
private val datasetFile = path + "/dataset.txt"
private val speedLimitsFile = path + "/static_data/all_areas/areas_speed_limits.csv"
/*
private val closeToPortsFile = new File(path+"/recognition/close_to_ports.csv") // this has a different schema than the other hles
private val highSpeedFile = new File(path+"/recognition/highSpeedIn-no-infs.csv")
private val loiteringFile = new File(path+"/recognition/loitering-no-infs.csv")
private val lowSpeedFile = new File(path+"/recognition/lowSpeed-no-infs.csv")
private val sailingFile = new File(path+"/recognition/sailing-no-infs.csv")
private val stoppedFile = new File(path+"/recognition/stopped-no-infs.csv")
private val withinAreaFile = new File(path+"/recognition/withinArea-no-infs.csv")
private val rendezVousFile = new File(path+"/recognition/rendezVouz-no-infs.csv")
*/
private val closeToPortsFile = new File(path + "/recognition/close_to_ports.csv") // this has a different schema than the other hles
private val highSpeedFile = new File(path + "/recognition/highSpeedIn.csv")
private val loiteringFile = new File(path + "/recognition/loitering.csv")
private val lowSpeedFile = new File(path + "/recognition/lowSpeed.csv")
private val sailingFile = new File(path + "/recognition/sailing.csv")
private val stoppedFile = new File(path + "/recognition/stopped.csv")
private val withinAreaFile = new File(path + "/recognition/withinArea.csv")
private val rendezVousFile = new File(path + "/recognition/rendezVouz.csv")
/* withinArea IS MISSING ANNOTATION */
val hleFiles = Vector(highSpeedFile, loiteringFile, lowSpeedFile, sailingFile, stoppedFile, rendezVousFile)
//private val hleFiles = Vector(highSpeedFile)
def main(args: Array[String]) = {
val dbName = "maritime-brest"
val mongoClient = MongoClient()
//mongoClient.dropDatabase(dbName)
println("Creating times map")
val times = getAllTimes()
// The map of unix times -> regular numbers
//val timesMap = (getAllTimes() zip (0 to times.length)).toMap
    // TEST: Let's keep it the same.
val timesMap = (times zip times).toMap
    /* This takes too much time; just preserve the collection until the LLE processing is debugged. */
//println("Processing LLEs")
//LLEsToMongo(mongoClient(dbName)("lles"))
println("Processing stopped")
val stoppedCol = mongoClient(dbName)("stopped")
stoppedCol.dropCollection()
similarHLEsToMongo(stoppedFile.getCanonicalPath, timesMap, stoppedCol)
println("Processing close to ports")
val portsCol = mongoClient(dbName)("ports")
portsCol.dropCollection()
portsToMongo(portsCol)
println("Processing proximity")
val proxCol = mongoClient(dbName)("proximity")
proxCol.dropCollection()
proximityToMongo(timesMap, proxCol)
println("Processing High Speed")
val highSpeedCol = mongoClient(dbName)("high_speed")
highSpeedCol.dropCollection()
AreaHLEsToMongo(highSpeedFile.getCanonicalPath, timesMap, highSpeedCol)
println("Processing sailing")
val sailingCol = mongoClient(dbName)("sailing")
sailingCol.dropCollection()
similarHLEsToMongo(sailingFile.getCanonicalPath, timesMap, sailingCol)
println("Processing low speed")
val lowSpeedCol = mongoClient(dbName)("low-speed")
lowSpeedCol.dropCollection()
similarHLEsToMongo(lowSpeedFile.getCanonicalPath, timesMap, lowSpeedCol)
println("Processing loitering")
val loiteringCol = mongoClient(dbName)("loitering")
loiteringCol.dropCollection()
similarHLEsToMongo(loiteringFile.getCanonicalPath, timesMap, loiteringCol)
println("Processing rendezvous")
val rendezvousCol = mongoClient(dbName)("rendezvous")
rendezvousCol.dropCollection()
rendezVousToMongo(rendezVousFile.getCanonicalPath, timesMap, rendezvousCol)
//println("Processing within area")
//AreaHLEsToMongo(withinAreaFile.getCanonicalPath, timesMap, mongoClient(dbName)("within_area"))
println("Processing speed limits")
speedLimitsToMongo(speedLimitsFile, mongoClient(dbName)("speed_limits"))
println("Done!")
}
private def portsToMongo(collection: MongoCollection) = {
val data = Source.fromFile(closeToPortsFile).getLines
data foreach { x =>
val s = x.split("\\|")
val time = s(0).toInt
val vessel = s(1)
val atom = s"""notCloseToPorts("$vessel","${time.toString}")"""
val entry = MongoDBObject("time" -> time) ++ ("atom" -> atom) ++ ("vessel" -> vessel)
collection.insert(entry)
}
}
def getAllTimes() = {
val lleTimes = Source.fromFile(datasetFile).getLines.map(x => getTimeLLEs(x)).filter(_ != "None").toVector.distinct
/*
val hleTimes = hleFiles.foldLeft(Vector[String]()){ (x, y) =>
val data = Source.fromFile(y).getLines.filter(x => !x.contains("inf")).flatMap(x => getTimeHLEs(x)).toVector.distinct
x ++ data
}
*/
    val proximityTimes = getProximityTimes()
    //(lleTimes ++ hleTimes ++ proximityTimes).distinct.map(_.toInt).sorted
    (lleTimes ++ proximityTimes).distinct.map(_.toInt).sorted
}
/* Get the time stamp from an LLE line. Proximity is declared with HoldsFor
(and we don't need proximity), so we don't use it.*/
private def getTimeLLEs(dataLine: String) = {
// "HoldsFor" is used for vessel proximity we won't use it now.
if (!dataLine.contains("HoldsFor")) {
val info = dataLine.split("HappensAt")(1)
val _info = info.split("\\]")
_info(1).trim
} else {
"None"
}
}
private def getProximityTimes() = {
val lines = Source.fromFile(datasetFile).getLines.filter(x => x.contains("proximity"))
lines.foldLeft(Vector[String]()){ (accum, x) =>
val s = x.split("=")(0).split(" ")
val z = x.split("=")(1).trim().split(" ")(1).split("\\(")(1).split("\\)")(0).split("-")
val startTime = z(0)
val endTime = z(1)
accum ++ List(startTime, endTime)
}
}
private def getTimeHLEs(dataLine: String) = {
try {
if (dataLine.contains("not_near")) { // this is for close_to_ports that has a different schema
Vector(dataLine.split("\\|")(0))
} else if (dataLine.contains("highSpeedIn") || dataLine.contains("withinArea")) {
val split = dataLine.split("\\|")
Vector(split(4), split(5))
} else if (dataLine.contains("loitering") || dataLine.contains("sailing") || dataLine.contains("stopped") || dataLine.contains("lowSpeed")) {
val split = dataLine.split("\\|")
Vector(split(3), split(4))
} else { //rendezvous
val split = dataLine.split("\\|")
Vector(split(4), split(5))
}
} catch {
case e: ArrayIndexOutOfBoundsException =>
println(dataLine)
Vector("0")
}
}
/* Convert a data point to predicate form. This does not work with proximity for now.
* This returns the predicate itself, the vessel and the area (if an area is involved).
*
* */
def LLEsToMongo(collection: MongoCollection) = {
val data = Source.fromFile(datasetFile).getLines //.filter(x => !x.contains("HoldsFor") && !x.contains("coord"))
while (data.hasNext) {
val x = data.next()
if (!x.contains("HoldsFor") && !x.contains("coord")) {
var area = "None"
var predicate = "None"
val info = x.split("HappensAt")(1)
val _info = info.split("\\]")
val time = _info(1).trim.toInt
val rest = _info(0).split("\\[")(1)
val lle = rest.split(" ")(0)
val vessel = rest.split(" ")(1)
lle match {
case "gap_start" =>
//HappensAt [gap_start 271043753] 1451802715
predicate = s"""happensAt(gap_start("$vessel"),"$time")"""
case "velocity" =>
//HappensAt [velocity 240675000 0 270.00005134150797] 1451802711
//the 4th parameter in [velocity 240675000 0 270.00005134150797] is heading, which is not used anywhere
val speed = rest.split(" ")(2)
predicate = s"""happensAt(velocity("$vessel","$speed"),"$time")"""
case "change_in_speed_start" =>
//HappensAt [change_in_speed_start 237955000] 1451802743
predicate = s"""happensAt(change_in_speed_start("$vessel"),"$time")"""
case "stop_start" =>
//HappensAt [stop_start 636013060] 1451802771
predicate = s"""happensAt(stop_start("$vessel"),"$time")"""
case "change_in_heading" =>
//HappensAt [change_in_heading 240096000] 1451802787
predicate = s"""happensAt(change_in_heading("$vessel"),"$time")"""
case "isInArea" =>
//HappensAt [isInArea 239471800 area300240700] 1451802848
area = rest.split(" ")(2)
predicate = s"""happensAt(isInArea("$vessel", "$area"),"$time")"""
case "change_in_speed_end" =>
//HappensAt [change_in_speed_end 237144200] 1451802872
predicate = s"""happensAt(change_in_speed_end("$vessel"),"$time")"""
case "slow_motion_start" =>
//HappensAt [slow_motion_start 240802000] 1451802892
predicate = s"""happensAt(slow_motion_start("$vessel"),"$time")"""
case "stop_end" =>
//HappensAt [stop_end 356460000] 1451802924
predicate = s"""happensAt(stop_end("$vessel"),"$time")"""
case "gap_end" =>
//HappensAt [gap_end 271043772] 1451802920
predicate = s"""happensAt(gap_end("$vessel"),"$time")"""
case "leavesArea" =>
//HappensAt [leavesArea 239371500 area300674000] 1451802925
area = rest.split(" ")(2)
predicate = s"""happensAt(leavesArea("$vessel","$area"),"$time")"""
case "slow_motion_end" =>
predicate = s"""happensAt(slow_motion_end("$vessel"),"$time")"""
//HappensAt [slow_motion_end 271044099] 1451802937
          case _ => // unknown LLE: leave predicate as "None"
        }
val entry = MongoDBObject("time" -> time.toInt) ++ ("lle" -> predicate) ++ ("hle" -> "None") ++ ("vessels" -> List(vessel)) ++ ("areas" -> List(area))
collection.insert(entry)
}
}
}
/*
* Handles: highSpeedIn, withinArea that have the same schema
*
* */
def AreaHLEsToMongo(path: String, map: Map[Int, Int], collection: MongoCollection) = {
val data = Source.fromFile(path).getLines.filter(x => !x.contains("inf"))
data.foreach { x =>
val s = x.split("\\|")
//val startTime = map(s(4).toInt)
//val endTime = map(s(5).toInt) - 1
val startTime = s(4).toInt
val endTime = s(5).toInt - 1
val hle = s(0)
val vessel = s(1)
val area = s(2)
val intermediate = List(startTime) ++ (for (x <- startTime to endTime if map.contains(x)) yield x).toList :+ endTime
val atom = s"""holdsAt($hle("$vessel","$area"),"ReplaceThisByActualTime")"""
val entry = MongoDBObject("interval" -> intermediate.toList) ++ ("hle" -> atom) ++ ("vessels" -> List(vessel)) ++ ("areas" -> List(area))
collection.insert(entry)
}
}
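  // For illustration, each inserted document has the shape (hypothetical values):
  //   { "interval": [t1, ..., t2],
  //     "hle": "holdsAt(highSpeedIn(\"v\",\"a\"),\"ReplaceThisByActualTime\")",
  //     "vessels": ["v"], "areas": ["a"] }
  // The placeholder time is substituted with an actual time point at query time.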
def similarHLEsToMongo(path: String, timesMap: Map[Int, Int], collection: MongoCollection) = {
val data = Source.fromFile(path).getLines.filter(x => !x.contains("inf"))
data foreach { x =>
val s = x.split("\\|")
//val startTime = timesMap(s(3).toInt)
//val endTime = timesMap(s(4).toInt) - 1
val startTime = s(3).toInt
val endTime = s(4).toInt - 1
val hle = s(0)
val vessel = s(1)
val intermediate = List(startTime) ++ (for (x <- startTime to endTime if timesMap.contains(x)) yield x).toList :+ endTime
val atom = s"""holdsAt($hle("$vessel"),"ReplaceThisByActualTime")"""
val entry = MongoDBObject("interval" -> intermediate) ++ ("hle" -> atom) ++ ("vessels" -> List(vessel))
try {
collection.insert(entry)
} catch {
case _: org.bson.BsonSerializationException => println(startTime)
}
}
}
def rendezVousToMongo(path: String, timesMap: Map[Int, Int], collection: MongoCollection) = {
val data = Source.fromFile(path).getLines.filter(x => !x.contains("inf"))
data foreach { x =>
val s = x.split("\\|")
//val startTime = timesMap(s(4).toInt)
//val endTime = timesMap(s(5).toInt) - 1
val startTime = s(4).toInt
val endTime = s(5).toInt - 1
val hle = s(0)
val vessel1 = s(1)
      val vessel2 = s(2)
val intermediate = List(startTime) ++ (for (x <- startTime to endTime if timesMap.contains(x)) yield x).toList :+ endTime
val atom = s"""holdsAt($hle("$vessel1","$vessel2"),"ReplaceThisByActualTime")"""
val entry = MongoDBObject("interval" -> intermediate) ++ ("hle" -> atom) ++ ("vessels" -> List(vessel1, vessel2))
collection.insert(entry)
}
}
def proximityToMongo(timesMap: Map[Int, Int], collection: MongoCollection) = {
val lines = Source.fromFile(datasetFile).getLines.filter(x => x.contains("proximity"))
lines foreach { x =>
val s = x.split("=")(0).split(" ")
val vessel1 = s(2)
val vessel2 = s(3)
val z = x.split("=")(1).trim().split(" ")(1).split("\\(")(1).split("\\)")(0).split("-")
//val startTime = timesMap(z(0).toInt)
//val endTime = timesMap(z(1).toInt) - 1
val startTime = z(0).toInt
val endTime = z(1).toInt - 1
val intermediate = List(startTime) ++ (for (x <- startTime to endTime if timesMap.contains(x)) yield x).toList :+ endTime
      val atom = s"""holdsAt(close("$vessel1","$vessel2"),"ReplaceThisByActualTime")"""
val entry = MongoDBObject("interval" -> intermediate) ++ ("hle" -> atom) ++ ("vessels" -> List(vessel1, vessel2))
collection.insert(entry)
}
}
def speedLimitsToMongo(path: String, collection: MongoCollection) = {
val data = Source.fromFile(path).getLines
data foreach { x =>
val s = x.split("\\|")
val area = s(0)
val limit = s(1)
val atom = s"""speedLimit("$area","$limit")"""
val entry = MongoDBObject("area" -> area) ++ ("limit" -> limit) ++ ("atom" -> atom)
collection.insert(entry)
}
}
}
| 15,556 | 40.485333 | 158 | scala |
OLED | OLED-master/src/main/scala/experiments/datautils/maritime_data/Structures.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.maritime_data
import com.mongodb.{BasicDBList, BasicDBObject, DBObject}
import com.mongodb.casbah.Imports._
/**
* Created by nkatz on 6/23/17.
*/
object Structures {
trait Entry
/* An interpretation containing all info for a single time point. */
case class BDEntry(time: Int, lles: List[String], hles: List[String], vessels: List[String], areas: List[String]) extends Entry
case class LLE(lleName: String, atom: String, time: Int, vessel: String, area: String) extends Entry
/*In HLEs vessels are in list because randezvous (for example) involves two vessels*/
case class HLE(hleName: String, atom: String, time: Int, vessels: List[String], area: String) extends Entry
class MaritimeExample(val time: Int, val lle: String, val hle: String, val vessels: List[String], val areas: List[String])
object MaritimeExample {
def apply(o: DBObject) = {
val time = o.asInstanceOf[BasicDBObject].get("time").toString.toInt
val lle = o.asInstanceOf[BasicDBObject].get("lle").toString
val hle = o.asInstanceOf[BasicDBObject].get("hle").toString
val vessels = o.asInstanceOf[BasicDBObject].get("vessels").asInstanceOf[BasicDBList].toList.map(_.toString)
val areas = o.asInstanceOf[BasicDBObject].get("areas").asInstanceOf[BasicDBList].toList.map(_.toString)
new MaritimeExample(time, lle, hle, vessels, areas)
}
}
}
| 2,105 | 38.735849 | 129 | scala |
OLED | OLED-master/src/main/scala/experiments/datautils/maritime_data/Tests.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.maritime_data
import com.mongodb.{BasicDBList, BasicDBObject}
import com.mongodb.casbah.{MongoClient, MongoCollection}
import com.mongodb.casbah.Imports._
/**
* Created by nkatz on 6/23/17.
*/
object Tests extends App {
val dbName = "maritime-brest"
val mongoClient = MongoClient()
val llesCollection = mongoClient(dbName)("lles")
llesCollection.createIndex(MongoDBObject("time" -> 1))
/*High speed*/
val highSpeedCollection = mongoClient(dbName)("high_speed")
//highSpeedCollection.createIndex(MongoDBObject("interval"))
/*Within area*/
val withinAreaCollection = mongoClient(dbName)("within-area")
//withinAreaCollection.createIndex(MongoDBObject("interval"))
/*Sailing*/
val sailingCollection = mongoClient(dbName)("sailing")
//sailingCollection.createIndex(MongoDBObject("interval"))
/*Stopped*/
val stoppedCollection = mongoClient(dbName)("stopped")
//stoppedCollection.createIndex(MongoDBObject("interval"))
/*loitering*/
val loiteringCollection = mongoClient(dbName)("loitering")
//loiteringCollection.createIndex(MongoDBObject("interval"))
/*rendezvous*/
val rendezvousCollection = mongoClient(dbName)("rendezvous")
//rendezvousCollection.createIndex(MongoDBObject("interval"))
/*Low speed*/
val lowSpeedCollection = mongoClient(dbName)("low-speed")
//lowSpeedCollection.createIndex(MongoDBObject("interval"))
val speedLimitsCollection = mongoClient(dbName)("speed_limits")
speedLimitsCollection.createIndex(MongoDBObject("area" -> 1))
val closeToPortsCollection = mongoClient(dbName)("ports")
speedLimitsCollection.createIndex(MongoDBObject("time" -> 1))
val proximityCollection = mongoClient(dbName)("proximity")
//proximityCollection.createIndex(MongoDBObject("interval"))
val hleCollections = List(highSpeedCollection, withinAreaCollection, sailingCollection,
stoppedCollection, loiteringCollection, rendezvousCollection, lowSpeedCollection, proximityCollection)
val writeToDB = mongoClient("high-speed-stand-alone-db")("examples")
mongoClient.dropDatabase("high-speed-stand-alone-db")
writeToDB.createIndex(MongoDBObject("time" -> 1))
getMaritimeDataInChunks(llesCollection, 5)
def getMaritimeDataInChunks(llesCollection: MongoCollection, chunkSize: Int) = {
val startTime = llesCollection.find().sort(MongoDBObject("time" -> 1)).map(x => x.asInstanceOf[BasicDBObject].get("time").toString.toInt).next()
val endTime = llesCollection.find().sort(MongoDBObject("time" -> -1)).map(x => x.asInstanceOf[BasicDBObject].get("time").toString.toInt).next()
(startTime to endTime).grouped(chunkSize) foreach { timeSlice =>
val first = timeSlice.head
val last = if (timeSlice.length > 1) timeSlice.tail.reverse.head else first
val lleResults = llesCollection.find("time" $gte first $lte last)
val (narrativeAtoms, notCloseToPortsAtoms, areas1) = lleResults.foldLeft(List[String](), List[String](), List[String]()) { (accum, o) =>
val a = getLLEInfo(o)
val (atom, portAtom, as) = (a._1, a._2, a._3)
(accum._1 :+ atom, accum._2 ++ portAtom, accum._3 ++ as)
}
val (annotAtoms, areas2) = timeSlice.toList.foldLeft(List[String](), List[String]()) { (accum, time) =>
val results = hleCollections.foldLeft(Iterator[DBObject]()) { (x, collection) =>
val t = collection.find(MongoDBObject("interval" -> time))
x ++ t
}
val (anotAtoms_, areas_) = results.foldLeft(List[String](), List[String]()) { (z, o) =>
val a = getHLEInfo(o, time)
val (atom, as) = (a._1, a._2)
(z._1 :+ atom, z._2 ++ as)
}
(accum._1 ++ anotAtoms_, accum._2 ++ areas_)
}
val areas = (areas1 ++ areas2).distinct.filter(_ != "None")
val speedLimitAtoms = areas.foldLeft(List[String]()){ (k, area) =>
val results = speedLimitsCollection.find(MongoDBObject("area" -> area))
val resToStrs = results.map (obj => obj.asInstanceOf[BasicDBObject].get("atom").toString)
k ++ resToStrs
}
println(s"$first-$last")
//------------------------------
// INSERT TO THE STAND-ALONE DB
//------------------------------
val entry = MongoDBObject("time" -> first) ++ ("annotation" -> annotAtoms) ++ ("narrative" -> (narrativeAtoms ++ speedLimitAtoms ++ notCloseToPortsAtoms))
writeToDB.insert(entry)
}
}
def getLLEInfo(o: DBObject) = {
val atom = o.asInstanceOf[BasicDBObject].get("lle").toString
//val vs = o.asInstanceOf[BasicDBObject].get("vessels").asInstanceOf[BasicDBList].toList.map(_.toString)
val time = o.asInstanceOf[BasicDBObject].get("time").toString.toInt
val r = closeToPortsCollection.find(MongoDBObject("time" -> time))
val notCloseToPortsAtoms = r.map(obj => obj.asInstanceOf[BasicDBObject].get("atom").toString)
val as = o.asInstanceOf[BasicDBObject].get("areas").asInstanceOf[BasicDBList].toList.map(_.toString)
(atom, notCloseToPortsAtoms, as)
}
def getHLEInfo(o: DBObject, time: Int) = {
val atom = o.asInstanceOf[BasicDBObject].get("hle").toString.replaceAll("ReplaceThisByActualTime", time.toString)
//val vs = o.asInstanceOf[BasicDBObject].get("vessels").asInstanceOf[BasicDBList].toList.map(_.toString)
val as = o.asInstanceOf[BasicDBObject].get("areas").asInstanceOf[BasicDBList].toList.map(_.toString)
//(atom, vs, as)
(atom, as)
}
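  // For illustration: an "hle" field holding holdsAt(stopped("227519920"),"ReplaceThisByActualTime"),
  // queried at time 1451802711, becomes holdsAt(stopped("227519920"),"1451802711").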
/**
* Returns all documents from a collection, either sorted or not.
* Use it like getAllDataFromDb(col, "time") foreach { do something }, or e.g.
* getAllDataFromDb(mongoClient(dbName)("high_speed")).foreach(x => println(x))
*/
def getAllDataFromDb(collection: MongoCollection, sortByKeyName: String = ""): collection.CursorType = {
if (sortByKeyName != "") {
collection.createIndex(MongoDBObject(sortByKeyName -> 1))
collection.find().sort(MongoDBObject(sortByKeyName -> 1))
} else {
collection.find()
}
}
}
| 6,730 | 41.872611 | 160 | scala |
OLED | OLED-master/src/main/scala/experiments/datautils/maritime_data/first_version_doesnt_work/DataPreProcessing.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.maritime_data.first_version_doesnt_work
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.MongoClient
import com.mongodb.casbah.commons.MongoDBObject
import logic.Examples.Example
import scala.io.Source
/**
* Created by nkatz on 24/4/2017.
*/
/**
*
* THIS IS THE OLD VERSION THAT DOES NOT FLATTEN TIME STAMPS.
* THERE IS AN ERROR IN SUBTRACTING 1 FROM TIMES, YOU DON'T HAVE TO DO THAT.
*
*
*
*
*/
object DataPreProcessing {
/*
* ----------------------------------------------------------------------
* HLEs are (closed-open). For example
*
* highSpeedIn|237029400|area159431200|true|1451654951|1451655191
*
* means that highSpeedIn holds in [1451654951, 1451655191),
* therefore it is initiated at 1451654950 and terminated at 1451655190.
*
* The above will be turned into:
*
* holdsAt(highSpeedIn("237029400", area159431200), "1451654951")
* holdsAt(highSpeedIn("237029400", area159431200), "1451655190")
*------------------------------------------------------------------------
*
* The schema for each LLE we need at this point follows:
*
* gap_start: HappensAt [gap_start 271043753] 1451802715
* coord: WE DON'T USE THIS FOR NOW
* velocity: HappensAt [velocity 240675000 0 270.00005134150797] 1451802711
* change_in_speed_start: HappensAt [change_in_speed_start 237955000] 1451802743
* stop_start: HappensAt [stop_start 636013060] 1451802771
* change_in_heading: HappensAt [change_in_heading 240096000] 1451802787
* isInArea: HappensAt [isInArea 239471800 area300240700] 1451802848
* change_in_speed_end: HappensAt [change_in_speed_end 237144200] 1451802872
* slow_motion_start: HappensAt [slow_motion_start 240802000] 1451802892
* proximity: WE DON'T USE THIS FOR NOW
* stop_end: HappensAt [stop_end 356460000] 1451802924
* gap_end: HappensAt [gap_end 271043772] 1451802920
* leavesArea: HappensAt [leavesArea 239371500 area300674000] 1451802925
* slow_motion_end: HappensAt [slow_motion_end 271044099] 1451802937
*
* The schema for each HLE is as follows:
*
*
*
* */
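  // For illustration, the line "HappensAt [stop_start 636013060] 1451802771" describes a
  // stop_start event for vessel 636013060 at time 1451802771; it is stored as an LLE
  // instance (see the case class below) with the corresponding predicate-form atom.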
case class LLE(lleName: String, atom: String, time: String, vessel: String, area: String)
def main(args: Array[String]) = {
val mongoClient = MongoClient()
val dbName = "Maritime-Aegean-whole"
val LLEcollection = mongoClient(dbName)("lles")
val trainingExamplesCollection = mongoClient(dbName)("examples")
val highSpeedCollection = mongoClient(dbName)("high_speed")
val loiteringCollection = mongoClient(dbName)("loitering")
val lowSpeedCollection = mongoClient(dbName)("low-speed")
val rendezvousCollection = mongoClient(dbName)("rendezvous")
val sailingCollection = mongoClient(dbName)("sailing")
val stoppedCollection = mongoClient(dbName)("stopped")
val withinAreaCollection = mongoClient(dbName)("within-area")
val notCloseToPortsCollection = mongoClient(dbName)("not-close-to-ports")
val speedLimitsCollections = mongoClient(dbName)("speed-limits")
///*
mongoClient.dropDatabase("Maritime-Aegean")
storeLLEsToMongo("/home/nkatz/dev/maritime-data/maritime-aegean/core_1/dataset.txt", LLEcollection)
toMongo_areaHLEs("/home/nkatz/dev/maritime-data/maritime-aegean/core_1/recognition/highSpeedIn.csv", highSpeedCollection) // high-speed
toMongo_areaHLEs("/home/nkatz/dev/maritime-data/maritime-aegean/core_1/recognition/withinArea.csv", withinAreaCollection) // withinArea
toMongo_similarHLEs("/home/nkatz/dev/maritime-data/maritime-aegean/core_1/recognition/loitering.csv", loiteringCollection) // loitering
toMongo_similarHLEs("/home/nkatz/dev/maritime-data/maritime-aegean/core_1/recognition/lowSpeed.csv", lowSpeedCollection) // low-speed
toMongo_similarHLEs("/home/nkatz/dev/maritime-data/maritime-aegean/core_1/recognition/sailing.csv", sailingCollection) // sailing
toMongo_similarHLEs("/home/nkatz/dev/maritime-data/maritime-aegean/core_1/recognition/stopped.csv", stoppedCollection) // stopped
toMongo_rendezvous("/home/nkatz/dev/maritime-data/maritime-aegean/core_1/recognition/rendezVouz.csv", rendezvousCollection) // rendezVouz
toMongo_notCloseToPorts("/home/nkatz/dev/maritime-data/maritime-aegean/core_1/recognition/close_to_ports.csv", notCloseToPortsCollection)
toMongo_speedLimits("/home/nkatz/dev/maritime-data/maritime-aegean/core_1/static_data/all_areas/areas_speed_limits.csv", speedLimitsCollections)
//*/
/*
* No need to do this stupid stuff:
*
* findDoc(LLEcollection, List(highSpeedCollection, loiteringCollection, lowSpeedCollection, rendezvousCollection, sailingCollection, stoppedCollection, withinAreaCollection), notCloseToPortsCollection, speedLimitsCollections)
*
* that tries to pair LLEs and HLEs. Just get the data at learning time
*
* */
/*
findDoc(LLEcollection, List(highSpeedCollection, loiteringCollection, lowSpeedCollection, rendezvousCollection, sailingCollection, stoppedCollection, withinAreaCollection), notCloseToPortsCollection, speedLimitsCollections)
*/
/*
val exmpls = mongoClient(dbName)("examples")
exmpls.drop()
val iter = getMaritimeData("Maritime-Aegean-whole", "highSpeedIn", 10)
var i = 0
iter foreach { x =>
println(i)
val e = x.exmplWithInertia
val entry = MongoDBObject("time" -> e.time) ++ ("annotation" -> e.annotation) ++ ("narrative" -> e.narrative)
exmpls.insert(entry)
i += 1
}
*/
}
def wholeDatasetToMongo(dbName: String) = {
}
def getMaritimeData(dbName: String, HLE: String, chunkSize: Int) = {
    def getHLEs(cursor: Iterator[DBObject], lleTime: Int, initiationPoint: Boolean) = {
cursor.foldLeft(List[String]()) { (atoms, hleDbObject) =>
val hle = hleDbObject.asInstanceOf[BasicDBObject].get("hle").toString
val atom = hle match {
case "highSpeedIn" | "withinArea" =>
val vessel = hleDbObject.asInstanceOf[BasicDBObject].get("vessel").toString
val area = hleDbObject.asInstanceOf[BasicDBObject].get("area").toString
if (!initiationPoint) s"""holdsAt($hle("$vessel","$area"),"$lleTime")""" else s"""holdsAt($hle("$vessel","$area"),"${lleTime + 1}")"""
case "loitering" | "lowSpeed" | "sailing" | "stopped" =>
val vessel = hleDbObject.asInstanceOf[BasicDBObject].get("vessel").toString
if (!initiationPoint) s"""holdsAt($hle("$vessel"),"$lleTime")""" else s"""holdsAt($hle("$vessel"),"${lleTime + 1}")"""
case "rendezVouz" =>
val v1 = hleDbObject.asInstanceOf[BasicDBObject].get("vessel1").toString
val v2 = hleDbObject.asInstanceOf[BasicDBObject].get("vessel2").toString
if (!initiationPoint) s"""holdsAt($hle("$v1","$v2"),"$lleTime")""" else s"""holdsAt($hle("$v1","$v2"),"${lleTime + 1}")"""
case _ => throw new RuntimeException(s"HLE name: $hle not found")
}
atoms :+ atom
}
}
val mc = MongoClient()
val lleCollection = mc(dbName)("lles")
val portsCollection = mc(dbName)("not-close-to-ports")
val speedLimitsCollection = mc(dbName)("speed-limits")
val hleCollection = HLE match {
case "highSpeedIn" => mc(dbName)("high_speed")
case "withinArea" => mc(dbName)("within-area")
case "loitering" => mc(dbName)("loitering")
case "lowSpeed" => mc(dbName)("low-speed")
case "sailing" => mc(dbName)("sailing")
case "stopped" => mc(dbName)("stopped")
case _ => throw new RuntimeException(s"Don't know this LLE: $HLE")
}
lleCollection.createIndex(MongoDBObject("time" -> 1))
val grouped = lleCollection.find().sort(MongoDBObject("time" -> 1)).grouped(chunkSize)
val chunked = grouped.map { docs =>
val (narrative, annotation) = docs.foldLeft(List[String](), List[String]()){ (accum, dbObject) =>
val (_narrative, _annotation) = (accum._1, accum._2)
val areas = dbObject.asInstanceOf[BasicDBObject].get("areas").asInstanceOf[BasicDBList].toList.map(x => x.toString)
val vessels = dbObject.asInstanceOf[BasicDBObject].get("vessels").asInstanceOf[BasicDBList].toList.map(x => x.toString)
val lles = dbObject.asInstanceOf[BasicDBObject].get("atoms").asInstanceOf[BasicDBList].toList.map(x => x.toString)
val currentTime = dbObject.asInstanceOf[BasicDBObject].get("time").toString
val vesselQueries = vessels.map(v => MongoDBObject("vessel" -> v) ++ ("time" -> currentTime))
val vs = vesselQueries flatMap (q => portsCollection.find(q))
val portsAtoms = vs.map{ x =>
val vessel = x.asInstanceOf[BasicDBObject].get("vessel").toString
s"""notCloseToPorts("$vessel","$currentTime")"""
}
val areaQueries = areas.map(a => MongoDBObject("area" -> a))
val as = areaQueries flatMap (q => speedLimitsCollection.find(q))
val speedLimitAtoms = as.map{ x =>
val area = x.asInstanceOf[BasicDBObject].get("area").toString
val speed = x.asInstanceOf[BasicDBObject].get("limit").toString
s"""speedLimit("$area","$speed")"""
}
val query1 = ("start_time" $lte currentTime.toInt) ++ ("end_time" $gte currentTime.toInt)
val query2 = "start_time" $eq currentTime.toInt + 1
val hledocs1 = hleCollection.find(query1)
val hledocs2 = hleCollection.find(query2)
val initiationPoints = getHLEs(hledocs2, currentTime.toInt, initiationPoint = true)
val medianPoints = getHLEs(hledocs1, currentTime.toInt, initiationPoint = false)
(_narrative ++ lles ++ portsAtoms ++ speedLimitAtoms, (_annotation ++ initiationPoints ++ medianPoints).distinct)
}
val mergedExmplTime = docs.head.asInstanceOf[BasicDBObject].get("time").toString
val _merged = new Example(annot = annotation, nar = narrative, _time = mergedExmplTime)
//new Exmpl(_id = _merged.time, exampleWithInertia = _merged)
_merged
}
chunked
}
/*
def findDoc(lleCollection: MongoCollection, hleCollections: List[MongoCollection],
portsCollection: MongoCollection, speedLimitsCollection: MongoCollection, DBToWriteTo: String) = {
// That's necessary to avoid the 32MB limit with in-memory sort.
// It also makes streaming the data from DBs much faster.
lleCollection.createIndex(MongoDBObject("time" -> 1))
//hleCollections.foreach(x => x.createIndex(MongoDBObject("time" -> 1))) // no "time" field here after all
lleCollection.find().sort(MongoDBObject("time" -> 1)).foreach { lledoc =>
var hles = List[String]()
val lleTime = lledoc.asInstanceOf[BasicDBObject].get("time").toString.toInt
hleCollections.foreach { hleColl =>
val query = ("start_time" $lte lleTime) ++ ("end_time" $gte lleTime)
val hledocs = hleColl.find(query)
if (hledocs.nonEmpty) {
for (doc <- hledocs) {
val hle = doc.asInstanceOf[BasicDBObject].get("hle").toString
hle match {
case "highSpeedIn" | "withinArea" =>
val vessel = doc.asInstanceOf[BasicDBObject].get("vessel").toString
val area = doc.asInstanceOf[BasicDBObject].get("area").toString
val atom = s"""holdsAt($hle("$vessel","$area"),$lleTime)"""
hles = hles :+ atom
case "loitering" | "lowSpeed" | "sailing" | "stopped" =>
val vessel = doc.asInstanceOf[BasicDBObject].get("vessel").toString
val atom = s"""holdsAt($hle("$vessel"),$lleTime)"""
hles = hles :+ atom
case "rendezVouz" =>
val v1 = doc.asInstanceOf[BasicDBObject].get("vessel1").toString
val v2 = doc.asInstanceOf[BasicDBObject].get("vessel2").toString
val atom = s"""holdsAt($hle("$v1","$v2"),$lleTime)"""
hles = hles :+ atom
case _ => throw new RuntimeException(s"HLE name: $hle not found")
}
}
}
}
// Get info for vessels in ports
val lledocVessels = lledoc.asInstanceOf[BasicDBObject].get("vessels").asInstanceOf[BasicDBList].toList.map(x => x.toString)
val vesselQueries = lledocVessels.map(v => MongoDBObject("vessel" -> v) ++ ("time" -> lleTime.toString) )
val t = vesselQueries flatMap { q =>
portsCollection.find(q)
}
//println(getAtomsField(lledoc))
val portsAtoms = t.map{ x =>
val vessel = x.asInstanceOf[BasicDBObject].get("vessel").toString
s"""notCloseToPorts("$vessel","$lleTime")"""
}
// Get info on speed limits
val lledocAreas = lledoc.asInstanceOf[BasicDBObject].get("areas").asInstanceOf[BasicDBList].toList.map(x => x.toString)
val areaQueries = lledocAreas.map(a => MongoDBObject("area" -> a) )
val t1 = areaQueries flatMap { q =>
speedLimitsCollection.find(q)
}
val speedLimitAtoms = t1.map{ x =>
val area = x.asInstanceOf[BasicDBObject].get("area").toString
val speed = x.asInstanceOf[BasicDBObject].get("limit").toString
s"""speedLimit("$area","$speed")"""
}
val narrative = getAtomsField(lledoc) ++ portsAtoms ++ speedLimitAtoms
val annotation = hles
val entry = MongoDBObject("time" -> lleTime) ++ ("annotation" -> annotation) ++ ("narrative" -> narrative)
trainingExamplesCollection.insert(entry)
println(s"Inserted $lleTime")
}
}
*/
def getAtomsField(x: DBObject) = {
x.asInstanceOf[BasicDBObject].get("atoms").asInstanceOf[BasicDBList].toList.map(x => x.toString)
}
def toMongo_notCloseToPorts(path: String, collection: MongoCollection) = {
val info = Source.fromFile(path).getLines.toList.map{ x =>
val s = x.split("\\|")
val time = s(0)
val vessel = s(1)
(vessel, time)
}
info.foreach { x =>
val (vessel, time) = (x._1, x._2)
val entry = MongoDBObject("hle" -> "ports") ++ ("vessel" -> vessel) ++ ("time" -> time)
//println(s"Inserted $entry")
collection.insert(entry)
}
}
def toMongo_speedLimits(path: String, collection: MongoCollection) = {
val info = Source.fromFile(path).getLines.toList.map{ x =>
val s = x.split("\\|")
val area = s(0)
val speed = s(1)
(area, speed)
}
info.foreach { x =>
val (area, speed) = (x._1, x._2)
val entry = MongoDBObject("hle" -> "speed-limits") ++ ("area" -> area) ++ ("limit" -> speed)
//println(s"Inserted $entry")
collection.insert(entry)
}
}
/*
* Handles:
*
* highSpeedIn
* withinArea
*
* that have the same schema
* */
def toMongo_areaHLEs(path: String, collection: MongoCollection) = {
val hles = Source.fromFile(path).getLines.toList.map{ x =>
val s = x.split("\\|")
val startTime = s(4)
val endTime = (s(5).toInt - 1).toString
val hle = s(0)
val vessel = s(1)
val area = s(2)
(hle, vessel, area, startTime, endTime)
}
hles.foreach { hle =>
val (hleName, vessel, area, startTime, endTime) = (hle._1, hle._2, hle._3, hle._4, hle._5)
val entry = MongoDBObject("hle" -> hleName) ++ ("start_time" -> startTime.toInt) ++ ("end_time" -> endTime.toInt) ++ ("vessel" -> vessel) ++ ("area" -> area) ++
("text_description" -> "The predicate schema for this HLE is holdsAt(HLEName(Vessel,Area),Time). There are two HLEs that subscribe to this schema: highSpeedIn and withinArea.")
//println(s"Inserted $entry")
collection.insert(entry)
}
}
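  /* Worked sketch on the sample line quoted in the comment at the top of this
   * object; shows what toMongo_areaHLEs extracts from one CSV row. */
  def demoAreaHLELine(): Unit = {
    val s = "highSpeedIn|237029400|area159431200|true|1451654951|1451655191".split("\\|")
    // (hle, vessel, area, start, end) = (highSpeedIn, 237029400, area159431200, 1451654951, 1451655190)
    println((s(0), s(1), s(2), s(4), (s(5).toInt - 1).toString))
  }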
/* Handles the rendezVouz HLE */
def toMongo_rendezvous(rendezvousPath: String, collection: MongoCollection) = {
val hles = Source.fromFile(rendezvousPath).getLines.toList.map { x =>
val s = x.split("\\|")
val startTime = s(4)
val endTime = (s(5).toInt - 1).toString
val hle = s(0)
val vessel1 = s(1)
val vessel2 = s(2)
(hle, vessel1, vessel2, startTime, endTime)
}
hles.foreach { hle =>
val (hleName, vessel1, vessel2, startTime, endTime) = (hle._1, hle._2, hle._3, hle._4, hle._5)
val entry = MongoDBObject("hle" -> hleName) ++ ("start_time" -> startTime.toInt) ++ ("end_time" -> endTime.toInt) ++ ("vessel1" -> vessel1) ++ ("vessel2" -> vessel2) ++
("text_description" -> "The predicate schema for this HLE is holdsAt(rendezVouz(Vessel1,Vessel2),Time).")
//println(s"Inserted $entry")
collection.insert(entry)
}
}
  /*
   * Handles:
   *
   * loitering
   * low-speed
   * sailing
   * stopped
   *
   * that have the same schema, so they are handled by the same method.
   * */
def toMongo_similarHLEs(path: String, collection: MongoCollection) = {
def hles = Source.fromFile(path).getLines.toList.map { x =>
val s = x.split("\\|")
val startTime = s(3)
val endTime = (s(4).toInt - 1).toString
val hle = s(0)
val vessel = s(1)
(hle, vessel, startTime, endTime)
}
hles.foreach { hle =>
val (hleName, vessel, startTime, endTime) = (hle._1, hle._2, hle._3, hle._4)
val entry = MongoDBObject("hle" -> hleName) ++ ("start_time" -> startTime.toInt) ++ ("end_time" -> endTime.toInt) ++ ("vessel" -> vessel) ++
("text_description" -> "The predicate schema for this HLE is holdsAt(HLEName(Vessel),Time). The HLEs that subscribe to this schema are: loitering, low-speed, sailing, stopped")
println(s"Inserted $entry")
collection.insert(entry)
}
}
def storeLLEsToMongo(datasetPath: String, LLEcollection: MongoCollection) = {
println("Processing LLES...")
val hles_raw = Source.fromFile(datasetPath).getLines.toList
// filter out proximity for now
val hles_raw_no_proximity = hles_raw.filter(x => !x.contains("HoldsFor") && !x.contains("coord"))
// Group the LLEs by time
val grouped = hles_raw_no_proximity.groupBy(x => getTime(x))
grouped foreach { case (k, v) =>
val llesPerTimePoint = v.map(z => LLEtoPredicateForm(z))
val vessels = llesPerTimePoint.map(x => x.vessel).distinct
val areas = llesPerTimePoint.map(x => x.area).distinct.filter(z => z != "None")
val atoms = llesPerTimePoint.map(x => x.atom)
val entry = MongoDBObject("time" -> k.toInt) ++ ("areas" -> areas) ++ ("vessels" -> vessels) ++ ("atoms" -> atoms)
LLEcollection.insert(entry)
}
}
/* Convert a data point to predicate form. This does not work with proximity for now.
* This returns the predicate itself, the vessel and the area (if an area is involved).
*
* */
def LLEtoPredicateForm(x: String) = {
var area = "None"
var predicate = "None"
val info = x.split("HappensAt")(1)
val _info = info.split("\\]")
val time = _info(1).trim
val rest = _info(0).split("\\[")(1)
val lle = rest.split(" ")(0)
val vessel = rest.split(" ")(1)
lle match {
case "gap_start" =>
//HappensAt [gap_start 271043753] 1451802715
predicate = s"""happensAt(gap_start("$vessel"),"$time")"""
case "velocity" =>
//HappensAt [velocity 240675000 0 270.00005134150797] 1451802711
//the 4th parameter in [velocity 240675000 0 270.00005134150797] is heading, which is not used anywhere
val speed = rest.split(" ")(2)
predicate = s"""happensAt(velocity("$vessel","$speed"),"$time")"""
case "change_in_speed_start" =>
//HappensAt [change_in_speed_start 237955000] 1451802743
predicate = s"""happensAt(change_in_speed_start("$vessel"),"$time")"""
case "stop_start" =>
//HappensAt [stop_start 636013060] 1451802771
predicate = s"""happensAt(stop_start("$vessel"),"$time")"""
case "change_in_heading" =>
//HappensAt [change_in_heading 240096000] 1451802787
predicate = s"""happensAt(change_in_heading("$vessel"),"$time")"""
case "isInArea" =>
//HappensAt [isInArea 239471800 area300240700] 1451802848
area = rest.split(" ")(2)
predicate = s"""happensAt(isInArea("$vessel", "$area"),"$time")"""
case "change_in_speed_end" =>
//HappensAt [change_in_speed_end 237144200] 1451802872
predicate = s"""happensAt(change_in_speed_end("$vessel"),"$time")"""
case "slow_motion_start" =>
//HappensAt [slow_motion_start 240802000] 1451802892
predicate = s"""happensAt(slow_motion_start("$vessel"),"$time")"""
case "stop_end" =>
//HappensAt [stop_end 356460000] 1451802924
predicate = s"""happensAt(stop_end("$vessel"),"$time")"""
case "gap_end" =>
//HappensAt [gap_end 271043772] 1451802920
predicate = s"""happensAt(gap_end("$vessel"),"$time")"""
case "leavesArea" =>
//HappensAt [leavesArea 239371500 area300674000] 1451802925
area = rest.split(" ")(2)
predicate = s"""happensAt(leavesArea("$vessel","$area"),"$time")"""
case "slow_motion_end" =>
predicate = s"""happensAt(slow_motion_end("$vessel"),"$time")"""
//HappensAt [slow_motion_end 271044099] 1451802937
}
LLE(lle, predicate, time, vessel, area)
}
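  /* A minimal sketch of LLEtoPredicateForm on a made-up line following the
   * velocity schema documented above (the numbers are hypothetical). */
  def demoLLEtoPredicateForm(): Unit = {
    val lle = LLEtoPredicateForm("HappensAt [velocity 240675000 0 270.0] 1451802711")
    // prints: happensAt(velocity("240675000","0"),"1451802711")
    println(lle.atom)
  }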
/* Get the time stamp from a line. */
def getTime(dataLine: String) = {
// "HoldsFor" is used for vessel proximity we won't use it now.
if (!dataLine.contains("HoldsFor")) {
val info = dataLine.split("HappensAt")(1)
val _info = info.split("\\]")
_info(1).trim
} else {
"None"
}
}
  /* Utility function, returns the (distinct) LLE names from the LLE file.
   * Run it like:
   *
   * val lines = Source.fromFile(datasetPath).getLines.toList
   * getLLENames(lines)
   *
   * */
def getLLENames(data: List[String]) = data.map(x => getLLE(x)).distinct foreach println
def getLLE(dataLine: String) = {
val info = if (dataLine.contains("HappensAt")) dataLine.split("HappensAt")(1) else dataLine.split("HoldsFor")(1)
val _info = info.split("\\]")
val time = _info(1).trim
val rest = _info(0).split("\\[")(1)
rest.split(" ")(0)
}
}
OLED | OLED-master/src/main/scala/experiments/datautils/maritime_data/first_version_doesnt_work/MaritimeDataToMongo.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.maritime_data.first_version_doesnt_work
/**
* Created by nkatz on 4/26/17.
*/
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.MongoClient
import com.mongodb.casbah.commons.MongoDBObject
import logic.Examples.Example
/**
*
* THIS IS THE OLD VERSION THAT WORKS WITH THE DataPreProcessing CODE.
* THERE IS AN ERROR IN SUBTRACTING 1 FROM TIMES, YOU DON'T HAVE TO DO THAT.
*
*
*
*
*/
object MaritimeDataToMongo {
def main(args: Array[String]) = {
storeMaritimeData_Whole()
//val mc = MongoClient()
//mc("Maritime-Aegean-All_HLEs-Joined")("examples").find().foreach(println)
}
def storeMaritimeData_Whole() = {
val mc = MongoClient()
val exmpls = getMaritimeData_Whole("Maritime-Aegean-whole", chunkSize = 1, mc)
val newCollection = mc("Maritime-Aegean-All_HLEs-Joined")("examples")
var i = 0
for (x <- exmpls) {
println(i)
val e = x
val entry = MongoDBObject("time" -> e.time) ++ ("annotation" -> e.annotation) ++ ("narrative" -> e.narrative)
newCollection.insert(entry)
i += 1
}
}
/* Try to get all data at once, for all HLEs */
def getMaritimeData_Whole(readFromDB: String, chunkSize: Int, mc: MongoClient) = {
def getHLEs(cursor: Iterator[DBObject], lleTime: Int, initiationPoint: Boolean) = {
cursor.foldLeft(List[String]()) { (atoms, hleDbObject) =>
val hle = hleDbObject.asInstanceOf[BasicDBObject].get("hle").toString
val atom = hle match {
case "highSpeedIn" | "withinArea" =>
val vessel = hleDbObject.asInstanceOf[BasicDBObject].get("vessel").toString
val area = hleDbObject.asInstanceOf[BasicDBObject].get("area").toString
if (!initiationPoint) s"""holdsAt($hle("$vessel","$area"),"$lleTime")""" else s"""holdsAt($hle("$vessel","$area"),"${lleTime + 1}")"""
case "loitering" | "lowSpeed" | "sailing" | "stopped" =>
val vessel = hleDbObject.asInstanceOf[BasicDBObject].get("vessel").toString
if (!initiationPoint) s"""holdsAt($hle("$vessel"),"$lleTime")""" else s"""holdsAt($hle("$vessel"),"${lleTime + 1}")"""
case "rendezVouz" =>
val v1 = hleDbObject.asInstanceOf[BasicDBObject].get("vessel1").toString
val v2 = hleDbObject.asInstanceOf[BasicDBObject].get("vessel2").toString
if (!initiationPoint) s"""holdsAt($hle("$v1","$v2"),"$lleTime")""" else s"""holdsAt($hle("$v1","$v2"),"${lleTime + 1}")"""
case _ => throw new RuntimeException(s"HLE name: $hle not found")
}
atoms :+ atom
}
}
//val mc = MongoClient()
val lleCollection = mc(readFromDB)("lles")
val portsCollection = mc(readFromDB)("not-close-to-ports")
val speedLimitsCollection = mc(readFromDB)("speed-limits")
val hleCollections = List(mc(readFromDB)("high_speed"), mc(readFromDB)("within-area"),
mc(readFromDB)("loitering"), mc(readFromDB)("low-speed"), mc(readFromDB)("sailing"), mc(readFromDB)("stopped"))
/*
val hleCollection = HLE match {
case "highSpeedIn" => mc(dbName)("high_speed")
case "withinArea" => mc(dbName)("within-area")
case "loitering" => mc(dbName)("loitering")
case "lowSpeed" => mc(dbName)("low-speed")
case "sailing" => mc(dbName)("sailing")
case "stopped" => mc(dbName)("stopped")
case _ => throw new RuntimeException(s"Don't know this LLE: $HLE")
}
*/
lleCollection.createIndex(MongoDBObject("time" -> 1))
val grouped = lleCollection.find().sort(MongoDBObject("time" -> 1)).grouped(chunkSize)
val chunked = grouped.map { docs =>
val (narrative, annotation) = docs.foldLeft(List[String](), List[String]()){ (accum, dbObject) =>
val (_narrative, _annotation) = (accum._1, accum._2)
val areas = dbObject.asInstanceOf[BasicDBObject].get("areas").asInstanceOf[BasicDBList].toList.map(x => x.toString)
val vessels = dbObject.asInstanceOf[BasicDBObject].get("vessels").asInstanceOf[BasicDBList].toList.map(x => x.toString)
val lles = dbObject.asInstanceOf[BasicDBObject].get("atoms").asInstanceOf[BasicDBList].toList.map(x => x.toString)
val currentTime = dbObject.asInstanceOf[BasicDBObject].get("time").toString
val vesselQueries = vessels.map(v => MongoDBObject("vessel" -> v) ++ ("time" -> currentTime))
val vs = vesselQueries flatMap (q => portsCollection.find(q))
val portsAtoms = vs.map{ x =>
val vessel = x.asInstanceOf[BasicDBObject].get("vessel").toString
s"""notCloseToPorts("$vessel","$currentTime")"""
}
val areaQueries = areas.map(a => MongoDBObject("area" -> a))
val as = areaQueries flatMap (q => speedLimitsCollection.find(q))
val speedLimitAtoms = as.map{ x =>
val area = x.asInstanceOf[BasicDBObject].get("area").toString
val speed = x.asInstanceOf[BasicDBObject].get("limit").toString
s"""speedLimit("$area","$speed")"""
}
val query1 = ("start_time" $lte currentTime.toInt) ++ ("end_time" $gte currentTime.toInt)
val query2 = "start_time" $eq currentTime.toInt + 1
val hledocs1 = hleCollections.map(c => c.find(query1))
val hledocs2 = hleCollections.map(c => c.find(query2))
val initiationPoints = hledocs2.flatMap(x => getHLEs(x, currentTime.toInt, initiationPoint = true))
val medianPoints = hledocs1.flatMap(x => getHLEs(x, currentTime.toInt, initiationPoint = false))
(_narrative ++ lles ++ portsAtoms ++ speedLimitAtoms, (_annotation ++ initiationPoints ++ medianPoints).distinct)
}
val mergedExmplTime = docs.head.asInstanceOf[BasicDBObject].get("time").toString
val _merged = new Example(annot = annotation, nar = narrative, _time = mergedExmplTime)
//new Exmpl(_id = _merged.time, exampleWithInertia = _merged)
_merged
}
chunked
}
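  /* A minimal usage sketch (DB name assumed from storeMaritimeData_Whole above):
   * stream the joined examples and print the first few time stamps. */
  def demoStreamExamples(): Unit = {
    val mc = MongoClient()
    getMaritimeData_Whole("Maritime-Aegean-whole", chunkSize = 1, mc).take(3).foreach(e => println(e.time))
  }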
}
OLED | OLED-master/src/main/scala/experiments/datautils/maritime_data/new_attempt/MaritimeDataFetcher.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.maritime_data.new_attempt
import com.mongodb.{BasicDBList, BasicDBObject}
import com.mongodb.casbah.MongoClient
import com.mongodb.casbah.Imports._
import logic.Examples.Example
import scala.collection.immutable.HashSet
/**
* Created by nkatz on 7/4/17.
*/
object MaritimeDataFetcher extends App {
val dbName = "maritime-brest"
val mongoClient = MongoClient()
private val llesCollection = mongoClient(dbName)("lles")
llesCollection.createIndex(MongoDBObject("time" -> 1))
private val highSpeedCollection = mongoClient(dbName)("highSpeedIn")
highSpeedCollection.createIndex(MongoDBObject("vessel" -> 1))
//private val withinAreaCollection = mongoClient(dbName)("withinArea")
//withinAreaCollection.createIndex(MongoDBObject("vessel"))
private val sailingCollection = mongoClient(dbName)("sailing")
sailingCollection.createIndex(MongoDBObject("vessel" -> 1))
private val stoppedCollection = mongoClient(dbName)("stopped")
stoppedCollection.createIndex(MongoDBObject("vessel" -> 1))
//private val loiteringCollection = mongoClient(dbName)("loitering")
//loiteringCollection.createIndex(MongoDBObject("vessel"-> 1))
private val rendezvousCollection = mongoClient(dbName)("rendezVous")
rendezvousCollection.createIndex(MongoDBObject("vessel" -> 1))
private val lowSpeedCollection = mongoClient(dbName)("lowSpeed")
lowSpeedCollection.createIndex(MongoDBObject("vessel" -> 1))
private val speedLimitsCollection = mongoClient(dbName)("speed_limits")
speedLimitsCollection.createIndex(MongoDBObject("vessel" -> 1))
private val closeToPortsCollection = mongoClient(dbName)("ports")
closeToPortsCollection.createIndex(MongoDBObject("time" -> 1))
private val proximityCollection = mongoClient(dbName)("proximity")
proximityCollection.createIndex(MongoDBObject("vessel" -> 1))
/*
val collectionsMap = Map("highSpeedIn" -> highSpeedCollection, "stopped" -> stoppedCollection,
"lowSpeed" -> lowSpeedCollection, "sailing" -> sailingCollection, "rendezVous" -> rendezvousCollection,
"loitering" -> loiteringCollection, "withinArea" -> withinAreaCollection)
*/
//val hleCollections = List(highSpeedCollection, stoppedCollection, lowSpeedCollection, sailingCollection, rendezvousCollection)
val hleCollections = List(highSpeedCollection)
val newCollection = mongoClient(dbName)("examples")
newCollection.dropCollection()
newCollection.createIndex(MongoDBObject("time" -> 1))
println("Creating times hash set")
val times = HashSet() ++ Runner.getAllTimes()
getData(times)
def getData(lleActualTimes: HashSet[Int]) = {
var counter = 0
lleActualTimes foreach { time =>
val lleResults = llesCollection.find(MongoDBObject("time" -> time))
val (narrativeAtoms, notCloseToPortsAtoms, areas_, vessels_) = lleResults.foldLeft(List[String](), List[String](), List[String](), List[String]()) { (accum, o) =>
val a = getLLEInfo(o)
val (atom, portAtom, as, vs) = (a._1, a._2, a._3, a._4)
(accum._1 :+ atom, accum._2 ++ portAtom, accum._3 ++ as, accum._4 ++ vs)
}
val vessels = vessels_.distinct
///*
val hleAtoms = hleCollections.foldLeft(List[String]()) { (x, collection) =>
val r = vessels flatMap { vessel =>
collection.find(MongoDBObject("vessel" -> vessel)).map(obj => obj.asInstanceOf[BasicDBObject].get("atom").toString)
}
x ++ r
}
//*/
//val hleAtoms = List[String]()
val areas = areas_.distinct.filter(_ != "None")
val speedLimitAtoms = areas.foldLeft(List[String]()){ (k, area) =>
val results = speedLimitsCollection.find(MongoDBObject("area" -> area))
val resToStrs = results.map (obj => obj.asInstanceOf[BasicDBObject].get("atom").toString)
k ++ resToStrs
}
val narrative = narrativeAtoms ++ speedLimitAtoms ++ notCloseToPortsAtoms
if (narrative.nonEmpty) {
val entry = MongoDBObject("annotation" -> hleAtoms) ++ ("narrative" -> narrative) ++ ("time" -> counter)
println(counter, time)
}
counter += 1
}
}
def getLLEInfo(o: DBObject) = {
val atom = o.asInstanceOf[BasicDBObject].get("lle").toString
val vs = o.asInstanceOf[BasicDBObject].get("vessels").asInstanceOf[BasicDBList].toList.map(_.toString)
val time = o.asInstanceOf[BasicDBObject].get("time").toString.toInt
val r = closeToPortsCollection.find(MongoDBObject("time" -> time))
val notCloseToPortsAtoms = r.map(obj => obj.asInstanceOf[BasicDBObject].get("atom").toString)
val as = o.asInstanceOf[BasicDBObject].get("areas").asInstanceOf[BasicDBList].toList.map(_.toString)
(atom, notCloseToPortsAtoms, as, vs)
}
}
OLED | OLED-master/src/main/scala/experiments/datautils/maritime_data/new_attempt/MaritimeToMongo.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.maritime_data.new_attempt
import akka.actor.{Actor, ActorSystem, PoisonPill, Props}
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.{MongoClient, MongoCollection}
import scala.io.Source
import scala.collection.immutable.HashSet
/**
* Created by nkatz on 7/4/17.
*/
object Runner {
def main(args: Array[String]) = {
println("Creating times hash set")
val times = HashSet() ++ getAllTimes()
val system = ActorSystem("MatitimePreprocessing")
system.actorOf(Props(new Master(times)), name = "Master") ! "go"
}
private val path = "/home/nkatz/dev/maritime/nkatz_brest/1-core"
private val datasetFile = path + "/dataset.txt" // The LLEs file
def getAllTimes() = {
val lleTimes = Source.fromFile(datasetFile).getLines.map(x => getTimeLLEs(x)).filter(_ != "None").toVector.distinct
    val proximityTimes = getProximityTimes()
    (lleTimes ++ proximityTimes).distinct.map(_.toInt).sorted
}
private def getTimeLLEs(dataLine: String) = {
// "HoldsFor" is used for vessel proximity we won't use it now.
if (!dataLine.contains("HoldsFor")) {
val info = dataLine.split("HappensAt")(1)
val _info = info.split("\\]")
_info(1).trim
} else {
"None"
}
}
private def getProximityTimes() = {
val lines = Source.fromFile(datasetFile).getLines.filter(x => x.contains("proximity"))
lines.foldLeft(Vector[String]()){ (accum, x) =>
val s = x.split("=")(0).split(" ")
val z = x.split("=")(1).trim().split(" ")(1).split("\\(")(1).split("\\)")(0).split("-")
val startTime = z(0)
val endTime = z(1)
accum ++ List(startTime, endTime)
}
}
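  /* A sketch of the proximity-line parsing used above, on a made-up line shaped
   * to match this parser (the real dataset layout may differ in detail). */
  def demoProximityParse(): Unit = {
    val x = "HoldsFor proximity 227069000 228037600 = true (1443650401-1443656591)"
    val s = x.split("=")(0).split(" ")
    val z = x.split("=")(1).trim().split(" ")(1).split("\\(")(1).split("\\)")(0).split("-")
    println(s"vessels=(${s(2)},${s(3)}) interval=(${z(0)},${z(1)})")
  }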
}
class Master(times: HashSet[Int]) extends Actor {
val path = "/home/nkatz/dev/maritime/nkatz_brest/1-core"
private val datasetFile = path + "/dataset.txt" // The LLEs file
private val speedLimitsFile = path + "/static_data/all_areas/areas_speed_limits.csv"
private val closeToPortsFile = path + "/recognition/close_to_ports.csv" // this has a different schema than the other hles
private val highSpeedFile = path + "/recognition/highSpeedIn-no-infs.csv"
private val loiteringFile = path + "/recognition/loitering-no-infs.csv"
private val lowSpeedFile = path + "/recognition/lowSpeed-no-infs.csv"
private val sailingFile = path + "/recognition/sailing-no-infs.csv"
private val stoppedFile = path + "/recognition/stopped-no-infs.csv"
private val withinAreaFile = path + "/recognition/withinArea-no-infs.csv"
private val rendezVousFile = path + "/recognition/rendezVouz-no-infs.csv"
val dbName = "maritime-brest"
val mongoClient = MongoClient()
mongoClient.dropDatabase(dbName)
var counter = 9 // THIS MUST BE ADAPTED IF THE JOBS CHANGE
def receive = {
case "go" =>
println("Getting times")
// withinArea & loitering are missing
context.actorOf(Props(new LLEsToMongo(datasetFile, mongoClient(dbName)("lles"))), name = "lles-actor") ! "go"
context.actorOf(Props(new PortsToMongo(closeToPortsFile, mongoClient(dbName)("ports"))), name = "ports-actor") ! "go"
context.actorOf(Props(new ProximityToMongo(datasetFile, this.times, mongoClient(dbName)("proximity"))), name = "proximity-actor") ! "go"
context.actorOf(Props(new SpeedLimitsToMongo(speedLimitsFile, mongoClient(dbName)("speed_limits"))), name = "speed-limit-actor") ! "go"
context.actorOf(Props(new HighLevelEventToMongo(highSpeedFile, this.times, "highSpeedIn", mongoClient(dbName)("highSpeedIn"))), name = "high-speed-actor") ! "go"
      context.actorOf(Props(new HighLevelEventToMongo(rendezVousFile, this.times, "rendezVous", mongoClient(dbName)("rendezVous"))), name = "rendezvous-actor") ! "go"
context.actorOf(Props(new HighLevelEventToMongo(lowSpeedFile, this.times, "lowSpeed", mongoClient(dbName)("lowSpeed"))), name = "low-speed-actor") ! "go"
context.actorOf(Props(new HighLevelEventToMongo(sailingFile, this.times, "sailing", mongoClient(dbName)("sailing"))), name = "sailing-actor") ! "go"
context.actorOf(Props(new HighLevelEventToMongo(stoppedFile, this.times, "stopped", mongoClient(dbName)("stopped"))), name = "stopped-actor") ! "go"
case "done" =>
this.counter -= 1
println(s"$counter jobs remaining")
if (this.counter == 0) {
context.system.terminate()
}
}
}
class HighLevelEventToMongo(val dataPath: String, val times: HashSet[Int], val hle: String, val collection: MongoCollection) extends Actor {
def receive = {
case "go" =>
println(s"Starting with ${this.hle}")
HLEsToMongo(this.dataPath, this.times, this.collection)
case "done" =>
println(s"Finished with ${this.hle}")
context.parent ! "done"
self ! PoisonPill
}
def HLEsToMongo(path: String, times: HashSet[Int], collection: MongoCollection) = {
//collection.createIndex(MongoDBObject("vessel"))
val data = Source.fromFile(path).getLines.filter(x => !x.contains("inf"))
this.hle match {
case "highSpeedIn" | "withinArea" =>
/*
def insertAll(startTime: Int, endTime: Int, vessel: String, area: String) = {
insert(startTime, vessel, area)
val interval = for (x <- startTime to endTime if this.times.contains(x)) yield x
interval foreach { time => insert(time, vessel, area) }
}
def insert(time: Int, vessel: String, area: String) = {
val atom = s"""holdsAt(${this.hle}("$vessel","$area"),"$time")"""
val entry = MongoDBObject("time" -> time) ++ ("atom" -> atom)
collection.insert(entry)
}
*/
data foreach { x =>
val s = x.split("\\|")
val startTime = s(4).toInt
val endTime = s(5).toInt - 1
val vessel = s(1)
val area = s(2)
//insertAll(startTime, endTime, vessel, area)
val atom = s"""holdsAt(${this.hle}("$vessel","$area"),interval("$startTime","$endTime"))"""
val entry = MongoDBObject("vessel" -> vessel) ++ ("atom" -> atom)
collection.insert(entry)
}
context.self ! "done"
case "loitering" | "stopped" | "lowSpeed" | "sailing" =>
/*
def insertAll(startTime: Int, endTime: Int, vessel: String) = {
insert(startTime, vessel)
val interval = for (x <- startTime to endTime if times.contains(x)) yield x
interval foreach { time => insert(time, vessel) }
}
def insert(time: Int, vessel: String) = {
val atom = s"""holdsAt(${this.hle}("$vessel"),"$time")"""
val entry = MongoDBObject("time" -> time) ++ ("atom" -> atom)
collection.insert(entry)
}
*/
data foreach { x =>
val s = x.split("\\|")
val startTime = s(3).toInt
val endTime = s(4).toInt - 1
val vessel = s(1)
//insertAll(startTime, endTime, vessel)
val atom = s"""holdsAt(${this.hle}("$vessel"),interval("$startTime","$endTime"))"""
val entry = MongoDBObject("vessel" -> vessel) ++ ("atom" -> atom)
collection.insert(entry)
}
context.self ! "done"
case "rendezVous" =>
/*
def insertAll(startTime: Int, endTime: Int, vessel1: String, vessel2: String) = {
insert(startTime, vessel1, vessel2)
val interval = for (x <- startTime to endTime if times.contains(x)) yield x
interval foreach { time => insert(time, vessel1, vessel2) }
}
def insert(time: Int, vessel1: String, vessel2: String) = {
val atom = s"""holdsAt(${this.hle}("$vessel1","$vessel2"),"$time")"""
val entry = MongoDBObject("time" -> time) ++ ("atom" -> atom)
collection.insert(entry)
}
*/
data foreach { x =>
val s = x.split("\\|")
val startTime = s(4).toInt
val endTime = s(5).toInt - 1
val vessel1 = s(1)
val vessel2 = s(0)
//insertAll(startTime, endTime, vessel1, vessel2)
val atom = s"""holdsAt(${this.hle}("$vessel1","$vessel2"),interval("$startTime","$endTime"))"""
// Insert the atom twice, once for each vessel, so that we can use both vessels as keys to
// fetch the proximity atom at learning time.
val entry1 = MongoDBObject("vessel" -> vessel1) ++ ("atom" -> atom)
val entry2 = MongoDBObject("vessel" -> vessel2) ++ ("atom" -> atom)
collection.insert(entry1)
collection.insert(entry2)
}
context.self ! "done"
}
}
}
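/* A minimal lookup sketch (names assumed): with the two-key insertion scheme in
 * HLEsToMongo above, an interval atom for rendezVous can be fetched through
 * either of its vessels. */
object VesselKeyLookupSketch {
  def atomsForVessel(collection: MongoCollection, vessel: String): List[String] =
    collection.find(MongoDBObject("vessel" -> vessel))
      .map(_.asInstanceOf[BasicDBObject].get("atom").toString)
      .toList
}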
class LLEsToMongo(val dataPath: String, val collection: MongoCollection) extends Actor {
def receive = {
case "go" =>
println("Starting with LLEs")
LLEsToMongo(collection)
case "done" =>
println("Finished with LLEs")
context.parent ! "done"
self ! PoisonPill
}
def LLEsToMongo(collection: MongoCollection) = {
//collection.createIndex(MongoDBObject("time") -> 1)
val data = Source.fromFile(dataPath).getLines //.filter(x => !x.contains("HoldsFor") && !x.contains("coord"))
while (data.hasNext) {
val x = data.next()
if (!x.contains("HoldsFor") && !x.contains("coord")) {
var area = "None"
var predicate = "None"
val info = x.split("HappensAt")(1)
val _info = info.split("\\]")
val time = _info(1).trim.toInt
val rest = _info(0).split("\\[")(1)
val lle = rest.split(" ")(0)
val vessel = rest.split(" ")(1)
lle match {
case "gap_start" =>
//HappensAt [gap_start 271043753] 1451802715
predicate = s"""happensAt(gap_start("$vessel"),"$time")"""
case "velocity" =>
//HappensAt [velocity 240675000 0 270.00005134150797] 1451802711
//the 4th parameter in [velocity 240675000 0 270.00005134150797] is heading, which is not used anywhere
val speed = rest.split(" ")(2)
predicate = s"""happensAt(velocity("$vessel","$speed"),"$time")"""
case "change_in_speed_start" =>
//HappensAt [change_in_speed_start 237955000] 1451802743
predicate = s"""happensAt(change_in_speed_start("$vessel"),"$time")"""
case "stop_start" =>
//HappensAt [stop_start 636013060] 1451802771
predicate = s"""happensAt(stop_start("$vessel"),"$time")"""
case "change_in_heading" =>
//HappensAt [change_in_heading 240096000] 1451802787
predicate = s"""happensAt(change_in_heading("$vessel"),"$time")"""
case "isInArea" =>
//HappensAt [isInArea 239471800 area300240700] 1451802848
area = rest.split(" ")(2)
predicate = s"""happensAt(isInArea("$vessel", "$area"),"$time")"""
case "change_in_speed_end" =>
//HappensAt [change_in_speed_end 237144200] 1451802872
predicate = s"""happensAt(change_in_speed_end("$vessel"),"$time")"""
case "slow_motion_start" =>
//HappensAt [slow_motion_start 240802000] 1451802892
predicate = s"""happensAt(slow_motion_start("$vessel"),"$time")"""
case "stop_end" =>
//HappensAt [stop_end 356460000] 1451802924
predicate = s"""happensAt(stop_end("$vessel"),"$time")"""
case "gap_end" =>
//HappensAt [gap_end 271043772] 1451802920
predicate = s"""happensAt(gap_end("$vessel"),"$time")"""
case "leavesArea" =>
//HappensAt [leavesArea 239371500 area300674000] 1451802925
area = rest.split(" ")(2)
predicate = s"""happensAt(leavesArea("$vessel","$area"),"$time")"""
case "slow_motion_end" =>
predicate = s"""happensAt(slow_motion_end("$vessel"),"$time")"""
//HappensAt [slow_motion_end 271044099] 1451802937
}
val entry = MongoDBObject("time" -> time.toInt) ++ ("lle" -> predicate) ++ ("hle" -> "None") ++ ("vessels" -> List(vessel)) ++ ("areas" -> List(area))
collection.insert(entry)
}
}
context.self ! "done"
}
}
class SpeedLimitsToMongo(val path: String, val collection: MongoCollection) extends Actor {
def receive = {
case "go" =>
println("Starting with speed limits")
toMongo(this.path, this.collection)
case "done" =>
println("Finished with speed limits")
context.parent ! "done"
self ! PoisonPill
}
def toMongo(path: String, collection: MongoCollection) = {
//collection.createIndex(MongoDBObject("area"))
val data = Source.fromFile(path).getLines
data foreach { x =>
val s = x.split("\\|")
val area = s(0)
val limit = s(1)
val atom = s"""speedLimit("$area","$limit")"""
val entry = MongoDBObject("area" -> area) ++ ("atom" -> atom)
collection.insert(entry)
}
context.self ! "done"
}
}
class ProximityToMongo(val path: String, val times: HashSet[Int], val collection: MongoCollection) extends Actor {
def receive = {
case "go" =>
println("Starting with proximity")
toMongo(this.path, this.times, this.collection)
case "done" =>
println("Finished with proximity")
context.parent ! "done"
self ! PoisonPill
}
def toMongo(path: String, times: HashSet[Int], collection: MongoCollection) = {
//collection.createIndex(MongoDBObject("vessel"))
// "Unfolding" the annotation intervals generates tenths of GBs of data.
// No obvious way to handle that and no need to do so (hopefully...)
/*
def insertAll(startTime: Int, endTime: Int, vessel1: String, vessel2: String) = {
insert(startTime, vessel1, vessel2)
val interval = for (x <- startTime to endTime if times.contains(x)) yield x
interval foreach { time => insert(time, vessel1, vessel2) }
}
def insert(time: Int, vessel1: String, vessel2: String) = {
val atom = s"""close("$vessel1","$vessel2"),"$time")"""
val entry = MongoDBObject("time" -> time) ++ ("atom" -> atom)
collection.insert(entry)
}
*/
val lines = Source.fromFile(path).getLines.filter(x => x.contains("proximity"))
lines foreach { x =>
val s = x.split("=")(0).split(" ")
val vessel1 = s(2)
val vessel2 = s(3)
val z = x.split("=")(1).trim().split(" ")(1).split("\\(")(1).split("\\)")(0).split("-")
val startTime = z(0).toInt
val endTime = z(1).toInt - 1
val atom = s"""close("$vessel1","$vessel2",interval("$startTime","$endTime"))"""
// Insert the atom twice, once for each vessel, so that we can use both vessels as keys to
// fetch the proximity atom at learning time.
val entry1 = MongoDBObject("vessel" -> vessel1) ++ ("atom" -> atom)
val entry2 = MongoDBObject("vessel" -> vessel2) ++ ("atom" -> atom)
collection.insert(entry1)
collection.insert(entry2)
}
self ! "done"
}
}
class PortsToMongo(val path: String, val collection: MongoCollection) extends Actor {
def receive = {
case "go" =>
println("Starting with ports")
toMongo(this.path, this.collection)
case "done" =>
println("Finished with ports")
context.parent ! "done"
self ! PoisonPill
}
def toMongo(path: String, collection: MongoCollection) = {
//collection.createIndex(MongoDBObject("vessel"))
val data = Source.fromFile(path).getLines
data foreach { x =>
val s = x.split("\\|")
val time = s(0).toInt
val vessel = s(1)
val atom = s"""notCloseToPorts("$vessel","${time.toString}")"""
val entry = MongoDBObject("time" -> time) ++ ("atom" -> atom) ++ ("vessel" -> vessel)
collection.insert(entry)
}
context.self ! "done"
}
}
OLED | OLED-master/src/main/scala/experiments/datautils/maritime_data/new_attempt/Tests.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.maritime_data.new_attempt
import scala.collection.immutable.HashSet
import scala.io.Source
/**
* Created by nkatz on 7/4/17.
*/
object Tests extends App {
val path = "/home/nkatz/dev/maritime/nkatz_brest/1-core"
private val datasetFile = path + "/dataset.txt" // The LLEs file
private val stoppedFile = path + "/recognition/stopped.csv"
val times = HashSet() ++ Runner.getAllTimes()
println(times.size)
val data = Source.fromFile(stoppedFile).getLines.filter(x => !x.contains("inf"))
data foreach { x =>
val s = x.split("\\|")
val startTime = s(3).toInt
val endTime = s(4).toInt - 1
val vessel = s(1)
val interval = for (x <- startTime to endTime if times.contains(x)) yield x
val l = interval.length
if (l > 40000) {
println(l, vessel)
}
}
}
OLED | OLED-master/src/main/scala/experiments/datautils/maritime_data/yet_another_attempt/MaritimeToMongo.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.maritime_data.yet_another_attempt
import com.mongodb.casbah.Imports.MongoDBObject
import com.mongodb.casbah.{MongoClient, MongoCollection}
import com.mongodb.casbah.Imports._
import scala.io.Source
/**
* Created by nkatz on 7/4/17.
*/
object MaritimeToMongo {
val path = "/home/nkatz/dev/maritime/nkatz_brest/1-core"
private val datasetFile = path + "/dataset.txt" // The LLEs file
private val speedLimitsFile = path + "/static_data/all_areas/areas_speed_limits.csv"
private val closeToPortsFile = path + "/recognition/close_to_ports.csv" // this has a different schema than the other hles
private val highSpeedFile = path + "/recognition/highSpeedIn-no-infs.csv"
private val loiteringFile = path + "/recognition/loitering-no-infs.csv"
private val lowSpeedFile = path + "/recognition/lowSpeed-no-infs.csv"
private val sailingFile = path + "/recognition/sailing-no-infs.csv"
private val stoppedFile = path + "/recognition/stopped-no-infs.csv"
private val withinAreaFile = path + "/recognition/withinArea-no-infs.csv"
private val rendezVousFile = path + "/recognition/rendezVouz-no-infs.csv"
// The key is vessel
var HLEMap = scala.collection.mutable.Map[String, scala.collection.mutable.Set[String]]()
// The key is time
var portsMap = scala.collection.mutable.Map[String, scala.collection.mutable.Set[String]]()
// The key is vessel
var proximityMap = scala.collection.mutable.Map[String, scala.collection.mutable.Set[String]]()
// the key is area
var speedLimitsMap = scala.collection.mutable.Map[String, scala.collection.mutable.Set[String]]()
// The key is time
var LLEMap = scala.collection.mutable.Map[Int, (scala.collection.mutable.Set[String], scala.collection.mutable.Set[String], scala.collection.mutable.Set[String])]()
def main(args: Array[String]) = {
val dbName = "maritime-brest"
val mongoClient = MongoClient()
mongoClient(dbName).dropDatabase()
val newCollection = mongoClient(dbName)("examples")
newCollection.dropCollection()
newCollection.createIndex(MongoDBObject("time" -> 1))
populateHLEMap(highSpeedFile, "highSpeedIn")
populatePortsMap(closeToPortsFile)
populateProximityMap(datasetFile)
populateSpeedLimitsMap(speedLimitsFile)
populateLLEsMap(datasetFile)
joinDataInMongo(newCollection)
}
def joinDataInMongo(newCollection: MongoCollection) = {
var counter = 0
val times = LLEMap.keySet
times foreach { time =>
val record = LLEMap(time)
val (lleAtoms, vessels, areas) = (record._1, record._2, record._3)
val hleAtoms = vessels.flatMap { v =>
if (HLEMap.contains(v)) HLEMap(v)
else scala.collection.mutable.Set[String]()
}
val proximityAtoms = vessels.flatMap { v =>
if (proximityMap.contains(v)) proximityMap(v)
else scala.collection.mutable.Set[String]()
}
val closeToPortsAtoms = {
if (portsMap.contains(time.toString)) portsMap(time.toString)
else scala.collection.mutable.Set[String]()
}
val speedLimitAtoms = areas.flatMap { a =>
if (speedLimitsMap.contains(a)) speedLimitsMap(a)
else scala.collection.mutable.Set[String]()
}.filter(p => p != "None")
val narrative = lleAtoms ++ proximityAtoms ++ closeToPortsAtoms ++ speedLimitAtoms
val entry = MongoDBObject("annotation" -> hleAtoms) ++ ("narrative" -> narrative) ++ ("time" -> time.toInt)
newCollection.insert(entry)
counter += 1
println(counter)
}
}
def LLEsToMongo(dataPath: String, collection: MongoCollection) = {
println("Inserting LLEs to mongo")
collection.createIndex(MongoDBObject("time" -> 1))
val data = Source.fromFile(dataPath).getLines //.filter(x => !x.contains("HoldsFor") && !x.contains("coord"))
while (data.hasNext) {
val x = data.next()
if (!x.contains("HoldsFor") && !x.contains("coord")) {
var area = "None"
var predicate = "None"
val info = x.split("HappensAt")(1)
val _info = info.split("\\]")
val time = _info(1).trim.toInt
val rest = _info(0).split("\\[")(1)
val lle = rest.split(" ")(0)
val vessel = rest.split(" ")(1)
lle match {
case "gap_start" =>
//HappensAt [gap_start 271043753] 1451802715
predicate = s"""happensAt(gap_start("$vessel"),"$time")"""
case "velocity" =>
//HappensAt [velocity 240675000 0 270.00005134150797] 1451802711
//the 4th parameter in [velocity 240675000 0 270.00005134150797] is heading, which is not used anywhere
val speed = rest.split(" ")(2)
predicate = s"""happensAt(velocity("$vessel","$speed"),"$time")"""
case "change_in_speed_start" =>
//HappensAt [change_in_speed_start 237955000] 1451802743
predicate = s"""happensAt(change_in_speed_start("$vessel"),"$time")"""
case "stop_start" =>
//HappensAt [stop_start 636013060] 1451802771
predicate = s"""happensAt(stop_start("$vessel"),"$time")"""
case "change_in_heading" =>
//HappensAt [change_in_heading 240096000] 1451802787
predicate = s"""happensAt(change_in_heading("$vessel"),"$time")"""
case "isInArea" =>
//HappensAt [isInArea 239471800 area300240700] 1451802848
area = rest.split(" ")(2)
predicate = s"""happensAt(isInArea("$vessel", "$area"),"$time")"""
case "change_in_speed_end" =>
//HappensAt [change_in_speed_end 237144200] 1451802872
predicate = s"""happensAt(change_in_speed_end("$vessel"),"$time")"""
case "slow_motion_start" =>
//HappensAt [slow_motion_start 240802000] 1451802892
predicate = s"""happensAt(slow_motion_start("$vessel"),"$time")"""
case "stop_end" =>
//HappensAt [stop_end 356460000] 1451802924
predicate = s"""happensAt(stop_end("$vessel"),"$time")"""
case "gap_end" =>
//HappensAt [gap_end 271043772] 1451802920
predicate = s"""happensAt(gap_end("$vessel"),"$time")"""
case "leavesArea" =>
//HappensAt [leavesArea 239371500 area300674000] 1451802925
area = rest.split(" ")(2)
predicate = s"""happensAt(leavesArea("$vessel","$area"),"$time")"""
case "slow_motion_end" =>
predicate = s"""happensAt(slow_motion_end("$vessel"),"$time")"""
//HappensAt [slow_motion_end 271044099] 1451802937
}
val entry = MongoDBObject("time" -> time.toInt) ++ ("predicate" -> predicate) ++ ("vessels" -> List(vessel)) ++ ("areas" -> List(area))
collection.insert(entry)
}
}
}
def populateHLEMap(dataPath: String, hle: String) = {
println(s"Getting $hle map")
handleHLEs(dataPath, hle)
}
/* CAN'T GET THIS INTO A MAP */
///*
def populateLLEsMap(dataPath: String) = {
println("Getting LLEs map")
var counter = 0
val data = Source.fromFile(dataPath).getLines
while (data.hasNext) {
val x = data.next()
if (!x.contains("HoldsFor") && !x.contains("coord")) {
var area = "None"
var predicate = "None"
val info = x.split("HappensAt")(1)
val _info = info.split("\\]")
val time = _info(1).trim.toInt
val rest = _info(0).split("\\[")(1)
val lle = rest.split(" ")(0)
val vessel = rest.split(" ")(1)
lle match {
case "gap_start" =>
//HappensAt [gap_start 271043753] 1451802715
predicate = s"""happensAt(gap_start("$vessel"),"$time")"""
case "velocity" =>
//HappensAt [velocity 240675000 0 270.00005134150797] 1451802711
//the 4th parameter in [velocity 240675000 0 270.00005134150797] is heading, which is not used anywhere
val speed = rest.split(" ")(2)
predicate = s"""happensAt(velocity("$vessel","$speed"),"$time")"""
case "change_in_speed_start" =>
//HappensAt [change_in_speed_start 237955000] 1451802743
predicate = s"""happensAt(change_in_speed_start("$vessel"),"$time")"""
case "stop_start" =>
//HappensAt [stop_start 636013060] 1451802771
predicate = s"""happensAt(stop_start("$vessel"),"$time")"""
case "change_in_heading" =>
//HappensAt [change_in_heading 240096000] 1451802787
predicate = s"""happensAt(change_in_heading("$vessel"),"$time")"""
case "isInArea" =>
//HappensAt [isInArea 239471800 area300240700] 1451802848
area = rest.split(" ")(2)
predicate = s"""happensAt(isInArea("$vessel", "$area"),"$time")"""
case "change_in_speed_end" =>
//HappensAt [change_in_speed_end 237144200] 1451802872
predicate = s"""happensAt(change_in_speed_end("$vessel"),"$time")"""
case "slow_motion_start" =>
//HappensAt [slow_motion_start 240802000] 1451802892
predicate = s"""happensAt(slow_motion_start("$vessel"),"$time")"""
case "stop_end" =>
//HappensAt [stop_end 356460000] 1451802924
predicate = s"""happensAt(stop_end("$vessel"),"$time")"""
case "gap_end" =>
//HappensAt [gap_end 271043772] 1451802920
predicate = s"""happensAt(gap_end("$vessel"),"$time")"""
case "leavesArea" =>
//HappensAt [leavesArea 239371500 area300674000] 1451802925
area = rest.split(" ")(2)
predicate = s"""happensAt(leavesArea("$vessel","$area"),"$time")"""
case "slow_motion_end" =>
predicate = s"""happensAt(slow_motion_end("$vessel"),"$time")"""
//HappensAt [slow_motion_end 271044099] 1451802937
}
if (LLEMap.contains(time)) {
val currentValue = LLEMap(time)
val updatedAtoms = scala.collection.mutable.Set[String]() ++= currentValue._1 += predicate
val updatedVessels = scala.collection.mutable.Set[String]() ++= currentValue._2 += vessel
val updatedAreas = scala.collection.mutable.Set[String]() ++= currentValue._3 += area
LLEMap(time) = (updatedAtoms, updatedVessels, updatedAreas)
} else {
LLEMap(time) = (scala.collection.mutable.Set(predicate), scala.collection.mutable.Set(vessel), scala.collection.mutable.Set(area))
}
}
counter += 1
//println(s"Grouping LLEs by time. Data point: $counter")
}
}
//*/
def populateSpeedLimitsMap(dataPath: String) = {
println("Getting speed limits map")
val data = Source.fromFile(dataPath).getLines
data foreach { x =>
val s = x.split("\\|")
val area = s(0)
val limit = s(1)
val atom = s"""speedLimit("$area","$limit")"""
if (speedLimitsMap.contains(area)) speedLimitsMap(area) = speedLimitsMap(area) += atom
else speedLimitsMap(area) = scala.collection.mutable.Set(atom)
}
}
def populateProximityMap(dataPath: String) = {
println("Getting proximity map")
val lines = Source.fromFile(dataPath).getLines.filter(x => x.contains("proximity"))
lines foreach { x =>
val s = x.split("=")(0).split(" ")
val vessel1 = s(2)
val vessel2 = s(3)
val z = x.split("=")(1).trim().split(" ")(1).split("\\(")(1).split("\\)")(0).split("-")
val startTime = z(0).toInt
val endTime = z(1).toInt - 1
val atom = s"""close("$vessel1","$vessel2",interval("$startTime","$endTime"))"""
if (proximityMap.contains(vessel1)) proximityMap(vessel1) = proximityMap(vessel1) += atom
else proximityMap(vessel1) = scala.collection.mutable.Set(atom)
if (proximityMap.contains(vessel2)) proximityMap(vessel2) = proximityMap(vessel2) += atom
else proximityMap(vessel2) = scala.collection.mutable.Set(atom)
}
}
def populatePortsMap(dataPath: String) = {
println("Getting close-to-ports map")
val data = Source.fromFile(dataPath).getLines
data foreach { x =>
val s = x.split("\\|")
val time = s(0)
val vessel = s(1)
val atom = s"""notCloseToPorts("$vessel","$time")"""
if (portsMap.contains(time)) portsMap(time) = portsMap(time) += atom
else portsMap(time) = scala.collection.mutable.Set(atom)
}
}
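  /* A more idiomatic equivalent (a sketch) of the contains/else updates used in
   * the populate* methods above: getOrElseUpdate creates the empty set on first
   * access, so the branching is unnecessary. */
  def addAtom(map: scala.collection.mutable.Map[String, scala.collection.mutable.Set[String]],
      key: String, atom: String): Unit =
    map.getOrElseUpdate(key, scala.collection.mutable.Set[String]()) += atom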
def handleHLEs(path: String, hle: String) = {
val data = Source.fromFile(path).getLines.filter(x => !x.contains("inf"))
hle match {
case "highSpeedIn" | "withinArea" =>
data foreach { x =>
//println()
val s = x.split("\\|")
val (startTime, endTime, vessel, area) = (s(4).toInt, s(5).toInt - 1, s(1), s(2))
val atom = s"""holdsAt($hle("$vessel","$area"),interval("$startTime","$endTime"))"""
if (HLEMap.contains(vessel)) HLEMap(vessel) = HLEMap(vessel) += atom
else HLEMap(vessel) = scala.collection.mutable.Set(atom)
}
case "loitering" | "stopped" | "lowSpeed" | "sailing" =>
data foreach { x =>
val s = x.split("\\|")
val (startTime, endTime, vessel) = (s(3).toInt, s(4).toInt - 1, s(1))
val atom = s"""holdsAt($hle("$vessel"),interval("$startTime","$endTime"))"""
if (HLEMap.contains(vessel)) HLEMap(vessel) = HLEMap(vessel) += atom
else HLEMap(vessel) = scala.collection.mutable.Set(atom)
}
case "rendezVous" =>
data foreach { x =>
val s = x.split("\\|")
val (startTime, endTime, vessel1, vessel2) = (s(4).toInt, s(5).toInt - 1, s(1), s(0))
val atom = s"""holdsAt($hle("$vessel1","$vessel2"),interval("$startTime","$endTime"))"""
if (HLEMap.contains(vessel1)) HLEMap(vessel1) = HLEMap(vessel1) += atom
else HLEMap(vessel1) = scala.collection.mutable.Set(atom)
if (HLEMap.contains(vessel2)) HLEMap(vessel2) = HLEMap(vessel2) += atom
else HLEMap(vessel2) = scala.collection.mutable.Set(atom)
}
}
}
}
OLED | OLED-master/src/main/scala/experiments/datautils/openssh/DataHandling.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.openssh
import com.mongodb.casbah.MongoClient
import com.mongodb.casbah.commons.MongoDBObject
import com.mongodb.casbah.Imports._
import scala.io.Source
object DataHandling extends App {
val lines = Source.fromFile("/home/nkatz/dev/ADL-datasets/openshs-classification/d1_1m_10tm.csv").getLines
// used for test for now
//val lines = Source.fromFile("/home/nkatz/dev/ADL-datasets/openshs-classification/d3_2m_10tm.csv").getLines
val format = new java.text.SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
def time(t: String) = format.parse(t).getTime / 1000
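  // For example, time("2016-01-01 00:00:10") returns the corresponding Unix epoch in
  // seconds; note the result is timezone-dependent (SimpleDateFormat uses the default zone).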
//Set(eat, sleep, personal, leisure, other)
//Set(bathroomLight, hallwayLight, kitchenLight, bedroomCarp, oven, tv, kitchenDoor, bedroomLight, livingCarp, bed, kitchenCarp, couch, mainDoorLock, wardrobe, mainDoor, bathroomCarp, fridge, bedTableLamp, bathroomDoor, bedroomDoor)
val header = lines.take(1).next().split(",")
  println(header.mkString(",")) // print the actual header columns, not the Array reference
var activities = Set[String]()
var events = Set[String]()
val mongoClient = MongoClient()
val collection = mongoClient("openssh")("examples")
// used for test for now
//val collection = mongoClient("openssh-test")("examples")
collection.drop()
lines.drop(1) foreach { x =>
val split = x.split(",")
val z = (header zip split).filter(y => y._2 != "0").reverse
val first = z.take(2)
val last = z.drop(2)
val timeStamp = time(first.head._2)
val activity = first.tail.head._2
//if (!activities.contains(activity)) activities = activities + activity
val lles_happens = last.map(x => s"happensAt(${x._1},$timeStamp)")
val hle = s"holdsAt($activity,$timeStamp)"
val entry = MongoDBObject("time" -> timeStamp.toInt) ++ ("annotation" -> List(hle)) ++ ("narrative" -> lles_happens.toList)
println(entry)
collection.insert(entry)
/*
val lles = last.map(x => x._1)
lles foreach { lle =>
if (!events.contains(lle)) events = events + lle
}
*/
//println(timeStamp, activity, lles)
}
//println(activities)
//println(events)
}
| 2,755 | 33.45 | 234 | scala |
OLED | OLED-master/src/main/scala/experiments/datautils/opportunity/DataHandler.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.opportunity
import java.io.File
import scala.io.Source
object DataHandler extends App {
val labelsPath = "/home/nkatz/dev/OpportunityUCIDataset/dataset/label_legend.txt"
val dataFilePath = "/home/nkatz/dev/OpportunityUCIDataset/dataset/S1-ADL1.dat"
val person = new File(dataFilePath).getName.split("-")(0).toLowerCase
val events = List("0 none", "1 stand", "2 walk", "4 sit", "5 lie", "101 relaxing", "102 coffee_time", "103 early_morning", "104 cleanup", "105 sandwich_time",
"201 unlock", "202 stir", "203 lock", "204 close", "205 reach", "206 open", "207 sip", "208 clean", "209 bite", "210 cut", "211 spread", "212 release",
"213 move", "301 bottle", "302 salami", "303 bread", "304 sugar", "305 dishwasher", "306 switch", "307 milk", "308 drawer3", "309 spoon", "310 knife_cheese",
"311 drawer2", "312 table", "313 glass", "314 cheese", "315 chair", "316 door1", "317 door2", "318 plate", "319 drawer1", "320 fridge", "321 cup",
"322 knife_salami", "323 lazychair", "401 unlock", "402 stir", "403 lock", "404 close", "405 reach", "406 open", "407 sip", "408 clean", "409 bite", "410 cut",
"411 spread", "412 release", "413 move", "501 Bottle", "502 salami", "503 bread", "504 sugar", "505 dishwasher", "506 switch", "507 milk", "508 drawer3",
"509 spoon", "510 knife_cheese", "511 drawer2", "512 table", "513 glass", "514 cheese", "515 chair", "516 door1", "517 door2", "518 plate", "519 drawer1",
"520 fridge", "521 cup", "522 knife_salami", "523 lazychair", "406516 open_door1", "406517 open_door2", "404516 close_door1", "404517 close_door2",
"406520 open_fridge", "404520 close_fridge", "406505 open_dishwasher", "404505 close_dishwasher", "406519 open_drawer1", "404519 close_drawer1",
"406511 open_drawer2", "404511 close_drawer2", "406508 open_drawer3", "404508 close_drawer3", "408512 clean_table", "407521 drink_fromCup", "405506 toggle_switch")
  val labels = events.map { x => val s = x.split(" "); (s(0), s(1)) }.toMap
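  // e.g. labels("406516") == "open_door1" and labels("1") == "stand"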
val columns = Map("locomotion" -> 244, "HL_activity" -> 245, "left_arm" -> 246,
"left_arm_obj" -> 247, "right_arm" -> 248, "right_arm_obj" -> 249, "both_arms" -> 250)
val lines = Source.fromFile(dataFilePath).getLines.map(x => x.split(" "))
var time = 0
lines foreach { line =>
val a = line(columns("locomotion") - 1)
val b = line(columns("left_arm") - 1)
val c = line(columns("left_arm_obj") - 1)
val d = line(columns("right_arm") - 1)
val e = line(columns("right_arm_obj") - 1)
val f = line(columns("both_arms") - 1)
val h = line(columns("HL_activity") - 1)
val locomotion = if (labels(a) != "none") s"happensAt(${labels(a)}($person),$time)" else ""
val leftArm = if (labels(b) != "none" && labels(c) != "none") s"happensAt(${labels(b)}($person,${labels(c)}),$time)" else ""
val rightArm = if (labels(d) != "none" && labels(e) != "none") s"happensAt(${labels(d)}($person,${labels(e)}),$time)" else ""
val bothArms = if (labels(f) != "none") s"happensAt(${labels(f).split("_")(0)}($person,${labels(f).split("_")(1)}),$time)" else ""
val highLevelActivity = if (labels(h) != "none") s"holdsAt(${labels(h)}($person),$time)" else ""
val all = List(locomotion, leftArm, rightArm, bothArms, highLevelActivity).distinct.filter(x => x != "")
val printout = s"$time ${all.mkString(" ")}\n"
utils.Utils.writeLine(printout, "/home/nkatz/Desktop/kernel", "append")
println(all)
time += 1
}
}
| 4,185 | 54.813333 | 167 | scala |
OLED | OLED-master/src/main/scala/experiments/datautils/tracknow/Helper.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.tracknow
import java.io.File
import scala.io.Source
import scala.util.Try
object Helper extends App {
def rename(oldName: String, newName: String) = {
Try(new File(oldName).renameTo(new File(newName))).getOrElse(false)
}
/*
val RTECDataPath = "/media/nkatz/storage/Zelitron-data/RTEC_data"
val dir = new File(RTECDataPath)
var types = Set[String]()
val t0 = System.nanoTime()
dir.listFiles.sortBy( x => x.getName.split("-")(0).toInt ) foreach { f =>
val source = Source.fromFile(f.getAbsolutePath)
try {
val lines = source.getLines
lines foreach { x =>
val ttype = x.split("\\|")(4)
if (!types.contains(ttype)) types = types + ttype
//println(types)
}
} finally { source.close() }
}
println(types)
val t1 = System.nanoTime()
println(s"Total time: ${(t1-t0)/1000000000.0}")
*/
/*
We need:
- Total number of LLEs
- Mean number of LLEs
- Total number of HLEs
- Mean number of HLEs
- Total time
- Mean time per batch
*/
def f(s: String) = {
val z = s.split(": ")(1).split("/")
(z(0), z(1))
}
val resultsFile = "/home/nkatz/Desktop/ZEL-RTEC-results"
val source = Source.fromFile(new File(resultsFile))
try {
val lines = source.getLines
val results = lines.foldLeft(0.0, List[Double](), 0.0, List[Double](), 0.0, List[Double]()) { (x, y) =>
val (llesTotal, llesAvg, hlesTotal, hlesAvg, timeTotal, timeAvg) = (x._1, x._2, x._3, x._4, x._5, x._6)
if (y.contains("Total/Average number of input LLEs:")) {
val (total, avg) = f(y)
(llesTotal + total.toDouble, llesAvg :+ avg.toDouble, hlesTotal, hlesAvg, timeTotal, timeAvg)
} else if (y.contains("Total/Average number of HLE instances:")) {
val (total, avg) = f(y)
(llesTotal, llesAvg, hlesTotal + total.toDouble, hlesAvg :+ avg.toDouble, timeTotal, timeAvg)
} else if (y.contains("Total/Average time:")) {
val (total, avg) = f(y)
(llesTotal, llesAvg, hlesTotal, hlesAvg, timeTotal + total.toDouble, timeAvg :+ avg.toDouble)
} else {
(llesTotal, llesAvg, hlesTotal, hlesAvg, timeTotal, timeAvg)
}
}
println(s"Total LLEs number: ${results._1}")
println(s"Average LLEs number per batch: ${results._2.sum / results._2.length}")
println(s"Total HLEs number: ${results._3}")
println(s"Average HLEs number per batch: ${results._4.sum / results._4.length}")
println(s"Total Time: ${results._5}")
println(s"Average time per batch: ${results._6.sum / results._6.length}")
} finally {
source.close()
}
}
| 3,332 | 31.359223 | 109 | scala |
OLED | OLED-master/src/main/scala/experiments/datautils/tracknow/RunER.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.tracknow
import java.io.File
import scala.sys.process._
object RunER {
/* Command to find all files of size larger than 400MBs:
* find . -size +400M */
def main(args: Array[String]) = {
    val winSize = args(0) // window size in millis
    val statsPath = args(1)
    val patternsPath = args(2)
val RTECDataPath = "/media/nkatz/storage/Zelitron-data/RTEC_data"
val dir = new File(RTECDataPath)
val t0 = System.nanoTime()
var i = 0
def fileSizeInMBs(f: File) = {
val fileSizeInBytes = f.length()
val fileSizeInKBs = fileSizeInBytes / 1024
val fileSizeInMBs = fileSizeInKBs / 1024
//val fileSizeInGBs = fileSizeInMBs / 1024
fileSizeInMBs
}
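    // Note the integer division: sizes are truncated, e.g. a 209715200-byte (200MB) file
    // yields exactly 200, while anything under 1MB yields 0.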
dir.listFiles.sortBy(x => x.getName.split("-")(0).toInt) foreach { f =>
println(fileSizeInMBs(f))
if (fileSizeInMBs(f) <= 100) {
val fname = f.getName
val fnameNoExt = fname.split("\\.")(0)
val split = fnameNoExt.split("-")
val (first, last) = (split(1), split(2))
//val query = s"performFullER(['${f.getAbsolutePath}']," +
// s"'/home/nkatz/Downloads/RTEC-master/examples/track-know/results/$fnameNoExt-stats.txt'," +
// s"'/home/nkatz/Downloads/RTEC-master/examples/track-know/results/$fnameNoExt-patterns.txt',$first,3600000,3600000,$last),halt."
val query = s"performFullER(['${f.getAbsolutePath}']," +
s"'$statsPath/$fnameNoExt-stats.txt'," +
s"'$pattenrsPath/$fnameNoExt-patterns.txt',$first,$winSize,$winSize,$last),halt."
println(s"PROCESSING BATCH: $i | file: ${f.getName} | size: ${fileSizeInMBs(f)}")
val cmd = Seq("yap", "-q", "-f", "/home/nkatz/Downloads/RTEC-master/examples/track-know/loader.prolog", "-g", query)
//println(cmd.mkString(" "))
//val result = cmd.mkString(" ").lineStream_!
cmd.mkString(" ").!
i += 1
}
}
val t1 = System.nanoTime()
println(s"Total time: ${(t1 - t0) / 1000000000.0}")
}
}
| 2,746 | 32.096386 | 139 | scala |
OLED | OLED-master/src/main/scala/experiments/datautils/tracknow/TK.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.tracknow
import java.io.{File, PrintWriter}
import scala.io.Source
import util.Try
object TK extends App {
val dataPath = "/media/nkatz/storage/Zelitron-data/data"
val rtecDataPath = "/media/nkatz/storage/Zelitron-data/RTEC_data"
/*
object MyFile {
def apply(f: File): MyFile = {
val split = f.getName.split("_")(2).split("-")
val (year, month, day) = (split(0).toInt, split(1).toInt, split(2).toInt)
MyFile(year, month, day, f)
}
}
case class MyFile(year: Int, month: Int, day: Int, file: File)
*/
def rename(oldName: String, newName: String) = {
Try(new File(oldName).renameTo(new File(newName))).getOrElse(false)
}
def getListOfFiles(dir: String) = {
val d = new File(dir)
var i = 0
d.listFiles foreach { x =>
val files = x.listFiles.sortBy{ f =>
val split = f.getName.split("_")(2).split("-")
val (year, month, day) = (split(0).toInt, split(1).toInt, split(2).toInt)
(year, month, day)
}
val newFileName = s"$rtecDataPath/$i.csv"
val pw = new PrintWriter(new File(newFileName))
var firstTime = "non-set-yet"
var endTime = "non-set-yet"
files foreach { f =>
val source = Source.fromFile(f.getAbsolutePath)
try {
val lines = source.getLines
lines foreach { line =>
val split = line.split(";")
val company = split(0)
val vehicle = split(1)
val format = new java.text.SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS")
val ttime = split(2)
val time = format.parse(ttime).getTime()
if (firstTime == "non-set-yet") firstTime = time.toString
endTime = time.toString
val engineStatus = split(3)
val speed = split(10)
val ttype = split(23).replaceAll("\\s", "").replaceAll(",", "").toLowerCase
/*
if (engineStatus != "0" && engineStatus != "3") {
val newLine = s"speed|$time|$time|$vehicle|$ttype|$speed"
pw.write(newLine+"\n")
}
*/
val newLine = s"speed|$time|$time|$vehicle|$ttype|$speed"
pw.write(newLine + "\n")
}
} finally { source.close() }
}
pw.close()
rename(newFileName, s"$rtecDataPath/$i-$firstTime-$endTime.csv")
i += 1
println(i + " complete.")
}
}
println(getListOfFiles(dataPath))
}
| 3,202 | 30.712871 | 87 | scala |
OLED | OLED-master/src/main/scala/iled/ILED.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package iled
import java.io.File
import akka.actor.{Actor, ActorSystem, Props}
import com.mongodb.casbah.commons.MongoDBObject
import com.typesafe.scalalogging.LazyLogging
import logic.Examples.{Example, ExampleBatch, ExamplePair}
import logic.Rules.InconstistentRule
import logic._
import utils.{ASP, Database, MongoUtils}
import utils.Utils.{lined, time}
import xhail.Xhail
import akka.pattern.ask
import akka.util.Timeout
import app.runutils.Globals
import utils.parsers.{ASPResultsParser, ClausalLogicParser}
import scala.concurrent.Await
import scala.concurrent.duration._
/**
* Created by nkatz on 6/20/17.
*/
object ILED extends ClausalLogicParser with LazyLogging with MongoUtils {
def main(args: Array[String]) {
val db = new Database("caviar_meeting_clean", "examples")
val entryPath = "/home/nkatz/dev/iled/datasets/Caviar/meeting"
val globals = new Globals(entryPath)
//db.inspectDB(seeWhat="example")
//collectKernels(db, batchSize=100, trainingSetSize=100000000,startTime = 0,jep=jep)
val withBacktr = true
val withWeaks = false
//val weaksSelectionStrategy = Left("all-weaks")
val weaksSelectionStrategy = Right(50)
Globals.glvalues("withWeaks") = withWeaks.toString
Globals.glvalues("withBacktr") = withBacktr.toString
run(db, batchSize = 100, trainingSetSize = 100000000, //100000 //15000
withBacktr = withBacktr, withWeaks = withWeaks,
weaksSelectionStrategy = weaksSelectionStrategy, startTime = 0, globals = globals, bkFile = globals.BK_WHOLE_EC)
}
def run(DB: Database, batchSize: Int = 1, trainingSetSize: Int = 0,
withWeaks: Boolean = false, withBacktr: Boolean = true,
weaksSelectionStrategy: Either[String, Int] = Left("all-weaks"),
startTime: Int = 0, globals: Globals, bkFile: String) = {
logger.info(s"${lined("Running configuration:")} \n" + (for ((k, v) <- Globals.glvalues) yield s"$k: $v").mkString("\n"))
var finalTheory = new PriorTheory()
var seenExs = List[Example]()
var totalTime = 0.0
var endTimeVar = 0
var timesPerBatch = List[Double]()
batchSize match {
case 1 =>
DB.collection.find().sort(MongoDBObject("time" -> 1)).foldLeft(List[Example](), new PriorTheory(), 0.0) {
(x, y) => iledTop(x._1, Example(y), x._2, globals = globals, bkFile = bkFile)
}
case _ =>
DB.nonEmpty match {
case true =>
val mainBlock = time {
val upto = if (trainingSetSize > DB.size) DB.size / batchSize else trainingSetSize / batchSize
val repeat = List.range(0, upto)
// startTime,seenExamples,finalHyp,
val run = repeat.foldLeft(startTime, List[Example](), new PriorTheory()) {
(x, _) =>
val start = x._1
val pastExs = x._2
val theory = x._3
val _batch = DB.getBatch(start, batchSize, withWeaks)
val (batch, endTime) = (_batch._1, _batch._2)
val (_seenExs, _theory, _time) = withWeaks match {
case true =>
iledTop(pastExs, Example.merge(batch.examples),
theory, weakBatch = batch, withBacktr = withBacktr,
weaksSelectionStrategy = weaksSelectionStrategy, globals = globals, bkFile = bkFile)
case _ => iledTop(pastExs, batch.asSingleExmpl, theory, withBacktr = withBacktr, globals = globals, bkFile = bkFile)
}
endTimeVar = endTime
(endTime, _seenExs, _theory)
}
seenExs = run._2
finalTheory = run._3
if (!withBacktr) finalTheory = goBack(finalTheory, seenExs, globals = globals) // we go back at the end, after one pass over all the examples
//if (withWeaks) pruneBadWeaks(seenExs,finalTheory.merge)
}
totalTime = mainBlock._2
case _ => logger.info(s"Empty db ${DB.name}"); System.exit(-1)
}
}
println(endTimeVar)
if (!withWeaks) {
wrapUp(DB, finalTheory, timesPerBatch, totalTime, globals = globals)
} else {
pruneBadWeaks(seenExs, finalTheory.merge, DB, totalTime, globals = globals)
}
}
/*
Use this for two-pass ILED as we discussed in our last meeting
def twoPassKernelFirst(DB: Database,batchSize: Int = 1, trainingSetSize: Int = 0, startTime: Int = 0) = {
}
*/
def collectKernels(DB: Database, batchSize: Int = 1,
trainingSetSize: Int = 0, startTime: Int = 0,
globals: Globals, bkFile: String) = {
val upto = if (trainingSetSize > DB.size) DB.size / batchSize else trainingSetSize / batchSize
val repeat = List.range(0, upto)
val run = repeat.foldLeft(startTime, new Theory()) {
(x, _) =>
val start = x._1
val accumKernel = x._2
val _batch = DB.getBatch(start, batchSize, usingWeakExmpls = false)
val (batch, endTime) = (_batch._1, _batch._2)
val toExmp = batch.asSingleExmpl
//println(toExmp.time)
val (kernel, _) = LogicUtils.generateKernel(toExmp.toMapASP, bkFile = bkFile, globals = globals)
println(Theory(kernel).tostring)
(endTime, accumKernel.extend(Theory(kernel)))
}
val wholeKernel = run._2
val compressed = Theory(LogicUtils.compressTheory(wholeKernel.clauses))
println(compressed.tostring)
wrapUp(DB, new PriorTheory(retainedRules = compressed), batchTimes = List[Double](), totalTime = 0.0, globals = globals)
}
def iledTop(
seenExs: List[Example],
e: Example,
pt: PriorTheory,
weakBatch: ExampleBatch = ExampleBatch(),
withBacktr: Boolean = false,
weaksSelectionStrategy: Either[String, Int] = Left("all-weaks"),
globals: Globals, bkFile: String): (List[Example], PriorTheory, Double) = {
var totalTime = 0.0
var forwardTime = 0.0
var weaksTime = 0.0
var totalBacktrTime = 0.0
val allBacktrTimes = List[Double]()
var supportUpdateTime = 0.0
if (e.time == "1048160") {
      val stop = "stop" // debugging breakpoint anchor
}
val withWeaks = !weakBatch.isEmpty // learning from weak examples
var priorTheory = if (!withWeaks) pt else pt.clearSupports(e, globals = globals)
if (!isSAT(priorTheory.merge.strongRules, e, ASP.check_SAT_Program, globals = globals)) {
logger.info(s"Procesing new example: ${e.time}")
val wholeBlock = time {
val learnNewBlock = time {
priorTheory = learnFromNew(e, priorTheory, withWeaks = withWeaks, bkFile = bkFile, globals = globals)
}
forwardTime = learnNewBlock._2
if (withWeaks) {
val weaksBlock = time {
priorTheory = learnFromWeaks(priorTheory, weakBatch, weaksSelectionStrategy, bkFile = bkFile, globals = globals)
}
weaksTime = weaksBlock._2
}
if (withBacktr) {
if (priorTheory.newRules.clauses.nonEmpty && seenExs.nonEmpty) {
val bcktrBlock = time {
priorTheory = goBack(priorTheory, seenExs, withWeaks = withWeaks, globals = globals)
}
totalBacktrTime = bcktrBlock._2
}
}
}
totalTime = wholeBlock._2
} else {
      // don't time this; it reduces the average revision time
val sUpdate = time {
val kernel =
// It takes too long to do this. Of course it's not correct. It's only for playing around with Arindam's data
if (!Globals.glvalues("iter-deepening").toBoolean) thisGenerateKernel(e.toMapASP, bkFile = bkFile, globals = globals)
else List[Clause]()
if (kernel.nonEmpty) {
logger.info(s"Example ${e.time}: correct/updating support")
updateSupport(priorTheory.merge, Theory(kernel), withWeaks && e.isWeak)
} else {
logger.info(s"Example ${e.time}: correct")
}
}
supportUpdateTime = sUpdate._2
/*
val updateSupports =
priorTheory.merge.clauses filter {
p => p.supportNeedsUpdate(e)
}
if (updateSupports.nonEmpty) {
val kernel = IledStreaming.generateKernel(e.toMapASP)
if (kernel.nonEmpty) {
logger.info(s"Example: ${e.time} correct/updating support")
updateSupport(Theory(updateSupports), Theory(kernel))
}
} else {
logger.info(s"Example: ${e.time} correct")
}
*/
}
val newSeen = seenExs :+ e
(newSeen, priorTheory, totalTime)
}
def thisGenerateKernel(examples: Map[String, List[String]], fromWeakExmpl: Boolean = false, bkFile: String, globals: Globals) = {
val infile = utils.Utils.getTempFile("example", ".lp", deleteOnExit = true)
val f = (x: String) => if (x.endsWith(".")) x.split("\\.")(0) else x
//val g = (x: String) => if(x.contains("example(")) x else s"example($x)"
val interpretation = examples("annotation").map(x => s"${f(x)}.") ++ examples("narrative").map(x => s"${f(x)}.")
utils.Utils.writeToFile(infile, "overwrite") { p => interpretation.foreach(p.println) }
var (_, varKernel) = Xhail.runXhail(fromFile = infile.getAbsolutePath, kernelSetOnly = true, fromWeakExmpl = fromWeakExmpl, bkFile = bkFile, globals = globals)
if (fromWeakExmpl) {
varKernel = varKernel.map (x => Clause.updateField(x, fromWeakExample = true))
}
varKernel
}
def learnFromNew(e: Example, priorTheory: PriorTheory, withWeaks: Boolean, bkFile: String, globals: Globals): PriorTheory = {
//val kernel = IledStreaming.generateKernel(e.toMapASP, jep=jep)
val kernel =
if (Globals.glvalues("specializeOnly").toBoolean && !priorTheory.isEmpty) List[Clause]()
else thisGenerateKernel(e.toMapASP, bkFile = bkFile, globals = globals)
var theory = new PriorTheory(retainedRules = priorTheory.merge.compress)
if (!withWeaks) {
theory = revise(kernelSet = Theory(kernel), priorTheory = theory, example = e, globals = globals)
} else {
theory = reviseWithWeaks(Theory(kernel), theory, e, globals = globals)
}
show(theory.retainedRules, theory.newRules, theory.refinedRules, e, "forward")
//Utils.checkIfCompressed(theory)
theory
}
def learnFromWeaks(pt: PriorTheory, weakBatch: ExampleBatch,
weaksSelectionStrategy: Either[String, Int], globals: Globals, bkFile: String): PriorTheory = {
def showWeaks(WeakRules: Theory) = {
if (WeakRules != Theory()) logger.info(s"\nNew weak rule: \n ${WeakRules.tostring}")
}
def weaksSelection = (how: Either[String, Int], weakBatch: ExampleBatch) => {
how match {
case Left(x) => x match {
case "all-weaks" => weakBatch.weakExmpls
case "hausdorff" => List[Example]() // Not implemented yet
}
case Right(x) =>
val howMany = (x.toFloat / 100.0 * weakBatch.weakExmpls.length).toInt
utils.Utils.sampleN(howMany, weakBatch.weakExmpls)
}
}
var theory = pt.clearSupports(Example.merge(weakBatch.examples), globals = globals)
val weaksToLearnFrom = weaksSelection(weaksSelectionStrategy, weakBatch)
for (weak <- weaksToLearnFrom) {
val newWeakBatch = Example.merge(weakBatch.weakExmpls.drop(weakBatch.weakExmpls.indexOf(weak)), markedAsWeak = true)
logger.info(s"learning from weak example ${newWeakBatch.time}")
val kernel = thisGenerateKernel(newWeakBatch.toMapASP, fromWeakExmpl = true, globals = globals, bkFile = bkFile)
// IT IS NOT CORRECT TO TRY TO LEARN A NEW RULE FROM EACH NEW POINT. Instead, we simply pack weak support rules in existing rules
val extraRules =
if (!isSAT(theory.merge, newWeakBatch, ASP.check_SAT_Program, globals = globals)) {
val weakRules = revise(kernelSet = Theory(kernel), priorTheory = new PriorTheory(),
example = newWeakBatch, fromWeakExmpl = true, globals = globals).newRules
/** Check if they are marked as weak (during debugging) */
val extras = weakRules.clauses.filter(x => !theory.merge.containsRule(x) && x.head.functor != "terminatedAt")
//val extras = weakRules.clauses.filter(x => !theory.merge.containsRule(x) )
showWeaks(Theory(extras))
extras
//val extraRules = List[Clause]() // this shouldn't exist at all. It's left-over from learning weak rules from a batch
} else {
logger.info("No new weak rule, updating supports only")
List[Clause]()
}
if (kernel.nonEmpty) {
// Packing weak support rules is messy and leads to large and over-general theories
// via the addition of extra rules during specialization
//theory.newRules.updateSupports(Theory(kernel), fromWeakExample = true)
//theory.retainedRules.updateSupports(Theory(kernel), fromWeakExample = true)
//theory.refinedRules.updateSupports(Theory(kernel), fromWeakExample = true)
theory =
new PriorTheory(
retainedRules = theory.retainedRules,
newRules = theory.newRules.extendUnique(Theory(extraRules)),
refinedRules = theory.refinedRules)
}
}
theory
}
def goBack(priorTheory: PriorTheory, seenExamples: List[Example], withWeaks: Boolean = false, globals: Globals): PriorTheory = {
logger.info("Re-checking the historical memory")
var theory = new PriorTheory(retainedRules = priorTheory.merge.compress)
for (past <- seenExamples) {
logger.info(s"Re-seeing Example: ${past.time}")
if (!isSAT(theory.merge, past, ASP.check_SAT_Program, globals = globals)) {
if (past.time == "26960") {
          val stop = "stop" // debugging breakpoint anchor
}
theory = if (!withWeaks) theory else theory.clearSupports(past, globals = globals)
if (!withWeaks) {
theory = revise(priorTheory = theory, example = past, globals = globals)
} else {
theory = reviseWithWeaks(priorTheory = theory, e = past, globals = globals)
}
show(theory.retainedRules, theory.newRules, theory.refinedRules, past, "backwards")
}
}
//Utils.checkIfCompressed(theory)
theory
}
def reviseWithWeaks(kernel: Theory = Theory(), priorTheory: PriorTheory, e: Example, globals: Globals) = {
var theory = priorTheory
val (strongs, weaks) = theory.merge.strongWeakSplit
// The correct approach is to first account for all the strong rules,
// so that we have a "base" for the current example, and then try to
// refine the weak ones. If strong and weak rules are treated together,
    // then a "strong" revision is likely to be missed, leading to a dead-end later on.
val strongRevision = revise(kernelSet = kernel, priorTheory = new PriorTheory(retainedRules = Theory(strongs)),
example = e, withSupport = "strongOnly", globals = globals)
val filtered = strongs.filter(p => p.supportSet.clauses.exists(q => q.fromWeakExample))
val weakRevision = filtered match {
case Nil => new PriorTheory()
case _ =>
val toRevise = new PriorTheory(retainedRules = Theory(filtered map (p => Clause(p, p.supportSet.weakRules))))
val keepIntact = new PriorTheory(retainedRules = strongRevision.retainedRules, refinedRules = strongRevision.refinedRules, newRules = strongRevision.newRules)
revise(priorTheory = toRevise, keepIntact = keepIntact, example = e, withSupport = "fullSupport", globals = globals)
}
//val weakRevision = new PriorTheory()
theory = new PriorTheory(retainedRules = strongRevision.retainedRules.extendUnique(weakRevision.retainedRules),
refinedRules = strongRevision.refinedRules.extendUnique(weakRevision.refinedRules),
newRules = strongRevision.newRules.extendUnique(weakRevision.newRules))
if (weaks != Nil) {
//val reviseWeaks = revise(keepIntact=theory,priorTheory = new PriorTheory(theory.retainedRules.extendUnique(Theory(weaks)), theory.newRules, theory.refinedRules), example = e)
val reviseWeaks = revise(keepIntact = theory, priorTheory = new PriorTheory(retainedRules = Theory(weaks)), example = e, globals = globals)
val newWeaks = reviseWeaks.newRules
val refinedWeaks = reviseWeaks.refinedRules
val retainedWeaks = reviseWeaks.retainedRules
theory =
new PriorTheory(
retainedRules = theory.retainedRules.extendUnique(retainedWeaks),
newRules = theory.newRules.extendUnique(newWeaks),
refinedRules = theory.refinedRules.extendUnique(refinedWeaks))
}
theory
}
def revise(
kernelSet: Theory = Theory(),
priorTheory: PriorTheory,
keepIntact: PriorTheory = new PriorTheory(),
example: Example,
withSupport: String = "fullSupport",
fromWeakExmpl: Boolean = false,
noiseTolerant: Boolean = false,
globals: Globals): PriorTheory = {
val aspFile: File = utils.Utils.getTempFile("aspInduction", ".lp", "", deleteOnExit = true)
val (_, use2AtomsMap, defeasiblePrior, use3AtomsMap, _, _) =
ASP.inductionASPProgram(
kernelSet = kernelSet,
priorTheory = priorTheory.merge,
examples = example.toMapASP,
aspInputFile = aspFile,
withSupport = withSupport,
retained = keepIntact.merge,
globals = globals)
val answerSet = ASP.solve("iled", use2AtomsMap ++ use3AtomsMap, aspFile, example.toMapASP, fromWeakExmpl = fromWeakExmpl)
if (answerSet != Nil) {
val newRules = Rules.getNewRules(
answerSet.head.atoms, use2AtomsMap,
fromWeakExample = example.isWeak)
updateSupport(newRules, kernelSet, fromWeakExample = example.isWeak)
val icRules = Rules.getInconsistentRules(answerSet.head.atoms, priorTheory.merge, use3AtomsMap)
val retainedRules = Theory(priorTheory.merge.clauses.filter(x => icRules.forall(y => y.rule != x)))
updateSupport(retainedRules, kernelSet, fromWeakExample = example.isWeak)
// Check whether to keep the initial refinement from
// each inconsistent rule, or search further.
/*
val refinedRules = icRules map (
p => getRefinedProgram(p, retainedRules, newRules, example)
) map (x =>
if (x.finalRefinement == Theory())
Theory(x.initialRefinement)
else x.finalRefinement)
*/
val refinedRules =
if (!noiseTolerant) Rules.getRefined(icRules, retainedRules.extendUnique(keepIntact.merge), newRules, example,
withSupport = withSupport, globals = globals)
else thisRefine(icRules)
new PriorTheory(retainedRules, newRules, Theory.mergeTheories(refinedRules))
} else {
priorTheory
}
}
  /* This is something forgotten from ILEDNoiseTollerant. It's here so that its reference above compiles.
   * If we go into a noise-tolerant version of ILED we could look at this. */
def thisRefine(incRules: List[InconstistentRule]): List[Theory] = {
// For each inconsistent rule R:
// First, check each one of the stored refinements, to see if it
// rejects the negative examples that the rule currently
// covers. If one does, we are done (we simply update the
// negative counts for R to use in the future and also the counts
// for each existing refinement and each support clause).
//-----------------------------------------------------
// This is done by the function checkExistingRefs().
    // Also, maybe it's better for the existing refinements
// to be stored as fields of the support set rule they
// were generated from.
//-----------------------------------------------------
// If None of the existing refinements rejects the negative
// examples, we try to generate new refinements. How will
// we do that? Further refine each one of the existing refs?
// I guess so.
// This is done by the function generateNewRefs().
//----------------------------------------------------------
    // Perhaps it's best to proceed as follows:
// When a new example arrives we first score everything
// (theory rules, their support rules and their refs). Via
// scoring, we identify the inconsistent theory rules.
// Next we check if an existing refinement rejects the negatives
// (for the inconsistent rules)
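    // A minimal sketch of the intended control flow, assuming the (not yet implemented)
    // helpers checkExistingRefs and generateNewRefs mentioned in the comments above:
    //   for (incRule <- incRules) {
    //     checkExistingRefs(incRule) match {
    //       case Some(goodRef) => // done: update the counts of incRule, its refinements and support clauses
    //       case None => generateNewRefs(incRule) // further refine the existing refinements
    //     }
    //   }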
for (incRule <- incRules) {
// val (_, use2AtomsMap, defeasiblePrior, use3AtomsMap, _, _) =
// ASP.inductionASPProgram(kernelSet=kernelSet,priorTheory=priorTheory.merge,examples=example.toMapASP,aspInputFile=aspFile,withSupport=withSupport,retained=keepIntact.merge)
}
List[Theory]()
}
def updateSupport(theory: Theory, kernelSet: Theory, fromWeakExample: Boolean = false) = {
// This is used to avoid adding redundant rules in the
    // support set. A rule is redundant if it theta-subsumes
    // a rule already present in the support set.
val isRedundant = (ss: Clause, c: Clause) =>
c.supportSet.clauses exists (x => ss.thetaSubsumes(x))
for (
c <- theory.clauses;
ks <- kernelSet.clauses if !isRedundant(ks, c) && c.thetaSubsumes(ks)
) {
val markedKS = Clause.updateField(ks, fromWeakExample = fromWeakExample)
c.supportSet = Theory(c.supportSet.clauses :+ markedKS)
}
// This is used in order to avoid maintaining redundant
// rules in the support set. In this context, a support set
// rule is redundant if it subsumes some other rule
// in the support set. This can happen in cases where e.g.
// p :- q,r was added to the support set early on and
// later on p :- q,r,s was added. In this case the first
// rule is redundant and should be removed. This redundancy
// checking should be done whenever the support set
// changes with the addition of a rule.
theory.clauses foreach (x => x.compressSupport)
}
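  // A hedged, minimal illustration of the redundancy check above (assuming Clause.parse
  // accepts the standard clausal syntax used elsewhere in this code base):
  //   val general = Clause.parse("p(X) :- q(X),r(X).")
  //   val specific = Clause.parse("p(X) :- q(X),r(X),s(X).")
  //   general.thetaSubsumes(specific) // true, so keeping both in a support set is redundant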
def wrapUp(DB: Database, theory: PriorTheory, batchTimes: List[Double] = List(),
totalTime: Double, testingSet: List[(Int, Int)] = Nil, globals: Globals) = {
def crossValidation(theory: Theory) = {
var done = false
var (tps, fps, fns) = (0.0, 0.0, 0.0)
//var start = DB.startTime
      var start = 597240 // hard-coded start time (DB.startTime is bypassed here)
while (!done) {
        /** @todo factor out the batch calls; this is repeated throughout the code **/
//println("crossval")
val getbatch = DB.getBatch1(start, 5000) // get batches at 5000 to go faster
val (batch, endTime) = (getbatch._1, getbatch._2)
if (endTime == start) done = true
val res = new Crossvalidation(batch.toMapASP, theory.clauses.map(x => x.withTypePreds(globals = globals)), globals = globals)
tps = tps + res.tps
fps = fps + res.fps
fns = fns + res.fns
start = endTime
}
val precision = tps.toFloat / (tps + fps)
val recall = tps.toFloat / (tps + fns)
val f_score = 2 * (precision * recall) / (precision + recall)
(tps, fps, fns, precision, recall, f_score)
}
def crossValidation2(theory: Theory, testingSet: List[(Int, Int)], globals: Globals): (Double, Double, Double, Double, Double, Double) = {
def cv(currentInterval: (Int, Int)) = {
val batches = DB.getBatches(currentInterval._1, currentInterval._2, step = 40,
howMany = List.range(currentInterval._1, currentInterval._2 + 40, 40).length, usingWeakExmpls = false)
val examples = batches map (x => x.asSingleExmpl)
examples.foldLeft((0.0, 0.0, 0.0)) {
(x, y) =>
val (tps, fps, fns) = (x._1, x._2, x._3)
val res = new Crossvalidation(y.toMapASP, theory.clauses.map(x => x.withTypePreds(globals = globals)), globals = globals)
(tps + res.tps, fps + res.fps, fns + res.fns)
}
}
val stats = testingSet.foldLeft((0.0, 0.0, 0.0)) {
(x, y) =>
val (_tps, _fps, _fns) = (x._1, x._2, x._3)
val m = cv(y)
val (tps, fps, fns) = (m._1, m._2, m._3)
(_tps + tps, _fps + fps, _fns + fns)
}
val (tps, fps, fns) = (stats._1, stats._2, stats._3)
val precision = tps.toFloat / (tps + fps)
val recall = tps.toFloat / (tps + fns)
val f_score = 2 * (precision * recall) / (precision + recall)
//val out = (tps, fps, fns, precision, recall, f_score)
(tps, fps, fns, precision, recall, f_score)
}
val hypothesis = Theory(Xhail.compressTheory(theory.merge.clauses))
val (tps, fps, fns, precision, recall, f_score) =
if (testingSet.isEmpty) {
crossValidation(hypothesis)
} else {
//val c = new crossValidation2(hypothesis, testingSet)
//c.out
crossValidation2(hypothesis, testingSet, globals)
}
val hypothesisSize = hypothesis.clauses.foldLeft(0)((count, p) => count + p.toLiteralList.length)
val showHypothesis = hypothesis.tostring
logger.info(
/*
s"\nFinal Hypothesis:\n $showHypothesis" +
s"\n\nHypothesis size: $hypothesisSize\n" +
s"Total time (secs): $totalTime \n" +
s"${lined("Crossvalidation")}\n" +
s"tps: $tps\n" +
s"fps: $fps\n" +
s"fns: $fns\n" +
s"precision: $precision\n" +
s"recall: $recall\n" +
s"f-score: $f_score"
*/
s"\n$showHypothesis\ntps: $tps\nfps: $fps\nfns: $fns\nprecision: $precision\nrecall: $recall\nf-score: $f_score\ntraining time:" +
s"$totalTime\ntheory size: $hypothesisSize"
)
//Utils.checkIfCompressed(theory)
(hypothesisSize.toDouble, tps, fps, fns, precision, recall, f_score, totalTime)
}
// pruning threshold is an optional parameter that sets the pruning threshold to
// some percent of the total positives counts. This is used by the code in
// RTECCleanCaviarExperiments. It may be either a Right(x: Int), in which case the
// threshold is set to x tps, or a Left( (hle: String, percent: Int) ), in which case
// the pruning threshold is percent% of the total positives count for hle, found in
// the training set.
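  // For example, the default pruningThreshold = Right(2000) keeps a weak rule only if its
  // tps count exceeds 2000, while e.g. Left(("meeting", 10)) (a hypothetical HLE name) sets
  // the threshold to 10% of the total "meeting" positives found in the training set.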
def pruneBadWeaks(seen: List[Example], theory: Theory, DB: Database,
totalTime: Double, pruningThreshold: Either[(String, Int), Int] = Right(2000),
testingSet: List[(Int, Int)] = Nil, globals: Globals) = {
class RuleEvaluator(seen: List[Example], c: Clause) extends Actor {
def receive = {
case "go" =>
seen.foldLeft(()) {
(_, e) =>
val res = new Crossvalidation(e.toMapASP, List(c.withTypePreds(globals = globals)), withInertia = false, globals = globals)
c.tps = c.tps + res.tps
c.fps = c.fps + res.fps
c.fns = c.fns + res.fns
println(c.tps)
}
logger.info("done evaluating single rule")
sender ! "done"
}
}
class CrossVal(pruned: Theory, globals: Globals) extends Actor {
def receive = {
case "go" =>
sender ! wrapUp(DB, new PriorTheory(retainedRules = pruned), batchTimes = List[Double](), totalTime, testingSet = testingSet, globals = globals)
}
}
class ResultsHandler(seen: List[Example], theory: Theory) extends Actor {
logger.info("\nPruning weak rules")
var results = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
var done = false
val pruneThreshold = pruningThreshold match {
case Right(x) => x // hard-coded threshold
case Left(x) =>
val hle = x._1
val percentage = x._2
// compute the pruning threshold as a percentage of the total positives count in the training set
val totalTps = seen.foldLeft(0) {
(x, y) =>
val positives = y.annotation.toSeq.count(p => p.contains(hle))
x + positives
}
percentage / 100.0 * totalTps.toDouble
}
logger.info(s"\nPruning threshold: >= ${pruneThreshold.toInt}")
var size = theory.clauses.size
//for (c <- theory.weakRules.clauses) {
for (c <- theory.clauses) {
//context.actorOf(Props(new RuleEvaluator(seen, c, jep = new Jep()))) ! "go" // one jep instance per rule
val grouped = seen.grouped(20).map(x => Example.merge(x)).toList
        context.actorOf(Props(new RuleEvaluator(grouped, c))) ! "go" // one evaluator actor per rule
}
def receive = {
case "done" =>
size -= 1
if (size == 0) {
val pruned = Theory(theory.clauses.filter(p => !p.fromWeakExample | (p.fromWeakExample && p.tps > pruneThreshold)))
pruned.clauses foreach {
x => logger.info(s"\n ${if (x.fromWeakExample) "weak" else "strong"} rule: \n ${x.tostring} \n tps: ${x.tps} \n fps: ${x.fps} \n fns: ${x.fns}")
}
// Pruning is finished, perform cross-validation on the pruned theory
val crossValActor = context.actorOf(Props(new CrossVal(pruned, globals = globals)), name = "CrossValidationActor")
            implicit val timeout = Timeout(600 seconds) // wait ten minutes, just to be sure (it won't ever happen but I don't know how to handle early time-outs)
val future = crossValActor ? "go"
results = Await.result(future, timeout.duration).asInstanceOf[(Double, Double, Double, Double, Double, Double, Double, Double)]
done = true
}
case "isItReady?" =>
if (done) {
sender ! Right(results)
} else {
sender ! Left("wait!")
}
}
}
val system = ActorSystem("WeakRulesPruning")
val actor = system.actorOf(Props(new ResultsHandler(seen, theory)), name = "WeakRulesEvaluator")
implicit val timeout = Timeout(600 seconds)
var done = false
var out = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
while (!done) {
Thread.sleep(5000)
val future = actor ? "isItReady?"
val result = Await.result(future, timeout.duration).asInstanceOf[Either[String, (Double, Double, Double, Double, Double, Double, Double, Double)]]
result match {
case Left(x) => logger.info(s"...just asked if pruning is finished. ResultsHandler response: $x") // do nothing, ask again in a while to see of the results are ready
case Right(x) =>
done = true
out = x
//system.shutdown()
system.terminate()
}
}
out
}
class ResultsBean(val hypothesisSize: Double, val tps: Double, val fps: Double,
val fns: Double, val precision: Double, val recall: Double,
val f_score: Double, val totalTime: Double)
def show(retained: Theory, news: Theory, refined: Theory, e: Example, how: String): Unit = {
def showTheory(flag: String, t: Theory) = t match {
case Theory.empty => ""
case _ =>
val header = s"\n$flag rules:\n"
val line = "-" * header.length + "\n"
header + line + t.tostring + "\n"
}
def header = how match {
case "forward" => s" Example: ${e.time} "
case "backwards" => s" Example: ${e.time} (re-seeing) "
}
val theoryEmpty = retained.isEmpty && news.isEmpty && refined.isEmpty
val headerStr = s"\n==============$header==================\n"
val footer = "\n" + "=" * headerStr.length + "\n"
if (!theoryEmpty) {
logger.info(headerStr + showTheory("Retained", retained) + showTheory("Refined", refined) + showTheory("New", news) + footer)
}
}
def isSAT(theory: Theory, example: Example, F: (Theory, Example, Globals) => String, globals: Globals): Boolean = {
val f = F(theory, example, globals)
val out = ASP.solve(Globals.CHECKSAT, Map(), new java.io.File(f), example.toMapASP)
if (out != Nil && out.head == AnswerSet.UNSAT) false else true
}
def iterSearchFindNotCovered(theory: Theory, example: Example, F: (Theory, Example) => String, globals: Globals): List[String] = {
val f = ASP.iterSearchFindNotCoveredExmpls(theory, example, globals)
val out = ASP.solve(Globals.CHECKSAT, Map(), new java.io.File(f), example.toMapASP)
if (out != Nil && out.head == AnswerSet.UNSAT) out.head.atoms else List[String]()
}
}
class Crossvalidation(val examples: Map[String, List[String]], val theory: List[Clause],
val withInertia: Boolean = true, val globals: Globals) extends ASPResultsParser {
val FNS_PRED = "posNotCovered"
val FPS_PRED = "negsCovered"
val TPS_PRED = "posCovered"
val aspInputFile = utils.Utils.getTempFile(prefix = "crossVal", suffix = ".lp", deleteOnExit = true)
//val bk = if (withInertia) List(s"\n#include " + "\"" + Core.bkFile + "\".\n") else List(s"\n#include " + "\"" + Core.bkFileNoInertia + "\".\n")
val bk = if (withInertia) List(s"\n#include " + "\"" + globals.BK_CROSSVAL + "\".\n") else List(s"\n#include " + "\"" + globals.ILED_NO_INERTIA + "\".\n")
val command = Seq("python", Globals.ASPHandler, s"aspfile=${aspInputFile.getCanonicalPath}")
val varbedExmplPatterns = for (x <- globals.eps2) yield x.varbed.tostring
val coverageConstr =
varbedExmplPatterns flatMap (x => List(s"\nposNotCovered($x) :- example($x), not $x.", s"\nnegsCovered($x):- $x, not example($x).", s"\nposCovered($x):- $x, example($x).\n"))
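  // For instance, if a varbed example pattern is holdsAt(F,T) (an assumed illustration),
  // the template above expands to the ASP rules:
  //   posNotCovered(holdsAt(F,T)) :- example(holdsAt(F,T)), not holdsAt(F,T).
  //   negsCovered(holdsAt(F,T)) :- holdsAt(F,T), not example(holdsAt(F,T)).
  //   posCovered(holdsAt(F,T)) :- holdsAt(F,T), example(holdsAt(F,T)).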
utils.Utils.toASPprogram(
program = bk ++ examples("annotation") ++ examples("narrative") ++
theory.map(x => x.tostring) ++ coverageConstr,
show = List("posNotCovered/1", "negsCovered/1", "posCovered/1"), writeToFile = aspInputFile.getCanonicalPath
)
val res = ASP.solveASPNoJep("crossvalidation", aspFile = aspInputFile.getCanonicalPath)
val model = if (res.isEmpty) List[String]() else res.head.atoms
def get(what: String) = {
model match {
case Nil => 0
case _ =>
model count (_.contains(what))
}
}
val fns = get(FNS_PRED)
val fps = get(FPS_PRED)
val tps = get(TPS_PRED)
val precision = tps.toFloat / (tps + fps)
val recall = tps.toFloat / (tps + fns)
val fscore = 2 * (precision * recall) / (precision + recall)
val out = (tps, fps, fns, precision, recall, fscore)
}
| 35,264 | 43.191729 | 182 | scala |
OLED | OLED-master/src/main/scala/iled/TestActor.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package iled
import akka.actor._
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import scala.language.postfixOps
case object AskNameMessage
class TestActor extends Actor {
def receive = {
case AskNameMessage =>
//Thread.sleep(5000)
sender ! "Fred" // respond to the 'ask' request
case _ => println("that was unexpected")
}
}
object AskTest extends App {
// create the system and actor
val system = ActorSystem("AskTestSystem")
val myActor = system.actorOf(Props[TestActor], name = "myActor")
// (1) this is one way to "ask" another actor for information
implicit val timeout = Timeout(60 seconds)
val future = myActor ? AskNameMessage
val result = Await.result(future, timeout.duration).asInstanceOf[String]
println(result)
// (2) a slightly different way to ask another actor for information
///*
val future2: Future[String] = ask(myActor, AskNameMessage).mapTo[String]
val result2 = Await.result(future2, 1 second)
println(result2)
//*/
system.terminate()
}
| 1,812 | 30.258621 | 74 | scala |
OLED | OLED-master/src/main/scala/logic/AtomSignature.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package logic
/**
* Created by nkatz at 6/10/19
*/
class AtomSignature(val predSymbol: String, arity: Int) {
def tostring = s"$predSymbol/$arity"
}
| 854 | 28.482759 | 72 | scala |
OLED | OLED-master/src/main/scala/logic/Clause.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package logic
import java.text.DecimalFormat
import java.util.UUID
import app.runutils.Globals
import logic.Examples.Example
import oled.distributed.Structures.ClauseStats
import logic.Exceptions._
import logic.Modes.ModeAtom
import utils.{ASP, Utils}
import oled.distributed.Structures
import scala.collection.mutable.ListBuffer
import scala.util.Random
import utils.ClauseImplicits._
import utils.parsers.{ClausalLogicParser, PB2LogicParser}
/**
* Companion object
*/
object Clause {
val empty = Clause()
def apply(lits: List[Literal]) = {
new Clause(head = lits.head.asPosLiteral, body = lits.drop(1))
}
def apply(c: Clause, ss: Theory) = {
new Clause(head = c.head, body = c.body, supportSet = ss, fromWeakExample = c.fromWeakExample)
}
def updateField(c: Clause, fromWeakExample: Boolean) = {
new Clause(head = c.head, body = c.body, supportSet = c.supportSet, fromWeakExample = fromWeakExample)
}
def apply(head: Literal, body: List[Literal]) = {
new Clause(head = head.asPosLiteral, body = body)
}
/* Parses a string into a Clause. */
/*
def parse(cl: String): Clause = {
val p = new ClausalLogicParser
p.getParseResult(p.parse(p.clause, cl)).asInstanceOf[Clause]
}
*/
  /* Use this, which is faster than combinator parsing. If any problems occur, just fall back to the previous parser
   * (uncomment the method above). See also the related comment at the parse method of the Literal companion object. */
def parse(cl: String) = parseWPB2(cl)
def parseWPB2(cl: String) = PB2LogicParser.parseClause(cl).asInstanceOf[Clause]
  def toMLNFlat(c: Clause) = {} // not implemented yet
}
case class Clause(
head: PosLiteral = PosLiteral(),
body: List[Literal] = Nil,
fromWeakExample: Boolean = false,
var supportSet: Theory = Theory(),
uuid: String = UUID.randomUUID.toString) extends Expression {
var parentClause: Clause = Clause.empty
var isBottomRule = false
var isTopRule = false
// This field is used by the distributed version of oled.
// It is a (k,v) map, where each k is the id (name) of one of the other nodes N and
// v is a Structures.Stats instance carrying the current counts from N.
var countsPerNode = scala.collection.mutable.Map[String, Structures.ClauseStats]()
var weight: Double = 0.0
var subGradient: Double = 0.0
var mistakes: Double = 0.0
// This is a "general-purpose" weight variable, the intention is to use this
// for all online convex optimization methods that we'll try (e.g. winnow, AdaGrad, Adam etc).
// Currently, it's only used for the multiplicative weights update framework.
var w_pos: Double = 1.0
var w_neg: Double = 1.0
// This is used in the (sleeping) expert setting, for randomized prediction.
var selectionProbability = 0.0
// Counts the updates to the w variable to calculate the running average
var weightUpdateCount = 0.0
var avgWeight = 0.0
def updateRunningWeightAvg(newWeight: Double) = {
val newAvg = ((avgWeight * weightUpdateCount) + newWeight) / (weightUpdateCount + 1)
avgWeight = newAvg
weightUpdateCount += 1
}
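  // Worked example: with avgWeight = 2.0 after 3 updates, updateRunningWeightAvg(6.0)
  // sets avgWeight to (2.0 * 3 + 6.0) / 4 = 3.0 and weightUpdateCount to 4.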
  // Counts the updates to the negative weight to calculate its running average
var negWeightUpdateCount = 0.0
var avgNegWeight = 0.0
def updateNegRunningWeightAvg(newWeight: Double) = {
val newAvg = ((avgNegWeight * negWeightUpdateCount) + newWeight) / (negWeightUpdateCount + 1)
avgNegWeight = newAvg
negWeightUpdateCount += 1
}
private val weights = new ListBuffer[Double]
def updateWeightsBuffer(weight: Double) = weights += weight
  // newAvg = ((previousMeanDiff * previousMeanDiffCount) + newDiff) / (previousMeanDiffCount + 1)
// These variables store the total current counts from all nodes.
// These are also used in the distributed setting.
var totalTPs = 0
var totalFPs = 0
var totalFNs = 0
var totalSeenExmpls = 0
// This is used in the distributed setting.
  // excludeNodeName is the name of the node on which the clause is currently being
  // evaluated. The stats from that node are excluded from the total counts sum,
// since these stats are taken from this.tps, this.fps etc
def updateTotalCounts(excludeNodeName: String) = {
// These are the tp, fp, fn and Nexmpl counts for clause, accumulated for all nodes
val (totalTps, totalFps, totalFns, totalNexmpls) = this.countsPerNode.filter(x => x._1 != excludeNodeName).foldLeft(0, 0, 0, 0){ (x, y) =>
(x._1 + y._2.tps, x._2 + y._2.fps, x._3 + y._2.fns, x._4 + y._2.Nexmls)
}
this.totalTPs = totalTps * tpw
this.totalFPs = totalFps * fpw
this.totalFNs = totalFns * fnw
this.totalSeenExmpls = totalNexmpls
}
// These are used in the distributed setting
def getTotalTPs = this.tps * tpw + this.totalTPs
def getTotalFPs = this.fps * fpw + this.totalFPs
def getTotalFNs = this.fns * fnw + this.totalFNs
def getTotalSeenExmpls = this.seenExmplsNum + this.totalSeenExmpls
def showCountsPerNode(excludeNodeName: String) = {
val transposed = this.countsPerNode.filter(x => x._1 != excludeNodeName).values.toVector.map(obj => Vector(obj.tps, obj.fps, obj.fns, obj.Nexmls)).transpose
/*
val tpsMsg = s"tps: ${transposed(0).sum} (${transposed(0).mkString("+")})"
val fpsMsg = s"fps: ${transposed(1).sum} (${transposed(1).mkString("+")})"
val fnsMsg = s"fps: ${transposed(2).sum} (${transposed(2).mkString("+")})"
val exmplsMsg = s"emxpls: ${transposed(3).sum} (${transposed(3).mkString("+")})"
*/
val tpsMsg = s"tps: $getTotalTPs [${tps * tpw} + (${transposed(0).mkString("+")})]"
val fpsMsg = s"fps: $getTotalFPs [${fps * fpw} + (${transposed(1).mkString("+")})]"
val fnsMsg = s"fns: $getTotalFNs [${fns * fnw} + (${transposed(2).mkString("+")})]"
val exmplsMsg = s"emxpls: $getTotalSeenExmpls [$seenExmplsNum + (${transposed(3).mkString("+")})]"
s"$tpsMsg $fpsMsg $fnsMsg $exmplsMsg"
}
/**
* True positives, false positive and false negatives
* covered by this clause (respectively). These are
* intended to be used as accumulative scores (in streaming mode).
   * Also they are used in the noise-tolerant setting to identify
* "good" support clauses to use for refinement.
*/
var tps: Int = 0
var fps: Int = 0
var fns: Int = 0
var tns: Int = 0
private val (tpw, fpw, fnw) = (Globals.glvalues("tp-weight").toInt, Globals.glvalues("fp-weight").toInt, Globals.glvalues("fn-weight").toInt)
def mestimate(mparam: Double, totalPositives: Int, totalNegatives: Int): Double = {
// The Laplace estimate is:
    // l = (positivesCovered + m * relFreq) / (totalExamplesCovered + m)
    // where
    // positivesCovered = the number of positives covered by the clause
    // relFreq = the relative frequency of positives in the training set (pos/(pos+negs))
    // totalExamplesCovered = the total number of examples covered by the clause
    // m = a parameter set according to the expected amount of noise (larger m for more noise)
    // We'll use the Laplace m-estimate with m = 2 and p(+|clause) = 0.5
//val positivesRelativeFrequency = totalPositives.toFloat / (totalPositives.toFloat + totalNegatives.toFloat)
//val totalExmplsCovered = tps+fps
//(tps.toFloat + (mparam * positivesRelativeFrequency)) / (totalExmplsCovered.toFloat + mparam)
(tps.toFloat * tpw + 1) / (tps.toFloat * tpw + fps.toFloat * fpw + 2.0)
}
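  // Worked example (with unit tp/fp weights): tps = 8 and fps = 2 give
  // (8 + 1) / (8 + 2 + 2.0) = 0.75, i.e. a Laplace-corrected precision.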
def precision: Double = {
val pr = tps.toFloat * tpw / (tps * tpw + fps * fpw)
if (pr.isNaN) 0.0 else pr
}
def recall: Double = {
val rec = tps.toFloat * tpw / (tps * tpw + fns * fnw)
if (rec.isNaN) 0.0 else rec
}
def fscore: Double =
if (this.precision + this.recall == 0) 0.0
else (2 * this.precision * this.recall) / (this.precision + this.recall)
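  // e.g. precision = 0.8 and recall = 0.5 give fscore = (2 * 0.8 * 0.5) / 1.3 ≈ 0.615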
def compressionInit = (tps * tpw - fps * fpw - (this.body.length + 1)).toDouble
def compressionTerm = (tps * tpw - (fns * fns) - (this.body.length + 1)).toDouble
def tpsRelativeFrequency =
if (this.tps == 0 || this.parentClause.tps == 0) 0.0
    else this.tps.toDouble * tpw / (this.parentClause.tps * tpw)
def foilGain(funct: String) = {
val thisCoverage = if (funct == "precision") this.precision else this.recall
val parentCoverage = if (funct == "precision") parentClause.precision else parentClause.recall
if (thisCoverage == 0.0 || thisCoverage == 1.0) {
      // If thisCoverage == 0.0 then this rule covers nothing, it's useless, so set its gain to 0.
// Note that otherwise we'll have a logarithm evaluated to -infinity (log(0)).
// If, on the other hand thisCoverage == 1.0 then this rule is perfect (but so is the parent --
// the parent cannot have smaller coverage), so again, no gain.
0.0
} else {
// here thisCoverage is in (0,1)
if (parentCoverage == 1.0 || parentCoverage == 0.0) {
// If parentCoverage == 1.0 then the parent rule is perfect, no way to beat that, so set this rule's gain to 0
// Note that otherwise we'll have the parent's log evaluated to 0 and the gain formula
// returning a negative value (parentTPs * log(thisCoverage), which is < 0 since thisCoverage < 1).
// Either way, we only care about positive gain.
// If, on the other hand, parentCoverage == 0.0 then thisCoverage == 0 (the parent covers nothing, so no way for
// this rule -- a refinement -- to cover something)
0.0
} else {
// here parentCoverage is in (0,1)
val _gain = tps * (Math.log(thisCoverage) - Math.log(parentCoverage))
// We are interested only in positive gain, therefore we consider 0 as the minimum of the gain function:
val gain = if (_gain <= 0) 0.0 else _gain
// This is the maximum gain for a given rule:
val max = parentClause.tps.toDouble * (-Math.log(parentCoverage))
val normalizedGain = gain / max
normalizedGain
}
}
}
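/* Worked example with hypothetical numbers: suppose a refinement has precision 0.8
 * with tps = 20, while its parent has precision 0.5 with parentClause.tps = 40. Then
 * gain = 20 * (ln(0.8) - ln(0.5)) ~ 9.4 and max = 40 * (-ln(0.5)) ~ 27.7,
 * so the normalized gain is roughly 9.4 / 27.7 ~ 0.34. */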
/*
def foilGainInit = {
val nonzero = 0.0000006
val adjust = (x: Double) => if (x.isNaN || x == 0.0) nonzero else x
// How can this be normalized so we get a range in [0,1]???
// Remember also that if you use this, the parent rule should not be included
// in the calculation of the best-scoring rule, since it will always win
//tpsRelativeFrequency * (Math.log(adjust(precision)) - Math.log(adjust(parentClause.precision)))
val _gain = tps * (Math.log(adjust(precision)) - Math.log(adjust(parentClause.precision)))
// We are interested only in positive gain, therefore we consider 0 as the minimum of the gain function:
val gain = if (_gain <= 0) 0.0 else _gain
// This is the maximum for a given rule:
val max = parentClause.tps.toDouble * (- Math.log(adjust(parentClause.precision)) )
val normalizedGain = if (max == 0) 0.0 else gain/max
normalizedGain
}
def foilGainTerm = {
val nonzero = 0.0000006
val adjust = (x: Double) => if (x.isNaN || x == 0.0) nonzero else x
// How can this be normalized so we get a range in [0,1]???
// Remember also that if you use this, the parent rule should not be included
// in the calculation of the best-scoring rule, since it will always win
//tps * (Math.log(adjust(recall)) - Math.log(adjust(parentClause.recall)))
//tpsRelativeFrequency * (Math.log(adjust(recall)) - Math.log(adjust(parentClause.recall)))
val _gain = tps * (Math.log(adjust(recall)) - Math.log(adjust(parentClause.recall)))
// We are interested only in positive gain, therefore we consider the minimum of the
// gain function as 0:
val gain = if (_gain <= 0) 0.0 else _gain
// This is the maximum for a given rule:
val max = parentClause.tps.toDouble * (- Math.log(adjust(parentClause.recall)) )
val normalizedGain = if (max == 0) 0.0 else gain/max
normalizedGain
}
*/
var refinements = List[Clause]()
//var refinements = List[Refinement]()
// The number of examples until the Hoeffding test succeeds
var seenExmplsNum = 0
// The number over which the mean of best scores' difference
// has been computed. When a new mean score difference newDiff arrives,
// to find the new mean we simply need to calculate
//
// newMean = (oldMean*previousCount + newDiff)/(previousCount+1)
//
// see also the calculation of meanDiff below.
var previousMeanDiffCount = 0
var previousMeanScoreCount = 0
// Stores the previous mean difference observed between
// the best and second-best specializations. It is used
// for the calculation of the current mean in meanDiff method.
var previousMeanDiff = 0.0
// This stores the previous mean score (used for pruning)
var previousScore = 0.0
def meanDiff(scoringFunction: String) = {
/*
if (Globals.glvalues("distributed").toBoolean) {
throw new RuntimeException("This is just to debug the distributed version, where the execution flow should not pass from here!")
}
*/
// The - sign is to sort in decreasing order (the default is increasing)
// Also sort clauses by length, so that shorter clauses are preferred over longer ones with the same score
val allSorted =
if (scoringFunction == "foilgain")
// The parent rule should not be included here (otherwise it will always win, see the foil gain formula)
this.refinements.sortBy { x => (-x.score, -x.weight, x.body.length + 1) }
else
(List(this) ++ this.refinements).sortBy { x => (-x.score, -x.weight, x.body.length + 1) }
val bestTwo = allSorted.take(2)
//val (best,secondBest) = (bestTwo.head,bestTwo.tail.head)
// The correct way to do it is the commented one above. But in some cases
// the refinements list is empty (this has only occurred when using basic and auxiliary predicates in fraud).
// This should be handled generically: a clause with no candidate refinements should not be considered for specialization
val (best, secondBest) =
if (bestTwo.length > 1)
(bestTwo.head, bestTwo.tail.head)
else
(bestTwo.head, bestTwo.head)
val newDiff = best.score - secondBest.score
val newMeanDiff = ((previousMeanDiff * previousMeanDiffCount) + newDiff) / (previousMeanDiffCount + 1)
previousMeanDiffCount += 1 // increase the count
previousMeanDiff = newMeanDiff
(newMeanDiff, best, secondBest)
}
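/* Illustrative sketch of the Hoeffding bound that the mean difference computed by
 * meanDiff is typically tested against (the helper name is hypothetical). For a
 * scoring function with range R (R = 1 for precision/recall), confidence parameter
 * delta and n = seenExmplsNum observations, specialization is warranted once the
 * observed mean difference exceeds epsilon. */
private def hoeffdingBoundSketch(R: Double, delta: Double, n: Int): Double =
math.sqrt(R * R * math.log(1.0 / delta) / (2 * n))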
/* This is used in the distributed setting */
def distributedMeanDiff = {
// The - sign is to sort in decreasing order (the default is increasing)
// Also sort clauses by length, so that shorter clauses are preferred over longer ones with the same score
val allSorted = (List(this) ++ this.refinements).sortBy { x => (-x.distScore, x.body.length + 1) }
val bestTwo = allSorted.take(2)
//val (best,secondBest) = (bestTwo.head,bestTwo.tail.head)
// The correct way to do it is the commented one above. But in some cases
// the refinements list is empty (this has only occurred when using basic and auxiliary predicates in fraud).
// This should be handled generically: a clause with no candidate refinements should not be considered for specialization
val (best, secondBest) =
if (bestTwo.length > 1)
(bestTwo.head, bestTwo.tail.head)
else
(bestTwo.head, bestTwo.head)
val newDiff = best.distScore - secondBest.distScore
val newMeanDiff = ((previousMeanDiff * previousMeanDiffCount) + newDiff) / (previousMeanDiffCount + 1)
previousMeanDiffCount += 1 // increase the count
previousMeanDiff = newMeanDiff
(newMeanDiff, best, secondBest)
}
def clearStatistics = {
tps = 0
fps = 0
fns = 0
seenExmplsNum = 0
refinements = Nil
previousMeanDiffCount = 0
previousMeanScoreCount = 0
previousMeanDiff = 0
}
def length = this.body.length + 1
def score: Double = {
/*
if (this.foilGainInit.isInfinite || this.foilGainTerm.isInfinite) {
val debug = "stop"
}
*/
/*
if (Globals.glvalues("distributed").toBoolean) {
throw new RuntimeException("This is just to debug the distributed version, where the execution flow should not pass from here!")
}
*/
if (this.head.functor == "initiatedAt") {
Globals.scoringFunction match {
case "default" => if (!precision.isNaN) precision else 0.0 // That's the standard
//case "default" => weighted_precision
//case "default" => if (!precision.isNaN) (tps.toFloat- (fps.toFloat - this.length.toFloat))/(tps.toFloat+fps.toFloat) else 0.0
//case "default" => if (!precision.isNaN) (1.0 - 1.0/(1.0+tps.toDouble)) * precision else 0.0
case "foilgain" => foilGain("precision")
case "fscore" => fscore
case _ => throw new RuntimeException("Error: No scoring function given.")
}
//presision_length
//compressionInit
//foilGainInit
//gainInt
} else if (this.head.functor == "terminatedAt") {
Globals.scoringFunction match {
case "default" => if (!precision.isNaN) precision else 0.0 //if (!recall.isNaN) recall else 0.0 //
//case "default" => weighted_recall
//case "default" => (tps.toFloat- (fns.toFloat - this.length.toFloat))/(tps.toFloat+fns.toFloat)
//case "default" => if (!recall.isNaN) (1.0 - 1.0/(1.0+tps.toDouble)) * recall else 0.0
case "foilgain" => foilGain("recall") //foilGain("precision")
case "fscore" => fscore
case _ => throw new RuntimeException("Error: No scoring function given.")
}
//recall_length
//compressionTerm
//foilGainTerm
//gainTerm
} else {
// this.fscore
/* Until now this has only been used for fraud.
* We don't use f-score for evaluating individual
* rules, because a rule's fns are irrelevant.
* So we'll use precision.
*/
//foilGainInit // No improvement!
//gainInt // No improvement!
if (!precision.isNaN) precision else 0.0 // This is what I use but does not work well
// if (!precision.isNaN) (tps.toFloat + 10) / (tps.toFloat+10 + fps) else 0.0 // weight it just to check
//rateDiff // No! (takes negative values)
//tpsRelativeFrequency
//fscore
}
}
// This is the scoring function used in the distributed setting
def distScore: Double = {
val precision_ = getTotalTPs.toFloat / (getTotalTPs + getTotalFPs)
/*
val recall_ =
if (List("initiatedAt","terminatedAt").contains(this.head.functor)) getTotalTPs.toFloat / ( getTotalTPs + (getTotalFNs * 10))
else getTotalTPs.toFloat / ( getTotalTPs + (getTotalFNs * 10))
*/
val recall_ = getTotalTPs.toFloat / (getTotalTPs + getTotalFNs)
if (this.head.functor == "initiatedAt") {
if (!precision_.isNaN) precision_ else 0.0
} else if (this.head.functor == "terminatedAt") {
if (!recall_.isNaN) recall_ else 0.0
} else {
if (!precision_.isNaN) precision_ else 0.0
}
}
def format(x: Double) = {
val defaultNumFormat = new DecimalFormat("0.############")
defaultNumFormat.format(x)
}
def showWithStats = {
val scoreFunction = if (!Globals.glvalues("distributed").toBoolean) this.score else this.distScore
val (tps_, fps_, fns_) =
if (!Globals.glvalues("distributed").toBoolean) (tps * tpw, fps * fpw, fns * fnw)
else (this.getTotalTPs, this.getTotalFPs, this.getTotalFNs)
s"score:" + s" $scoreFunction, tps: $tps_, fps: $fps_, fns: $fns_ | " +
s"MLN-weight: ${format(this.weight)} | Expert Weight (pos/neg/avgPos/avgNeg): $w_pos/$w_neg/$avgWeight/$avgNegWeight " +
s"Evaluated on: ${this.getTotalSeenExmpls} examples\n$tostring"
}
def showWithStats_NoEC = {
//s"score (precision): $score, tps: $tps, fps: $fps, fns: $fns\n$tostring"
s"score (precision): $score, tps: ${tps * tpw}, fps: ${fps * fpw}, fns: ${fns * fnw}\n$tostring"
}
/* Returns the maximum Hausdorff distance of this clause from a list of clauses */
def similarity(x: List[Clause]) = {
val maxDist = x.foldLeft(List[Double]()){ (accum, newClause) =>
val sim = Hausdorff.litlistFromlitlist(this.literals, newClause.literals)
accum :+ sim
}.max
maxDist
}
/* generates candidate refinements for the Hoeffding test. otherAwakeRules is used by
* Hedge (sleeping experts) to avoid generating refinements that already exist. */
def generateCandidateRefs(gl: Globals, otherAwakeExperts: Vector[Clause] = Vector.empty[Clause]): Unit = {
/*
* Checks if a specialization is redundant. Currently a specialization is
* redundant if it consists only of comparison predicates of the same type.
* For instance, this is redundant:
*
* blah :- close(X,Y,30,12), close(X,Y,40,12), close(X,Y,50,12)
*
* where close(X, Y, Z, T) means that the Euclidean distance of X and Y at time T is less than Z.
*
* */
def redundant(newLits: Set[Literal]) = {
val all = this.body ++ newLits
val test: Set[ModeAtom] = all.map(x => x.modeAtom).toSet
// if the test variable is a singleton then all
// predicates are comparison predicates of the same type
if (all.size == 1) false else test.size == 1 && gl.comparisonPredicates.contains(test.head)
}
val specializationDepth = Globals.glvalues("specializationDepth").toInt
val candidateList = this.supportSet.clauses.flatMap(_.body).distinct.filter(!this.body.contains(_))
// This is the original implementation
///*
val refinementsSets =
(for (x <- 1 to specializationDepth) yield x).foldLeft(List[List[Clause]]()) { (accum, depth) =>
val z = for (lits <- candidateList.toSet.subsets(depth).toVector if !redundant(lits)) yield Clause(this.head, this.body ++ lits)
val z_ = Theory.compressTheory(z)
accum :+ z_
}
// The filtering is used by Hedge
val flattend = refinementsSets.flatten.filter(ref => !otherAwakeExperts.exists(rule => rule.thetaSubsumes(ref) && ref.thetaSubsumes(rule)))
//val flattend = refinementsSets.flatten.filter( ref => !otherAwakeExperts.exists(rule => ref.thetaSubsumes(rule)) )
//*/
// 6-6-2019: I'll try this: Start with a conjunction of 2 literals at the body, instead of 1. Then,
// add new ones one by one.
/*
val flattend = {
if (this.body.isEmpty) {
val t = candidateList.toSet.subsets(2).filter(x => !redundant(x)).map(x => Clause(this.head, this.body ++ x)).toList
Theory.compressTheory(t)
} else {
val t = candidateList.map(x => Clause(this.head, this.body :+ x))
Theory.compressTheory(t)
}
}
*/
flattend.foreach { refinement =>
refinement.parentClause = this
//------------------------------------
refinement.weight = this.weight
//------------------------------------
//refinement.w_pos = this.w_pos
//------------------------------------
refinement.supportSet = this.supportSet
//------------------------------------
val newMap = scala.collection.mutable.Map[String, ClauseStats]()
if (Globals.glvalues("distributed").toBoolean) {
// Just to be on the safe side in the distributed case
if (this.countsPerNode.isEmpty) throw new RuntimeException(s"The countsPerNode map of clause ${this.uuid} is empty," +
s" when it should have been populated at clause generation")
this.countsPerNode.foreach { entry => newMap(entry._1) = new ClauseStats(0, 0, 0, 0) }
refinement.countsPerNode = newMap
}
}
this.refinements = flattend
}
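/* Illustrative trace with hypothetical values: with specializationDepth = 2 and
 * candidate body literals {q, r} drawn from the support set, the method above
 * produces the refinements head :- body,q and head :- body,r and head :- body,q,r
 * (minus redundant and theta-equivalent ones), each inheriting this clause's
 * weight and support set. */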
def marked(globals: Globals) = {
Clause(head = Literal(predSymbol = "marked", terms = List(this.##.toString, this.head)), body = this.withTypePreds(globals).body)
}
//val isEmpty = this == Clause.empty
def addToSupport(c: Clause) = {
this.supportSet = Theory(this.supportSet.clauses :+ c)
}
def addToSupport(c: List[Clause]) = {
this.supportSet = Theory(this.supportSet.clauses ++ c)
}
def removeFromSupport(c: Clause) = {
this.supportSet = Theory(this.supportSet.clauses.filter(x => x != c))
}
// This is used in order to avoid maintaining redundant
// rules in the support set. In this context, a support set
// rule is redundant if it subsumes some other rule
// in the support set. This can happen in cases where e.g.
// p :- q,r was added to the support set early on and
// later on p :- q,r,s was added. In this case the first
// rule is redundant and should be removed. This redundancy
// checking should be done every time the support set
// changes with the addition of a rule.
def compressSupport = {
val redundants = this.supportSet.clauses filter {
p =>
this.supportSet.clauses exists {
q => !p.equals(q) && (p thetaSubsumes q)
}
}
this.supportSet = Theory(this.supportSet.clauses filter (p => !redundants.contains(p)))
}
override lazy val tostring = this.toStrList match {
case List() => throw new LogicException("Cannot generate a Clause object for the empty clause")
case h :: ts =>
ts.length match {
case 0 => h + "."
case 1 => h + " :- \n" + " " + ts.head + "."
case _ => h + " :- \n" + (for (x <- ts) yield if (ts.indexOf(x) == ts.length - 1)
s" $x."
else
s" $x,").mkString("\n")
}
}
/* No new line after each literal */
def tostring_debug = this.toStrList match {
case List() => throw new LogicException("Cannot generate a Clause object for the empty clause")
case h :: ts =>
ts.length match {
case 0 => h + "."
case 1 => h + " :- " + ts.head + "."
case _ =>
h + " :- " + (for (x <- ts) yield if (ts.indexOf(x) == ts.length - 1) s"$x."
else s"$x,").mkString("")
}
}
/* No new line after each literal, no final ".", "^" instead of "," for conjunctions. */
def tostring_MLN(id: Int) = {
def format(x: Double) = {
val defaultNumFormat = new DecimalFormat("0.############")
defaultNumFormat.format(x)
}
val markedHead = PosLiteral(this.head.functor, terms = this.head.terms :+ Constant(s"ruleId_$id"))
(List(markedHead.asLiteral) ++ this.body).map(x => x.toMLN).map(x => x.tostring) match {
case List() => throw new LogicException("Cannot generate a Clause object for the empty clause")
case h :: ts =>
ts.length match {
case 0 => format(this.weight) + " " + h
case 1 => (format(this.weight) + " " + h + " :- " + ts.head).replaceAll("not ", "!")
case _ =>
format(this.weight) + " " + h + " :- " + (for (x <- ts) yield if (ts.indexOf(x) == ts.length - 1) s"$x" else s"$x ^ ").mkString("").replaceAll("not ", "!")
}
}
}
def to_MLNClause() = {
val litsToMLN = this.toLiteralList.map(x => Literal.toMLNClauseLiteral(x).tostring)
litsToMLN match {
case List() => throw new LogicException("Cannot generate a Clause object for the empty clause")
case h :: ts =>
ts.length match {
case 0 => format(this.weight) + " " + h
case 1 => (format(this.weight) + " " + h + " :- " + ts.head).replaceAll("not ", "!")
case _ =>
format(this.weight) + " " + h + " :- " + (for (x <- ts) yield if (ts.indexOf(x) == ts.length - 1) s"$x" else s"$x ^ ").mkString("").replaceAll("not ", "!")
}
}
}
def varbed: Clause = {
var accum = ListBuffer[Literal]()
var map = scala.collection.mutable.Map[Expression, Expression]()
var counter = 0
for (x <- this.toLiteralList) {
val (a, _, c, d) = x.variabilize(List(Literal(predSymbol = x.predSymbol, isNAF = x.isNAF)),
x.terms zip x.modeAtom.args, map, List(), counter)
val aa = Literal(a.head.predSymbol, a.head.terms, a.head.isNAF, x.modeAtom, a.head.typePreds)
accum ++= List(aa)
map ++= c
counter = d
}
val l = accum.toList
val out = Clause(head = l.head, body = l.tail)
out
}
/**
* this theta-subsumes that
*
* @param that the (potentially) subsumed clause
* @return true if this subsumes that else false
*/
def thetaSubsumes(that: Clause): Boolean = {
/*
this.toLiteralList.forall{ l =>
that.toLiteralList.exists { l2 =>
f(l,l2)
}
}
*/
val isVar = (x: String) => try {
Variable(x); true
} catch {
case _: IllegalArgumentException => false
}
val (skolemised, skmap) = that.skolemise
var skolems = (for (y <- skmap.keySet.filter(x => isVar(x))) yield skmap(y)).toList
val thisVars = this.getVars
while (thisVars.length > skolems.length) {
skolems = skolems ::: skolems
}
for (x <- skolems.permutations) {
val trySubstitution = (thisVars zip x).map { x => (x._1, Constant(x._2)) }.toMap
val repl = this.toLiteralList.map { x => x.replaceAll(trySubstitution) }.map { x => x.tostring }
if (Utils.isSubset(repl.toSet, skolemised.toStrList.toSet)) return true
}
false
}
def thetaSubsumes(t: Theory): Boolean = {
t.clauses.forall(x => this.thetaSubsumes(x))
}
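/* Usage sketch with hypothetical clauses (assuming Literal.parse from this package):
 * p(X) :- q(X) theta-subsumes p(a) :- q(a),r(a), via the substitution {X -> a}.
 *
 * val general = Clause(head = Literal.parse("p(X)"), body = List(Literal.parse("q(X)")))
 * val specific = Clause(head = Literal.parse("p(a)"), body = List(Literal.parse("q(a)"), Literal.parse("r(a)")))
 * general.thetaSubsumes(specific) // expected: true
 */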
/**
* Same as above, but returns a List[String].
*/
def toStrList: List[String] = List(head.tostring) ++ (for (x <- body) yield x.tostring)
def literals: List[Literal] = List(this.head.asLiteral) ++ this.body
/**
* Get the variables from this clause
*/
def getVars = {
val vars = this.head.asLiteral.getVars
for (x <- this.body) vars ++= x.getVars.filter { x => !vars.contains(x) }
vars.toList
}
/**
* Replaces all variables with a new constant symbol 'skolem0', 'skolem1' etc. Same variables correspond to the
* same constant symbol. Constants remain intact, i.e. they are used as skolem constants themselves. Example:
*
* a(X,Y,Z) :-
* p(X,q(Y,const1,2),Z),
* not r(A,B,C).
*
* is turned into:
*
* a(skolem0,skolem1,skolem2) :-
* p(skolem0,q(skolem1,const1,2),skolem2),
* not r(skolem3,skolem4,skolem5).
*
* Returns the skolemised clause and the 'vars -> skolems' map
*
*/
def skolemise: (Clause, Map[String, String]) = {
val l = this.toLiteralList
val skmap = this.getSkolemConsts
var temp = new ListBuffer[Literal]
for (x <- l) {
val m = x.skolemize(skmap).toList
val toLit = Literal(predSymbol = x.predSymbol, terms = m, isNAF = x.isNAF)
temp += toLit
}
val fl = temp.toList
val sk = Clause(head = fl.head,
body = for (x <- fl; if fl.indexOf(x) != 0) yield x)
(sk, skmap)
}
/**
* Generates skolem constants from the variables and the constants of the clause. It returns a map of the form
* Map("X" -> "skolem0", "Y" -> "skolem1", "const" -> "const", ...) (we use the constants as skolem constants)
*/
private def getSkolemConsts: Map[String, String] = {
val l = this.toLiteralList
var skolems = new ListBuffer[(String, String)]
var counter = 0
for (x <- l) {
val m = x.getSkolemConsts(skolems, counter);
skolems = m._1; counter = m._2
}
skolems.toMap
}
def use_2_split(i: Int, globals: Globals): (Theory, Map[String, Literal]) = {
val temp = for (
(lit, j) <- this.toLiteralList zip List.range(0, this.toLiteralList.length);
vars = lit.variables(globals);
_try = Literal(predSymbol = "try",
terms = List(Constant(s"$i"), Constant(s"$j"),
Literal(predSymbol = "vars",
terms = for (x <- vars) yield Variable(x.name, _type = x._type))),
typePreds = for (x <- vars) yield s"${x._type}(${x.name})");
tryLit = j match {
case 0 => Literal(predSymbol = "use2", terms = List(Constant(s"$i"), Constant("0")))
case _ => _try
};
useMap = j match {
case 0 => s"use2($i,0)" -> this.head.asLiteral
case _ => s"use2($i,$j)" -> Literal(predSymbol = lit.predSymbol, terms = lit.terms,
isNAF = lit.isNAF, typePreds = _try.typePreds, modeAtom = lit.modeAtom)
};
tryClause1 = if (j > 0)
Clause(head = _try,
body = List(Literal(predSymbol = "use2",
terms = List(Constant(s"$i"), Constant(s"$j"))), lit))
else None;
tryClause2 = if (j > 0)
Clause(head = _try, body = List(Literal(predSymbol = "use2",
terms = List(Constant(s"$i"), Constant(s"$j")), isNAF = true)) :::
(for (x <- _try.typePreds) yield Literal(x)))
else None
) yield (tryLit, tryClause1, tryClause2, useMap)
val ls = temp.map { x => List(x._1, x._2, x._3, x._4) }.transpose
val defeasible = Theory(
(List(Clause(head = this.head, body = ls.head.map(x => x.asInstanceOf[Literal]))) :::
ls(1).filter { x => x != None }.map(x => x.asInstanceOf[Clause]) :::
ls(2).filter { x => x != None }.map(x => x.asInstanceOf[Clause])).map { x => x.withTypePreds(globals) })
//val useMap = ls(3).asInstanceOf[List[(String,Literal)]].groupBy(_._1).map { case (k,v) => (k,v.map(_._2))}
val useMap = ls(3).asInstanceOf[List[(String, Literal)]].groupBy(_._1).map { case (k, v) => v.head }
(defeasible, useMap)
}
/**
* Generates a defeasible theory from a single support set rule. This rule
* may either be chosen at random from the support set, or it can be set
* to a particular one (given with the input). This is used in order to identify
* inconsistent rules from the prior hypothesis w.r.t. a new example window.
*
*/
def use_3_split_one(i: Int, ssClause: Clause = Clause.empty, withSupport: String = "fullSupport", globals: Globals) = {
if (this != Clause.empty) {
val SSRule = ssClause match {
case Clause.empty =>
if (withSupport == "fullSupport")
Random.shuffle(this.supportSet.clauses).head // select an arbitrary rule
else
Random.shuffle(this.supportSet.strongRules.clauses).head // select a strong rule
case _ => ssClause
}
val j = this.supportSet.clauses.indexOf(SSRule) + 1
val (defeasible, useMap) = this.f(SSRule, i, j, globals)
(Theory(defeasible), useMap, s"{use3($i,$j,1..${SSRule.body.length})}.")
} else {
(Theory(), Map[String, Literal](), "")
}
}
/**
* generates a defeasible theory from this clause, using every rule in its
* support set. Each defeasible theory resulting from each support set rule
* is merged with the others.
*/
def use_3_split(i: Int, withSupport: String = "fullSupport", globals: Globals) = {
//val support = if(withSupport=="fullSupport") this.supportSet else this.supportSet.strongRules
val e = if (withSupport == "fullSupport") { // analyze the whole support set
for ((x, j) <- this.supportSet.clauses zip List.range(1, this.supportSet.clauses.length + 1)) yield f(x, i, j, globals)
} else { // analyze only the strong support rules, by skipping the weak ones. Indexing remains consistent with the actual ordering in the support
for ((x, j) <- this.supportSet.clauses zip List.range(1, this.supportSet.clauses.length + 1) if !this.supportSet.clauses(j - 1).fromWeakExample) yield f(x, i, j, globals)
}
val g = e.foldLeft(Tuple2(List[Clause](), Map[String, Literal]()))((x, y) => (x._1 ++ y._1, x._2 ++ y._2))
val generates = (this.supportSet.clauses zip List.range(1, this.supportSet.clauses.length + 1)).map(p => s"{use3($i,${p._2},1..${p._1.body.length})}.").mkString("\n")
(Theory(g._1), g._2, generates)
}
def f(c2: Clause, i: Int, j: Int, globals: Globals) = {
val usedPart = c2.body filter (x => !this.body.contains(x))
val (iconst, jconst) = (Constant(s"$i"), Constant(s"$j"))
val vars = this.head.asLiteral.variables(globals)
val varlit = Literal(predSymbol = "vars", terms = vars map (x => Variable(x.name, _type = x._type)))
val exceptionLit = Literal(predSymbol = "exception",
terms = List(iconst, jconst, varlit), isNAF = true)
val res = (
for (
(x, k) <- usedPart zip List.range(1, usedPart.length + 1);
use3lit = Literal(predSymbol = "use3", terms = List(Constant(s"$i"), Constant(s"$j"), Constant(s"$k")))
) yield (Clause(head = exceptionLit.nonNegated, body = List(use3lit, x.negateThis)),
use3lit.tostring -> x)).map { z => List(z._1, z._2) }.transpose
val defeasible =
(List(Clause(this.toLiteralList :+ exceptionLit)) ::: res.head.
map { x => x.asInstanceOf[Clause] }).
map { x => x.withTypePreds(globals, extraTypePreds = this.head.asLiteral.getTypePredicates(globals)) }
val useMap = res(1).asInstanceOf[List[(String, Literal)]].groupBy(_._1).map { case (k, v) => v.head }
(defeasible, useMap)
}
/**
* Helper method that converts a clause to a List[Literal] with the head of the clause as the first element.
*/
def toLiteralList = List(head.asLiteral) ++ (for (x <- body) yield x)
/**
* Returns this clause with type predicates in the body, for each variable that appears in the
* clause. The optional input parameter is for adding extra type
* predicates that cannot be inferred from the head of the rule.
* Examples of the latter are transformation rules where the heads consist
* of auxiliary, fresh predicates, not included in the target
* language and thus not described by the mode declarations.
*
*/
def withTypePreds(globals: Globals, extraTypePreds: List[String] = List()): Clause = {
val types = (for (x <- this.toLiteralList)
yield x.getTypePredicates(globals)).filter { z => z != Nil }.
flatten.++(extraTypePreds).distinct.
map { y => Literal.parse(y) }
Clause(head = this.head, body = this.body ::: types)
}
}
| 38,842 | 38.920863 | 176 | scala |
OLED | OLED-master/src/main/scala/logic/Constant.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package logic
object Constant {
def apply(): Constant = {
Constant("")
}
}
case class Constant(override val name: String, plmrk: String = "", override val _type: String = "") extends Expression {
// No use. Allowing upper-case constants helps in MLN <--> ASP conversion
//require(!name.toCharArray()(0).isUpper) // else throws an IllegalArgumentException
override def tostring = name
override def tostringQuote = if (plmrk == "-" || plmrk == "#") "\"" + name + "\"" else name
def asLiteral = Literal(predSymbol = name)
}
| 1,229 | 33.166667 | 120 | scala |
OLED | OLED-master/src/main/scala/logic/Examples.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package logic
import com.mongodb.casbah.Imports._
import utils.DataUtils.Data
/**
*
* Utilities for representing and handling training examples.
*
*/
object Examples {
object Example {
val f_annot = (x: DBObject) =>
x.asInstanceOf[BasicDBObject].get("annotation").asInstanceOf[BasicDBList].toList.map(x => x.toString)
val f_narrative = (x: DBObject) =>
x.asInstanceOf[BasicDBObject].get("narrative").asInstanceOf[BasicDBList].toList.map(x => x.toString)
val f_time = (x: DBObject) => x.asInstanceOf[BasicDBObject].get("time").toString
// Merges a batch into one example with the original annotation (does not throw away annotation for weaks)
def merge(_exs: List[Example], markedAsWeak: Boolean = false) = {
_exs match {
case Nil => new Example()
case _ =>
val exs = _exs.filter(x => x != Example())
val startTimePredicate = s"starttime(${exs.map(x => x.time).map(x => x.toInt).distinct.sorted.head})"
/*
val annotation = asWeakExmpls match {
case true => exs.flatMap(x => x.annotation ++ x.suppressedAnnotation).distinct
case _ => exs.flatMap(x => x.annotation).distinct
}
*/
val annotation = exs.flatMap(x => x.annotation).distinct
val narrative = startTimePredicate :: exs.flatMap(x => x.narrative).filter(p => !p.contains("starttime"))
val time = exs.map(x => x.time).map(x => x.toInt).distinct.sorted.head.toString
new Example(annot = annotation, nar = narrative, _time = time, isWeak = markedAsWeak)
}
}
///*
def tranformToExample(examples: List[DBObject], usingWeakExmpls: Boolean = false) = {
val startTimePredicate = s"starttime(${examples.map(x => f_time(x)).map(x => x.toInt).distinct.sorted.head})"
val annotation =
if (!usingWeakExmpls)
examples.flatMap(x => f_annot(x))
else examples.tail.flatMap(x => f_annot(x)) // leave the starttime annotation out to enforce learning
val narrative =
startTimePredicate :: examples.flatMap(x => f_narrative(x)).filter(p => !p.contains("starttime"))
//val time = f_time(examples.head)
val time = examples.map(x => f_time(x)).map(x => x.toInt).distinct.sorted.head.toString
val suppressedAnnotation = if (usingWeakExmpls) f_annot(examples.head) else List[String]()
val isWeak = examples.length == 2 && f_annot(examples.head).nonEmpty && f_annot(examples.tail.head).nonEmpty
new Example(annot = annotation, nar = narrative, _time = time, suppressedAnnotation = suppressedAnnotation, isWeak = isWeak)
}
//*/
/*
def mergeExamples(_exs: List[Example]) = {
_exs match {
case Nil => new Example()
case _ =>
val exs = _exs.filter(x => x!=Example())
val startTimePredicate = s"starttime(${exs.map(x => x.time).map(x => x.toInt).distinct.sorted.head})"
val annotation = exs.flatMap(x => x.annotation).distinct
val narrative = exs.flatMap(x => x.narrative).distinct
val time = exs.map(x => x.time).map(x => x.toInt).distinct.sorted.head.toString
new Example(annot=annotation,nar=narrative,_time=time)
}
}
*/
def apply(examples: List[DBObject], usingWeakExmpls: Boolean) = {
usingWeakExmpls match {
case false => tranformToExample(examples)
case _ =>
val pairs = examples.map(p =>
if (examples.indexOf(p) + 1 < examples.length)
(p, examples(examples.indexOf(p) + 1))
else (p, p)).filter(x => x._1 != x._2)
val weaks = for (p <- pairs) yield tranformToExample(List(p._1, p._2), usingWeakExmpls = true)
new Example(containedExamples = weaks)
}
}
/*
def apply(annotation: List[String],narrative: List[String],time: String) ={}
*/
/*
def apply(a: scala.collection.mutable.Set[String], n: scala.collection.mutable.Set[String], t: String) = {
new Example(annot = a.toList, nar = n.toList, _time = t)
}
*/
}
case class Example(
e: DBObject = DBObject(),
commingFromDB: String = "",
private val annot: List[String] = Nil,
private val nar: List[String] = Nil,
_time: String = "",
isWeak: Boolean = false,
usingWeakExmpls: Boolean = false,
containedExamples: List[Example] = List[Example](),
suppressedAnnotation: List[String] = Nil) extends Data {
var startTime = 0
var endTime = 0
val annotation: List[String] =
if (this.annot.isEmpty && this.nar.isEmpty) // then this is really coming from db, so read the DBObject
if (this.e.nonEmpty)
e.asInstanceOf[BasicDBObject].get("annotation").asInstanceOf[BasicDBList].toList.map(x => x.toString)
else this.annot
else this.annot
val annotationASP = this.annotation map (x => if (x.contains("example(") || x.contains("negExample(")) s"$x." else s"example($x).")
val narrative =
if (this.nar.isEmpty)
if (this.e.nonEmpty)
e.asInstanceOf[BasicDBObject].get("narrative").asInstanceOf[BasicDBList].toList.map(x => x.toString)
else this.nar
else this.nar
val narrativeASP = this.narrative map (x => if (x.endsWith(".")) x else s"$x.")
val time =
if (this._time == "")
if (this.e.nonEmpty)
e.asInstanceOf[BasicDBObject].get("time").toString()
else this._time
else this._time
val isEmpty = e.isEmpty && this.annot.isEmpty && this.nar.isEmpty
def toMap = Map("annotation" -> this.annotation, "narrative" -> this.narrative)
def toMapASP = Map("annotation" -> this.annotationASP, "narrative" -> this.narrativeASP)
def tostring = (annotationASP ++ narrativeASP).mkString("\n")
}
case class ExampleBatch(exs: List[DBObject] = List[DBObject](), usingWeakExmpls: Boolean = false) {
private val pairs = exs.map(p => if (exs.indexOf(p) + 1 < exs.length) (p, exs(exs.indexOf(p) + 1)) else (p, p)).filter(x => x._1 != x._2)
val isEmpty = this.exs == List[DBObject]()
val examples = for (p <- pairs) yield Example.tranformToExample(List(p._1, p._2), usingWeakExmpls = false)
val examplesAsWeaks = for (p <- pairs) yield Example.tranformToExample(List(p._1, p._2), usingWeakExmpls = true)
def weakExmpls = this.examplesAsWeaks.filter(x => x.isWeak)
def strongExmpls = this.examplesAsWeaks.filter(x => !x.isWeak)
def asSingleExmpl = Example(exs, usingWeakExmpls)
def without(e: Example): List[Example] = {
this.examples.filter(x => x.time != e.time)
}
// e is a weak example here. It removes its original
// instance from the batch (which carries annotation
// by inertia) and adds back its weak version without
// such annotation
def withOneFlipped(e: Example) = {
val asWeakBatch = this.examples map {
x =>
new Example(
annot = x.annotation.filter(y => !e.suppressedAnnotation.contains(y)).distinct,
nar = x.narrative,
_time = x.time
)
}
asWeakBatch
//val without = this.without(e)
//this.examples :+ e
}
}
/**
* This class represents a pair of interpretations at two consecutive time points t1, t2 = t1+step.
* This pair is considered as a single example with narrative consisting of everything true at t1
* and annotation consisting of the annotation of the second example (the example at t2).
*/
case class ExamplePair(first: Example = Example(), second: Example = Example(), learnFromWeakExmpls: Boolean = true) {
val time = first.time
val isStronglyInitiated = first.annotation == List() && second.annotation != List()
val isWeaklyInitiated = first.annotation != List() && second.annotation != List()
val isStronglyTerminated = first.annotation != List() && second.annotation == List()
val isWeaklyTerminated = first.annotation == List() && second.annotation == List()
// If we are trying to learn from weak examples also, then
// we do not need the annotation of the first example
val annotation = if (learnFromWeakExmpls) second.annotation else first.annotation ::: second.annotation
val annotationASP = if (learnFromWeakExmpls) second.annotationASP else first.annotationASP ::: second.annotationASP
val narrative = first.narrative ::: second.narrative
val narrativeWithPriorSupervision = first.narrative ::: second.narrative ::: first.annotation
val narrativeWithPriorSupervisionASP = first.narrativeASP ::: second.narrativeASP ::: first.annotationASP
val narrativeASP = first.narrativeASP ::: second.narrativeASP
var isCoveredBySomeRule: Boolean = false
}
case class LabelledFromMongo(e: DBObject) {
val commingFromDB = e.get("commingFromDB")
val headAtomGrnd: String = e.get("headAtomGrnd").toString()
val headAtomVarbed: String = e.get("headAtomVarbed").toString()
val timeKey: String = e.get("timeKey").toString()
val typePreds: String = e.get("typePreds").toString()
val asGrndInterpretation: List[String] =
e.asInstanceOf[BasicDBObject].get("asGrndInterpretation").asInstanceOf[BasicDBList].toList.map { x => x.toString() }
val asVarbedInterpretation: List[String] =
e.asInstanceOf[BasicDBObject].get("asVarbedInterpretation").asInstanceOf[BasicDBList].toList.map { x => x.toString() }
var similarExamples = 2.0
}
}
| 10,369 | 41.5 | 145 | scala |
OLED | OLED-master/src/main/scala/logic/Exceptions.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package logic
object Exceptions {
class MyParsingException(message: String = null, cause: Throwable = null) extends RuntimeException(message, cause)
class AbductionException(message: String, cause: Throwable = null) extends RuntimeException(message, cause)
class ASPInputException(message: String, cause: Throwable = null) extends RuntimeException(message, cause)
class LogicException(message: String, cause: Throwable = null) extends RuntimeException(message, cause)
class TrainingExamplesException(message: String, cause: Throwable = null) extends RuntimeException(message, cause)
}
| 1,298 | 38.363636 | 116 | scala |
OLED | OLED-master/src/main/scala/logic/Expression.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package logic
trait Expression {
def tostring: String = ""
def tostringQuote: String = ""
def _type: String = ""
def name: String = ""
def isVariabe = this match {
case _: Variable => true
case _ => false
}
def isConstant = this match {
case _: Constant => true
case _ => false
}
}
| 1,018 | 23.853659 | 72 | scala |
OLED | OLED-master/src/main/scala/logic/Hausdorff.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package logic
import utils.parsers.ClausalLogicParser
/**
* Compute the Hausdorff distance between Herbrand Interpretations.
*/
object Hausdorff extends ClausalLogicParser {
/*
def main(args: Array[String]) {
//val x1 = "initiatedAt(fighting(id1,id2),X)"
//val x2 = "initiatedAt(fighting(id1,id3),X)"
//println(hausdrfDist(x1, x2))
/*
val atom = "a(1,2,4)"
val example1 = List("p(a(2,6,4),Y)","a(1,2,4)")
val example2 = List("a(1,2,3)","p(a(2,3,4),X)","q(z,Term)")
println(elemFromExampleDist(atom,example2))
println(exmplFromExmplDist(example1,example2))
*
*/
val e1 = List("happensAt(walking(id0),680)", "coords(id0,262,285,680)", "happensAt(enters(id0),680)", "orientation(id0,0,680)")
val e2 = List()
}
*/
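/* The methods below compute the standard discrete Hausdorff distance
 *
 * H(A,B) = max( max_{a in A} min_{b in B} d(a,b), max_{b in B} min_{a in A} d(a,b) )
 *
 * where the atom-level distance d is supplied by hausdrfDist1, which normalizes
 * by twice the arity when the compared atoms share a predicate symbol. */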
def litsHausdrfDist(l1: List[Literal], l2: List[Literal]): Double = {
math.max(litlistFromlitlist(l1, l2), litlistFromlitlist(l2, l1))
}
def litlistFromlitlist(l1: List[Literal], l2: List[Literal]): Double = {
val d = for (x <- l1; d = literalFromLiteralList(x, l2)) yield d
d.reduceLeft(_ max _)
}
def literalFromLiteralList(lit: Literal, list: List[Literal]): Double = {
val distances = for (z <- list; d = litHausdrfDist(lit, z)) yield d
distances.reduceLeft(_ min _)
}
def exmplHausdrfDist(ex1: List[String], ex2: List[String]): Double = {
math.max(exmplFromExmplDist(ex1, ex2), exmplFromExmplDist(ex2, ex1))
}
def litHausdrfDist(x: Literal, y: Literal): Double = {
hausdrfDist1(x.asInstanceOf[Expression], y.asInstanceOf[Expression])
}
def exmplFromExmplDist(exmpl1: List[String], exmpl2: List[String]): Double = {
val d = for (x <- exmpl1; d = elemFromExampleDist(x, exmpl2)) yield d
d.reduceLeft(_ max _)
}
def elemFromExampleDist(atom: String, example: List[String]): Double = {
val distances = for (z <- example; d = hausdrfDist(atom, z)) yield d
distances.reduceLeft(_ min _)
}
def hausdrfDist(x: String, y: String): Double = {
val x1 = getParseResult(parse(literal, x))
val x2 = getParseResult(parse(literal, y))
hausdrfDist1(x1, x2)
}
/*
def hausdrfDist(x: Literal, y: Literal): Double = {
hausdrfDist1(x, y)
}
def hausdrfDist(x: Clause, y: Clause): Double = {
hausdrfDist1(x.literals, y.literals)
}
*/
def hausdrfDist1[T <: Expression](x: T, y: T): Double = (x, y) match {
case (x: Literal, y: Literal) =>
val d = haussdorffOuter(x, y)
if (x.predSymbol == y.predSymbol && x.arity == y.arity) {
d / (2 * x.arity)
} else {
d
}
}
def haussdorff(x: Expression, y: Expression, accum: Double, rest: List[Tuple2[Expression, Expression]]): Double = (x, y, rest) match {
case (x: Variable, y: Variable, hd :: tail) =>
if (x.name == y.name) haussdorff(hd._1, hd._2, accum + 0.0, tail)
else haussdorff(hd._1, hd._2, accum + 1.0, tail)
case (x: Variable, y: Variable, List()) =>
if (x.name == y.name) accum + 0.0
else accum + 1.0
case (x: Constant, y: Constant, hd :: tail) =>
if (x.name == y.name) haussdorff(hd._1, hd._2, accum + 0.0, tail)
else haussdorff(hd._1, hd._2, accum + 1.0, tail)
case (x: Constant, y: Constant, List()) =>
if (x.name == y.name) accum + 0.0 else accum + 1.0
case (x: Literal, y: Literal, hd :: tail) =>
if (x.predSymbol != y.predSymbol | x.arity != y.arity) {
haussdorff(hd._1, hd._2, accum + 1.0, tail)
} else {
val currentDist = haussdorffOuter(x, y)
haussdorff(hd._1, hd._2, accum + currentDist, tail)
}
case (x, y, List()) =>
val currentDist = haussdorffOuter(x, y)
accum + currentDist
case (_, _, hd :: tail) =>
haussdorff(hd._1, hd._2, accum + 1.0, tail)
case (_, _, List()) =>
1.0
}
def haussdorffOuter(x: Expression, y: Expression): Double = (x, y) match {
case (x: Literal, y: Literal) =>
if (x.predSymbol != y.predSymbol | x.arity != y.arity) {
1.0
} else {
x.terms zip y.terms match {
case hd :: tail => haussdorff(hd._1, hd._2, 0.0, tail)
case Nil => 0.0 // zero-arity atoms with the same predicate symbol are identical
}
}
case (x: Variable, y: Variable) =>
if (x.name == y.name) 0.0 else 1.0
case (x: Constant, y: Constant) =>
if (x.name == y.name) 0.0 else 1.0
case (_, _) => 1.0
}
}
| 5,006 | 33.294521 | 136 | scala |
OLED | OLED-master/src/main/scala/logic/Literal.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package logic
import app.runutils.Globals
import scala.collection.mutable.ListBuffer
import scala.util.control.Breaks
import logic.Modes._
import logic.Exceptions._
import utils.parsers.{ClausalLogicParser, ModesParser, PB2LogicParser}
/**
* Companion object for the Literal class
*/
object Literal {
val empty = Literal()
/* Parse a string into a literal, An optional mode atom may be provided. */
/*
def parse(lit: String, mode: String = ""): Literal = {
val p = new ClausalLogicParser
mode match {
case "" => p.getParseResult(p.parse(p.literal, lit)).asInstanceOf[Literal]
case _ =>
val l = p.getParseResult(p.parse(p.literal, lit)).asInstanceOf[Literal]
val m = getModeAtom(mode)
Literal(functor = l.functor, terms = l.terms, isNAF = l.isNAF, modeAtom = m)
}
}
*/
/*
* I'm quitting parsing based on parser combinators; I'll use the parboiled lib, which is much faster.
* ATTENTION: The PB2LogicParser cannot currently handle whitespace in the input strings (I need to fix it). This
* doesn't happen in the app right now but it might be a problem in the future. If any issue appears just comment-out
* this version of the parse method and use the one above, based on parser combinators.
* */
def parse(lit: String, mode: String = "") = parseWPB2(lit, mode)
/* As above, using the Parboiled2 parser (faster). */
def parseWPB2(lit: String, mode: String = "") = {
mode match {
case "" => PB2LogicParser.parseAtom(lit).asInstanceOf[Literal]
case _ =>
val l = PB2LogicParser.parseAtom(lit).asInstanceOf[Literal]
val m = getModeAtom(mode)
Literal(predSymbol = l.predSymbol, terms = l.terms, isNAF = l.isNAF, modeAtom = m)
}
}
def toLiteral1(lit: String, mode: ModeAtom = ModeAtom("", List())): Literal = {
val p = new ClausalLogicParser
val l = p.getParseResult(p.parse(p.literal, lit)).asInstanceOf[Literal]
val out = Literal(predSymbol = l.predSymbol, terms = l.terms, isNAF = l.isNAF, modeAtom = mode)
out
}
def toLiteral2(lit: Literal, mode: ModeAtom = ModeAtom("", List())): Literal = {
val out = mode match {
case ModeAtom("", List(), false) => lit
case _ => Literal(predSymbol = lit.predSymbol, terms = lit.terms, isNAF = lit.isNAF, modeAtom = mode)
}
out
}
/*def getModeHAtom(atom: String): ModeAtom = {
val p = new ModesParser
p.getParseResult(p.parseModes(p.modeh, atom))
}*/
def getModeAtom(atom: String): ModeAtom = {
val p = new ModesParser
p.getParseResult(p.parseModes(p.mode, atom))
}
/*def getModeBAtom(atom: String): ModeAtom = {
val p = new ModesParser
p.getParseResult(p.parseModes(p.modeb, atom))
}*/
/* Converts a ground literal in ASP format into MLN format.
*
* E.g. initiatedAt(meeting(id1, id2),2000) --> InitiatedAt(Meeting_Id1_Id2,2000)
*
* This does not work for variabilized literals (throws an exception).
* Variabilized literals are only used (at the MLN side) in rules.
* */
def toMLNFlat(l: Literal) = {
def formFlatConstTerm(funct: String, args: List[Expression]) = {
val t = s"${funct.capitalize}_${args.map(z => z.name.capitalize).mkString("_")}"
Constant(t)
}
def flatten(l: Literal): List[Constant] = {
val flattenInner = l.terms.foldLeft(List[Constant]()) { (stack, currentTerm) =>
currentTerm match {
case x: Constant => stack :+ Constant(x.name.capitalize)
// No variables, this only applies to ground clauses.
case x: Variable => throw new RuntimeException(s"Found variable: $x while transforming ${l.tostring} to MLN flattened form.")
case x: Literal =>
if (x.terms.forall(_.isConstant)) {
stack :+ formFlatConstTerm(x.predSymbol, x.terms)
} else {
val f = flatten(x)
stack :+ formFlatConstTerm(x.predSymbol, f)
}
}
}
flattenInner
}
val flat = flatten(l)
Literal(l.predSymbol.capitalize, flat, isNAF = l.isNAF, derivedFrom = l.derivedFrom)
}
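/* Usage sketch with a hypothetical ground atom:
 * toMLNFlat(Literal.parse("initiatedAt(meeting(id1,id2),2000)")).tostring
 * is expected to yield "InitiatedAt(Meeting_Id1_Id2,2000)", since the inner
 * compound term is flattened into a single capitalized constant. */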
/* Converts a variabilized literal (part of an ASP rule) into a variabilized literal in MLN format.
* E.g. initiatedAt(meeting(X0, X1), X2) --> InitiatedAt(meeting(x0, x1), x2)
* */
def toMLNClauseLiteral(l: Literal) = {
def handleInner(l: Literal): List[Expression] = {
val inner = l.terms.foldLeft(List[Expression]()) { (stack, currentTerm) =>
currentTerm match {
case x: Constant => stack :+ Constant(x.name.capitalize)
case x: Variable => stack :+ Constant(x.name.take(1).toLowerCase() + x.name.drop(1))
case x: Literal =>
val z = handleInner(x)
stack :+ Literal(predSymbol = x.predSymbol, terms = z, isNAF = x.isNAF)
}
}
inner
}
val toMLN = handleInner(l)
Literal(l.predSymbol.capitalize, toMLN, isNAF = l.isNAF, derivedFrom = l.derivedFrom)
}
def types(l: String, mode: ModeAtom, globals: Globals) = {
def terms(lit: Literal): List[Expression] = {
val (in, out, grnd) = lit.placeMarkers
val v = in ++ out ++ grnd
v match {
case Nil =>
val mode = lit.matchingMode(globals)
if (!mode.isEmpty) {
val l = Literal(predSymbol = lit.predSymbol, terms = lit.terms,
isNAF = true, modeAtom = mode, typePreds = lit.typePreds)
l.variables(globals)
} else { Nil }
case _ => v
}
}
val lit = toLiteral1(l, mode)
val termTypes = terms(lit) map { x => s"${x._type}(${x.tostring})" }
termTypes.distinct.mkString(",")
}
}
/**
* A literal is a compound term of the form p(x1,...xn), possibly preceded with 'not' ( 'not p(x1,...xn)' ),
* in which case it is a negated literal. 'p' is the functor of the literal and xi's are its terms. Each xi
* is either a variable, a constant or a non-negated literal.
*
* @param predSymbol the predicate/function symbol of the literal.
* @param terms the inner terms of the literal. This is a var so that it can be updated, by populating the term objects
* by indicators on whether they correspond to input-output vars or constants, a process that takes place during the
* construction of the Literal object, by extracting relevant information from the accompanying modeAtom (if one is present
* with the input). I don't know if this is the best way to do it (having vars), but it seems messy to create a companion object
* for a case class (as this one).
* @param isNAF true or false depending on whether the literal is negated or not.
* @param modeAtom (optional) mode declaration pattern. This is pattern according to which the literal has been generated
* (during bottom clause construction). The mode declaration is used to annotate the variables and constants of the
* literal with additional information (types/sorts of constants/variables, input or output variables), which is used in the
* process of variabilizing the clause in which this literal belongs.
* @param typePreds an (optional) list of typing predicates, extracted from a matching mode declaration,
* for the literal's variables and constants.
*
* @param derivedFrom is the id of the lifted clause from which this atom is proved. This is used for weight learning with AdaGrad.
*
*/
case class Literal(predSymbol: String = "", terms: List[Expression] = Nil, isNAF: Boolean = false,
modeAtom: ModeAtom = ModeAtom("", Nil), typePreds: List[String] = Nil, derivedFrom: Int = 0) extends Expression {
// No need. Plus messes up MLN <--> ASP
//require(if (functor != "") !functor.toCharArray()(0).isUpper else true)
/* TODO comment */
var mlnTruthValue: Boolean = false
lazy val arity: Int = terms.length
lazy val negated = Literal(predSymbol = this.predSymbol, terms = this.terms, isNAF = true, modeAtom = this.modeAtom, typePreds = this.typePreds)
lazy val nonNegated = Literal(predSymbol = this.predSymbol, terms = this.terms, isNAF = false, modeAtom = this.modeAtom, typePreds = this.typePreds)
def negateThis =
if (this.isNAF) {
Literal(predSymbol = this.predSymbol, terms = this.terms, modeAtom = this.modeAtom, typePreds = this.typePreds)
} else {
Literal(predSymbol = this.predSymbol, terms = this.terms, isNAF = true, modeAtom = this.modeAtom, typePreds = this.typePreds)
}
def asPosLiteral = {
if (!this.isNAF) PosLiteral(this.predSymbol, this.terms, this.isNAF, this.modeAtom, this.typePreds)
else throw new LogicException(s"Found negated literal casted as positive literal: ${this.tostring}}")
}
/*
* This is use for comparison predicates only. It returns the constant that represents a threshold value
* for comparison. For example, if this is close(X,Y,40,T) and the corresponding declaration is
*
* comparisonPredicate(close(+person,+person,#threshold_value,+time), lessThan, comparison_term_position(3))
*
* then this method returns 40
*
* */
def getComparisonTerm = {
val m = this.modeAtom
if (m.isComparisonPredicate) {
// Note that since m is a comparison predicate template, its comparisonTermPosition list cannot be empty
val first = this.terms(m.comparisonTermPosition.head - 1)
val rest = m.comparisonTermPosition.tail
if (rest.nonEmpty) {
rest.foldLeft(first) { (term, position) =>
term.asInstanceOf[Literal].terms(position - 1)
}
} else {
first
}
} else {
Constant()
}
}
override val tostring: String = terms match {
case List() => predSymbol
case _ =>
val prefix = if (isNAF) s"not $predSymbol" else predSymbol
prefix + "(" + (for (
a <- terms; x = a match {
case x @ (_: Constant | _: Variable | _: Literal | _: PosLiteral) => x
case _ => throw new LogicException(s"Unexpected type of inner term while parsing Literal: $this")
}
) yield x.tostring).mkString(",") + ")"
}
override def tostringQuote: String = terms match {
case List() => predSymbol
case _ =>
val prefix = if (isNAF) s"not $predSymbol" else predSymbol;
prefix + "(" + (for (
a <- terms; x = a match {
case x @ (_: Constant | _: Variable | _: Literal | _: PosLiteral) => x
case _ => throw new LogicException("Unexpected type of inner term while parsing Literal.")
}
) yield x.tostringQuote).mkString(",") + ")"
}
lazy val tostringMLN: String = terms match {
case List() => predSymbol
case _ =>
val prefix = if (isNAF) s"!$predSymbol" else predSymbol
prefix + "(" + (for (
a <- terms; x = a match {
case x @ (_: Constant | _: Variable | _: Literal | _: PosLiteral) => x
case _ => throw new LogicException(s"Unexpected type of inner term while parsing Literal: $this")
}
) yield x.tostring).mkString(",") + ")"
}
/**
* @return a mode declaration atom that matches this literal.
* If none is found, returns the empty mode atom ( ModeAtom("",List() )
*/
def matchingMode(globals: Globals): ModeAtom = {
var out: ModeAtom = ModeAtom("", List())
this.modeAtom match {
case ModeAtom("", Nil, false) =>
val loop = new Breaks;
loop.breakable {
for (x <- globals.MODEHS ::: globals.MODEBS) {
val test = if (this.predSymbol != x.functor || this.arity != x.arity) false
else matchesMode(this.terms zip x.args)
if (test) {
out = x
loop.break()
}
}
}
case _ => this.modeAtom
}
out
}
private def matchesMode(remaining: List[(Expression, Expression)]): Boolean = {
remaining match {
case head :: tail => head match {
case (n: Constant, m @ (_: PlmrkPos | _: PlmrkNeg | _: PlmrkConst)) => matchesMode(tail)
case (n: Variable, m @ (_: PlmrkPos | _: PlmrkNeg)) => matchesMode(tail)
case (n: Variable, m: PlmrkConst) =>
throw new LogicException("Found a variabilized term that corresponds to a grplmrk.")
case (n: Literal, m: ModeAtom) =>
if (n.predSymbol != m.functor || n.arity != m.arity) false else matchesMode(n.terms zip m.args)
case _ => throw new LogicException("Getting matching mode: Found unexpected term pairing.")
}
case Nil => true
}
}
/**
* Variabilizes a literal. If a matching mode declaration atom is passed with the input, then the literal is variabilzed according
* to the directives provided by that atom. Else (if no mode atom is present), each constant of the literal is replaced by a new
* variable (TODO: this is not implemented yet, see comments below). The variabilization of a literal is part of the process of
* the variabilization of a clause. In this process, constants of the literal that are present in other literals of the clause,
* which have already been variabilized, should be replaced by the same variable that has already been used for these constants.
*
* @param previousMap a map containing previous bindings of constants to variables.
* @param accum an accumulator that collects competed (variabilized) compound sub-terms.
* @param remaining a list containing all sub-terms remaining to be variabilized.
* @param ttypes a list collecting typing predicates for the generated variables,
* e.g. person(X1), time(X100) etc.
* @param counter a counter that is incremented by 1 each time a new variable is generated.
* The name of a new variable is simply "X"+currentCounterValue.
* @param runningMode a flag indicating a "mode" (purpose) for which this method is called. Default is
* "", in which case the literal is simply variabilized. If mode = "extract-mode-terms", then this method
* is called on a ground literal and it processes the corresponding mode declaration, extracting a tuple
* (in,out,grnd) representing the terms of the ground atom that correspond to input, output or ground
* placemarkers respectively
*/
def variabilize(accum: List[Literal], remaining: List[(Expression, Expression)],
previousMap: scala.collection.mutable.Map[Expression, Expression],
ttypes: List[String], counter: Int, runningMode: String = ""): (List[Literal], List[String], scala.collection.mutable.Map[Expression, Expression], Int) = {
    // x is a tuple (x1,x2), where x1 is a literal's constant and x2 is its type as specified by the modeAtom
def f(x: (Expression, String), sign: String, tail: List[(Expression, Expression)],
map: scala.collection.mutable.Map[Expression, Expression]) = {
val cur = accum match {
case Nil => Literal(predSymbol = this.predSymbol, terms = List(), isNAF = this.isNAF)
case _ => accum.last
}
val (litUpdate, typesUpdate, varCountUpdate) = sign match {
case "#" =>
          // a term corresponding to a constant placemarker remains intact
(Literal(predSymbol = cur.predSymbol, terms = cur.terms :+ x._1, isNAF = cur.isNAF), ttypes, counter)
case _ =>
          // if the constant has been variabilized previously, use the same var.
if (map.keySet.contains(x._1)) {
(Literal(predSymbol = cur.predSymbol, terms = cur.terms :+ map(x._1), isNAF = cur.isNAF), ttypes, counter)
} else {
// else, use a new one
val newVar = Variable("X" + counter, "+", x._2)
map += (x._1 -> newVar)
(Literal(predSymbol = cur.predSymbol, terms = cur.terms :+ newVar, isNAF = cur.isNAF),
ttypes :+ x._2 + "(X" + counter + ")", counter + 1)
}
}
this.variabilize(accum.tail :+ litUpdate, tail, map, typesUpdate, varCountUpdate)
}
remaining match {
case head :: tail => head match {
case (x: Constant, y: PlmrkPos) => f((x, y._type), "+", tail, previousMap)
case (x: Constant, y: PlmrkNeg) => f((x, y._type), "-", tail, previousMap)
case (x: Constant, y: PlmrkConst) => f((x, y._type), "#", tail, previousMap)
case (x: Literal, y: ModeAtom) =>
val (varbed, newTypes, newMap, newCount) =
this.variabilize(List(Literal(x.predSymbol)), x.terms zip y.args, previousMap, List(), counter)
val pop = accum.last
this.variabilize(List(Literal(predSymbol = pop.predSymbol, terms = pop.terms ::: varbed, isNAF = pop.isNAF)),
tail, newMap, ttypes ::: newTypes, newCount)
case _ => throw new LogicException("Variabilizing Literal " + this.tostring + ": Found unexpected type")
}
case Nil =>
val pop = accum.last
(accum.tail :+
Literal(predSymbol = pop.predSymbol, terms = pop.terms, isNAF = pop.isNAF), ttypes, previousMap, counter)
}
}
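  /* Variabilization sketch (hypothetical literal/mode, for illustration only):
   * for a ground literal p(a,7) matched by modeb(p(+person,#time)),
   *
   *   lit.variabilize(List(Literal(predSymbol = "p")), lit.terms zip mode.args,
   *                   scala.collection.mutable.Map[Expression, Expression](), List(), 0)
   *
   * yields the literal p(X0,7) with ttypes List("person(X0)"): the '+' term
   * gets a fresh variable (reused for later occurrences of the constant 'a'
   * via the map), while the '#' term is kept intact as the constant 7.
   */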
lazy val placeMarkers = getPlmrkTerms(Nil, Nil, Nil, this.terms zip this.modeAtom.args)
/**
   * Extracts the terms of the literal marked as input, output or ground terms.
*
* @param in an accumulator for input terms
* @param out an accumulator for output terms
* @param grnd an accumulator for ground terms
* @param remaining the (zipped) terms of the literal and the mode atom
* that remain to be checked
* @return a tuple (in,out,ground) carrying the marked terms
*/
def getPlmrkTerms(in: List[Expression], out: List[Expression], grnd: List[Expression],
remaining: List[(Expression, Expression)]): (List[Expression], List[Expression], List[Expression]) = {
remaining match {
case head :: tail => head match {
case (x: Constant, y: PlmrkPos) =>
getPlmrkTerms(in ::: List(Constant(x.name, "+", y._type)), out, grnd, tail)
case (x: Constant, y: PlmrkNeg) =>
getPlmrkTerms(in, out ::: List(Constant(x.name, "-", y._type)), grnd, tail)
case (x: Constant, y: PlmrkConst) =>
getPlmrkTerms(in, out, grnd ::: List(Constant(x.name, "#", y._type)), tail)
case (x: Variable, y: PlmrkPos) =>
getPlmrkTerms(in ::: List(Variable(x.name, "+", y._type)), out, grnd, tail)
case (x: Variable, y: PlmrkNeg) =>
getPlmrkTerms(in, out ::: List(Variable(x.name, "-", y._type)), grnd, tail)
case (x: Variable, y: PlmrkConst) =>
getPlmrkTerms(in, out, grnd ::: List(Variable(x.name, "#", y._type)), tail)
case (x: Literal, y: ModeAtom) =>
val (newin, newout, newconst) =
getPlmrkTerms(in, out, grnd, x.terms zip y.args)
getPlmrkTerms(newin, newout, newconst, tail)
case _ => throw new LogicException(this.tostring + ": Unexpected type.")
}
case Nil =>
(in, out, grnd)
}
}
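  /* Extraction sketch (hypothetical literal/mode): for a literal p(a,B,c) whose
   * modeAtom is modeb(p(+person,-person,#time)), `placeMarkers` evaluates to
   *
   *   (List(Constant("a","+","person")),
   *    List(Variable("B","-","person")),
   *    List(Constant("c","#","time")))
   *
   * i.e. the literal's terms split into (input, output, ground) terms according
   * to the mode declaration, with inner literals handled recursively.
   */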
def toMLN = {
val mlnTerms = this.termsToMLN
Literal(predSymbol = this.predSymbol.capitalize, terms = mlnTerms, isNAF = this.isNAF)
}
private def termsToMLN: List[Expression] = {
//var temp = new ListBuffer[Expression]
this.terms map {
// variables are always of the form X0, X1, X2 etc, so turning them to lower case
// simply converts the X, no other changes.
case y: Variable => Constant(y.name.toLowerCase)
case y: Constant => Constant(y.name.capitalize)
case y: Literal =>
val l = y
val m = l.termsToMLN
Literal(predSymbol = l.predSymbol, terms = m, isNAF = l.isNAF)
}
}
/* Skolemize this literal (replace all variables with skolem constants) */
def skolemize(skolems: Map[String, String], accum: ListBuffer[Expression] = ListBuffer[Expression]()): ListBuffer[Expression] = {
var temp = new ListBuffer[Expression]
    def keyExists = (x: Any) => skolems.keySet.exists(_ == x)
def append = (x: Expression) => temp += x
for (x <- this.terms) x match {
case y: Variable =>
val name = y.name
if (keyExists(name)) append(Constant(skolems(name)))
else throw new LogicException("Skolemise: Found a variable without corresponding skolem constant.")
case y: Constant =>
val name = y.name
if (keyExists(name)) append(Constant(skolems(name)))
else throw new LogicException("Skolemise: Found a constant without corresponding skolem constant.")
case y: Literal =>
val l = y
val m = l.skolemize(skolems, temp)
val toLit = Literal(predSymbol = l.predSymbol, terms = m.toList, isNAF = l.isNAF)
temp += toLit
case _ => throw new LogicException("Skolemise: Unexpected type.")
}
temp
}
def getSkolemConsts(skolems: ListBuffer[(String, String)], counter: Int): (ListBuffer[(String, String)], Int) = {
var c = counter; var s = skolems
    def f = (x: String, y: String) => if (!s.exists(_._1 == x)) s += x -> y else s // check the key; contains(x) would compare a String against (String, String) pairs and never match
def g = (x: Int) => c += x
for (x <- this.terms) x match {
case y: Variable =>
f(y.name, "skolem" + c); g(1)
case y: Constant => f(y.name, y.name) // use the constant as a skolem constant
case y: Literal =>
val m = y.getSkolemConsts(s, c)
s = m._1; c = m._2
case _ => throw new LogicException("Skolemize: Unexpected type of inner term.")
}
(s, c)
}
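  /* Skolemization sketch (illustrative; `lit` is an assumed Literal): first
   * collect a term-to-skolem-constant map, then apply it:
   *
   *   val (skolemMap, _) = lit.getSkolemConsts(ListBuffer[(String, String)](), 0)
   *   val skolemisedTerms = lit.skolemize(skolemMap.toMap)
   *
   * Variables are mapped to skolem0, skolem1, ... while constants are mapped to
   * themselves, so for p(X,a) skolemize returns the terms List(skolem0, a),
   * from which the caller can rebuild the skolemized literal.
   */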
/**
* Replace all occurrences of thisExpr in this literal with thatExpr
*
* @param thisExpr
* @param thatExpr
* @return a Literal with all occurrences of thisExpr replaced by thatExpr
*
*/
def replace(thisExpr: Expression, thatExpr: Expression): Literal = {
var temp = new ListBuffer[Expression]
def append = (x: Expression) => if (x == thisExpr) temp += thatExpr else temp += x
for (x <- this.terms) x match {
case y: Variable => append(y)
case y: Constant => append(y)
case y: Literal =>
if (y == thisExpr) temp += thatExpr
else {
val l = y
val m = l.replace(thisExpr, thatExpr)
temp += m
}
case _ => throw new LogicException("Replace, don't know what to do.")
}
Literal(predSymbol = this.predSymbol, terms = temp.toList, isNAF = this.isNAF)
}
/**
* @param map a map of expressions
* @return a Literal that results by replacing x with y in the current literal, for each x -> y found in map.
*/
def replaceAll(map: Map[_ <: Expression, _ <: Expression]): Literal = map.foldLeft(this)((x, y) => x.replace(y._1, y._2))
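  /* Replacement sketch (hypothetical terms): for lit = p(X,q(X,b)), with x, a,
   * b, c the corresponding Variable/Constant expressions,
   *
   *   lit.replace(x, a)                      // p(a,q(a,b))
   *   lit.replaceAll(Map(x -> a, b -> c))    // p(a,q(a,c))
   *
   * replace descends into inner literals; replaceAll simply folds replace over
   * the map's bindings.
   */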
/**
* Get all variables from this Literal
*/
def getVars: ListBuffer[Variable] = {
val vars = new ListBuffer[Variable]
for (x <- this.terms) x match {
case y: Variable => if (!vars.contains(y)) vars += y
case y: Literal =>
val z = y.getVars
vars ++= z.toList.filter { v => !vars.contains(v) }
case _ =>
}
vars
}
/**
* Returns a list of typing predicates for the variables of the literal.
* e.g. 'time(X)' if X is of type 'time'
*/
def getTypePredicates(globals: Globals): List[String] = {
val f = (x: Expression) => x.asInstanceOf[Variable]
val vars = this.variables(globals)
val tpreds = for (x <- vars) yield f(x)._type + "(" + f(x).name + ")"
tpreds
}
def getConstantsTypes(globals: Globals): List[String] = {
val f = (x: Expression) => x.asInstanceOf[Constant]
val vars = this.constants(globals)
val tpreds = for (x <- vars) yield f(x)._type + "(" + f(x).name + ")"
tpreds
}
/**
* Get all terms of this Literal that correspond to variables.
*/
def getTermsThatCorrespondToVars(globals: Globals): List[_ <: Expression] = {
val mode = this.matchingMode(globals)
getTermsThatCorrespondToVars(mode)
}
def variables(globals: Globals): List[Expression] = {
val (in, out, _) = this.placeMarkers
val v = in ++ out
v match {
case Nil =>
val mode = this.matchingMode(globals)
if (!mode.isEmpty) {
val l = Literal(predSymbol = this.predSymbol, terms = this.terms,
isNAF = true, modeAtom = mode, typePreds = this.typePreds)
l.variables(globals)
} else { Nil }
case _ => v filter (x => !x.isConstant)
}
}
def constants(globals: Globals): List[Expression] = {
val (in, out, grnd) = this.placeMarkers
val v = in ++ out ++ grnd
v match {
case Nil =>
val mode = this.matchingMode(globals)
if (!mode.isEmpty) {
val l = Literal(predSymbol = this.predSymbol, terms = this.terms,
isNAF = true, modeAtom = mode, typePreds = this.typePreds)
l.constants(globals)
} else { Nil }
case _ => v filter (x => !x.isVariabe)
}
}
def getTermsThatCorrespondToVars(mode: ModeAtom): List[_ <: Expression] = {
val out = new ListBuffer[T forSome { type T <: Expression }]
for (x <- this.terms zip mode.args) {
x match {
case (term, m @ (_: PlmrkPos | _: PlmrkNeg)) => out += term
case (x: Literal, y: ModeAtom) =>
val inner = x.getTermsThatCorrespondToVars(y)
out ++= inner
case _ =>
}
}
out.toList.distinct
}
}
/**
* This is a helper class for the representation of non-negated literals.
*/
object PosLiteral {
val empty = PosLiteral()
}
case class PosLiteral(functor: String = "", terms: List[Expression] = Nil, isNAF: Boolean = false,
modeAtom: ModeAtom = ModeAtom("", Nil), typePreds: List[String] = Nil) extends Expression {
def arity = terms.length
def asLiteral = Literal(predSymbol = functor, terms = terms,
isNAF = false, modeAtom = modeAtom, typePreds = typePreds)
override def tostring = this.asLiteral.tostring
override def tostringQuote = this.asLiteral.tostringQuote
}
object AnswerSet {
def UNSAT = new AnswerSet(List(Globals.UNSAT))
def empty = new AnswerSet(List[String]())
}
case class AnswerSet(atoms: List[String]) {
val isEmpty = atoms == List()
}
// OLED-master/src/main/scala/logic/LogicUtils.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package logic
import app.runutils.Globals
import logic.Examples.Example
import utils.{ASP, Utils}
import xhail.Xhail
import scala.collection.mutable.ListBuffer
/**
* Created by nkatz on 9/13/16.
*/
object LogicUtils {
/*
* Simplifies a rule by removing redundant comparison predicates from its body.
* */
def simplifyRule(c: Clause, gl: Globals) = {
val (nonComparisonPreds, comparisonPreds) = c.body.foldLeft(Set[Literal](), Set[Literal]()) { (accum, lit) =>
if (gl.comparisonPredicates.contains(lit.modeAtom)) (accum._1, accum._2 + lit) else (accum._1 + lit, accum._2)
}
val grouped = comparisonPreds.groupBy(x => x.modeAtom)
val simplified = grouped.map{ case (modeAtom, literals) =>
if (modeAtom.compRelation == "lessThan") {
literals.toList.minBy(_.getComparisonTerm.name.toInt)
} else if (modeAtom.compRelation == "greaterThan") {
literals.toList.maxBy(_.getComparisonTerm.name.toInt)
} else {
throw new RuntimeException(s"Don't know what to do with this comparison relation: ${modeAtom.compRelation}")
}
}.toSet
val newTerms = nonComparisonPreds.toList ++ simplified.toList
val cc = Clause(head = c.head, body = newTerms, supportSet = c.supportSet, uuid = c.uuid)
// Just to be on the safe side...
cc.parentClause = c.parentClause
cc.countsPerNode = c.countsPerNode
cc.weight = c.weight
cc.subGradient = c.subGradient
cc.w_pos = c.w_pos
cc.totalTPs = c.totalTPs
cc.totalFPs = c.totalFPs
cc.totalFNs = c.totalFNs
cc.totalSeenExmpls = c.totalSeenExmpls
cc.tps = c.tps
cc.fps = c.fps
cc.fns = c.fns
cc.refinements = c.refinements
cc.seenExmplsNum = c.seenExmplsNum
cc.previousMeanDiffCount = c.previousMeanDiffCount
cc.previousMeanScoreCount = c.previousMeanScoreCount
cc.previousMeanDiff = c.previousMeanDiff
cc.previousScore = c.previousScore
cc
}
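  /* Simplification sketch (hypothetical rule; assumes both close/4 literals
   * match a comparison mode declaration with compRelation == "lessThan"):
   *
   *   initiatedAt(F,T) :- close(X,Y,40,T), close(X,Y,30,T)
   *
   * is reduced to initiatedAt(F,T) :- close(X,Y,30,T): for a "lessThan"
   * comparison only the literal with the smallest comparison term is kept
   * (dually, "greaterThan" keeps the largest), non-comparison literals are
   * left untouched, and the rule's statistics are copied over to the result.
   */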
///*
def compressTheory(kernel: List[Clause]): List[Clause] = {
val compressed = new ListBuffer[Clause]
val included = (c: Clause) => compressed.toList.exists(x => x.thetaSubsumes(c) && c.thetaSubsumes(x))
for (c <- kernel) {
if (!included(c)) compressed += c
}
compressed.toList
}
def compressTheory_RemoveSubsumers(kernel: List[Clause]): List[Clause] = {
val compressed = new ListBuffer[Clause]
val included = (c: Clause) => compressed.toList.exists(x => c.thetaSubsumes(x))
for (c <- kernel) {
if (!included(c)) compressed += c
}
compressed.toList
}
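  /* Compression sketch (hypothetical clauses): with c1 = p :- q and
   * c2 = p :- q,r (c1 θ-subsumes c2), compressTheory keeps both, since it only
   * drops θ-equivalent duplicates, whereas compressTheory_RemoveSubsumers drops
   * c1 when c2 has already been kept, i.e. it also removes clauses that subsume
   * previously kept ones (so its outcome depends on the input order).
   */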
//*/
/*
def compressTheory(kernel: List[Clause]): List[Clause] = {
val compressed = new ListBuffer[Clause]
val included = (c: Clause) => compressed.toList.exists(x => c.thetaSubsumes(x))
for (c <- kernel) {
if (!included(c)) compressed += c
}
compressed.toList
}
*/
def generateKernel(examples: Map[String, List[String]], fromWeakExmpl: Boolean = false,
learningTerminatedOnly: Boolean = false, bkFile: String, globals: Globals) = {
val infile = Utils.getTempFile("example", ".lp")
val f = (x: String) => if (x.endsWith(".")) x else s"$x."
val interpretation = examples("annotation").map(x => s"${f(x)}") ++ examples("narrative").map(x => s"${f(x)}")
Utils.writeToFile(infile, "overwrite") { p => interpretation.foreach(p.println) }
var (kernel, varKernel) =
Xhail.runXhail(fromFile = infile.getAbsolutePath, kernelSetOnly = true,
fromWeakExmpl = fromWeakExmpl, learningTerminatedAtOnly = learningTerminatedOnly, bkFile = bkFile, globals = globals)
if (fromWeakExmpl) {
varKernel = varKernel.map (x => Clause.updateField(x, fromWeakExample = true))
}
infile.delete()
(kernel, varKernel)
}
  /* The only difference is that the examples are provided via a file. I have to
   * fix this; it's wasteful to duplicate code like that. */
def generateKernel2(examplesFile: java.io.File, bkFile: String, globals: Globals) = {
val (kernel, varKernel) =
Xhail.runXhail(fromFile = examplesFile.getAbsolutePath, kernelSetOnly = true, bkFile = bkFile, globals = globals)
(kernel, varKernel)
}
def isSAT(theory: Theory, example: Example, globals: Globals, F: (Theory, Example, Globals) => String): Boolean = {
val f = F(theory, example, globals)
val out = ASP.solve(Globals.CHECKSAT, Map(), new java.io.File(f), example.toMapASP)
if (out != Nil && out.head == AnswerSet.UNSAT) false else true
}
def updateSupport(theory: Theory, kernelSet: Theory, fromWeakExample: Boolean = false) = {
// This is used to avoid adding redundant rules in the
    // support set. A rule is redundant if it subsumes
    // a rule already present in the support set
val isRedundant = (ss: Clause, c: Clause) =>
c.supportSet.clauses exists (x => ss.thetaSubsumes(x))
for (
c <- theory.clauses;
ks <- kernelSet.clauses if !isRedundant(ks, c) && c.thetaSubsumes(ks)
) {
val markedKS = Clause.updateField(ks, fromWeakExample = fromWeakExample)
c.supportSet = Theory(c.supportSet.clauses :+ markedKS)
}
// This is used in order to avoid maintaining redundant
// rules in the support set. In this context, a support set
// rule is redundant if it subsumes some other rule
// in the support set. This can happen in cases where e.g.
// p :- q,r was added to the support set early on and
// later on p :- q,r,s was added. In this case the first
// rule is redundant and should be removed. This redundancy
// checking should be done whenever the support set
// changes with the addition of a rule.
theory.clauses foreach (x => x.compressSupport)
}
}
// OLED-master/src/main/scala/logic/Modes.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package logic
import logic.Exceptions._
object Modes {
case class PlmrkPos(override val _type: String) extends Expression {
override val tostring = "+" + _type
override def tostringQuote = this.tostring
}
case class PlmrkNeg(override val _type: String) extends Expression {
override val tostring = "-" + _type
override def tostringQuote = this.tostring
}
case class PlmrkConst(override val _type: String) extends Expression {
override val tostring = "#" + _type
override def tostringQuote = this.tostring
}
object ModeAtom {
def apply(): ModeAtom = {
ModeAtom("", Nil)
}
}
case class ModeAtom(functor: String, args: List[Expression], isNAF: Boolean = false) extends Expression {
val arity = this.args.length
val isEmpty = args match {
case List() => true
case _ => false
}
var compRelation = ""
var comparisonTermPosition: List[Int] = List[Int]()
def comparisonTermType = getComparisonTerm._type
def getComparisonTerm: Expression = {
if (comparisonTermPosition.nonEmpty) {
val first = args(comparisonTermPosition.head - 1)
val rest = comparisonTermPosition.tail
if (rest.nonEmpty) {
rest.foldLeft(first) { (term, position) =>
term.asInstanceOf[ModeAtom].args(position - 1)
}
} else {
first
}
} else {
Constant()
}
}
def isComparisonPredicate = compRelation == "lessThan" || compRelation == "greaterThan"
override val tostring: String = args match {
case List() => if (isNAF) s"not $functor" else functor
case _ => if (isNAF) s"not $functor" else functor + "(" + (for (a <- args) yield a.tostring).mkString(",") + ")"
}
/**
* @return a string representation of the mode declaration. This method is supposed to be called on a
* variabilized version of the mode declaration, and it surrounds with double quotes
* variables that correspond to output and ground placemarkers. For instance, assume the mode atom
*
* modeb(p(+type1,-type2,#type3))
*
* and its variabilized version
*
* p(X,Y,Z)
*
* The result of applying this method on the above is
*
* p(X,"Y","Z"). These atoms are passed to the ASP solver, in order to generate query atoms, for the
* construction of the body of a Kernel Set clause. The quoted variables are treated as constants by the
* solver, which generates instances by grounding only the variables that correspond to input terms.
* The quotes are removed by post-processing each atom in an answer set, thus obtaining a query atom (i.e.
* an atom of the form p(2,Y,Z) from the above). This is a query atom, which is subsequently used to
* generate groundings of the atom that bind only Y,Z vars, keeping the input term intact.
*
*/
override def tostringQuote: String = args match {
case List() => functor
case _ => functor + "(" + (for (a <- args) yield a.tostringQuote).mkString(",") + ")"
}
/**
* Variabilizes a mode declaration atom, i.e. it replaces all in-out-ground placemarkers with fresh variables.
* The variabilized mode declarations are used in the construction of bottom clauses, in order to generate ground
   * instances of mode declaration atoms, by replacing variables by constants found in the data.
   *
   * Returns a variabilized Literal. Its variables are annotated as +/-/# and it also carries a List[String] with the
   * typing predicates for its variables.
*
*/
def varbed: Literal = {
val (varbed, ttypes, _) = variabilize(List(Literal(predSymbol = this.functor)), this.args, List(), 0)
Literal(predSymbol = varbed.head.predSymbol, terms = varbed.head.terms, isNAF = this.isNAF, typePreds = ttypes, modeAtom = this)
}
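  /* Variabilization sketch (hypothetical mode declaration): for
   *
   *   ModeAtom("close", List(PlmrkPos("person"), PlmrkPos("person"), PlmrkConst("time")))
   *
   * `varbed` returns the literal close(X0,X1,X2) with
   * typePreds = List("person(X0)", "person(X1)", "time(X2)"). Note that here
   * all placemarkers, '#' ones included, are replaced by fresh variables, each
   * annotated with its +/-/# sign and its type.
   */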
/**
*
* This method does all the work of the variabilation.
*
   * @param accum an accumulator that collects completed (variabilized) compound sub-terms.
* @param remaining a list containing all remaining sub-terms that should be variabilized.
* @param ttypes a list collecting typing predicates for the generated variables, e.g. person(X1), time(X100)
* @param counter a counter that is incremented by 1 each time a new variable is generated. The name of a new variable is
* simply "X"+currentCounterValue.
*/
private def variabilize(accum: List[Literal], remaining: List[Expression],
ttypes: List[String], counter: Int): (List[Literal], List[String], Int) = {
def f(x: Expression, sign: String, tail: List[Expression]) = {
val cur = accum match {
case Nil => Literal(predSymbol = this.functor)
case _ => accum.last
}
      // We are variabilizing everything (it's mode variabilization), so replace every placemarker with a new Var.
val update = Literal(predSymbol = cur.predSymbol, terms = cur.terms :+ Variable("X" + counter, sign, x._type))
this.variabilize(accum.tail :+ update, tail, ttypes :+ x._type + "(X" + counter + ")", counter + 1)
}
remaining match {
case head :: tail => head match {
case x: PlmrkPos => f(x, "+", tail)
case x: PlmrkNeg => f(x, "-", tail)
case x: PlmrkConst => f(x, "#", tail)
case x: ModeAtom =>
val (varbed, newTypes, newCount) = this.variabilize(List(Literal(predSymbol = x.functor)), x.args, List(), counter)
val pop = accum.last
this.variabilize(List(Literal(predSymbol = pop.predSymbol, terms = pop.terms ::: varbed)), tail, ttypes ::: newTypes, newCount)
case _ =>
throw new LogicException("Variabilizing Mode Declaration " + this.tostring + ": Found unexpected type")
}
case Nil =>
val pop = accum.last
(accum.tail :+ Literal(predSymbol = pop.predSymbol, terms = pop.terms), ttypes, counter)
}
}
/**
   * Receives a tuple of the form ('in','out','ground'), where each coordinate in the tuple is a List[Expression]
   * of constants marked as input, output or ground placemarkers. From this input, this method constructs
   * all instances of the current mode declaration atom 'm', generated as follows:
* -- Each input placemarker in 'm' is replaced by a term in 'in'
* -- Each ground placemarker in 'm' is replaced by a variable.
* -- Each output placemarker in 'm' is replaced by a variable.
*
* These constitute the query atoms, used to generate Kernel Set body atoms.
*
* @example
*
   * Assume that the current mode atom is modeb(p(+entity1,-entity2,#entity3)) and the input is
* (List("e1","e2"),List("e3"),List("e4","e5")). The result of this method is the list of atoms:
*
* List(p(e1,Y,Z),p(e2,Y,Z)).
*
   * This method is to be called upon a variabilized version of the mode atom, and replaces the variables that
   * correspond to input terms with terms from 'in'.
*
*
*/
def generateQueryAtoms(input: (List[Expression], List[Expression], List[Expression])) = {
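      /* Unimplemented. A minimal sketch of the intended behaviour, inferred
       * from the comment above (an assumption, not original code): substitute
       * each constant of input._1 into the variables that correspond to input
       * placemarkers, keep the variables generated for '-' and '#' positions,
       * and collect the resulting query atoms, e.g. List(p(e1,Y,Z), p(e2,Y,Z)).
       */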
}
}
}
// OLED-master/src/main/scala/logic/Rules.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package logic
import app.runutils.Globals
import com.typesafe.scalalogging.LazyLogging
import logic.Examples.Example
import utils.ASP
import utils.Utils
import scala.collection.mutable.ListBuffer
import scala.util.control.Breaks
/**
*
* Utilities for rule handling.
*
*/
object Rules extends LazyLogging {
/**
*
* @param answerSet a set of use2 or use3 atoms
   * @param useAtomsMap a map of the use atoms to actual literals
* @return a theory formed by the use2/use3 atoms and the literals
*/
def getNewRules(
answerSet: List[String],
useAtomsMap: Map[String, Literal], fromWeakExample: Boolean = false): Theory = {
// Group atoms of the same rule.
    // Doing this with map/flatMap etc. does not work;
    // for-comprehensions work as shown below:
val group = for (
Use2HeadAtom(x) <- answerSet;
val z = for (Use2BodyAtom(y) <- answerSet if y._2._1 == x._2) yield y._1
) yield (x._1, z)
val rules = group match {
case Nil => List[Clause]()
case _ => group map (x =>
Clause(head = useAtomsMap(x._1).asPosLiteral,
body = x._2 map (x => useAtomsMap(x)),
fromWeakExample = fromWeakExample))
}
Theory(rules)
}
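  /* Construction sketch (hypothetical answer set): given
   *
   *   answerSet = List("use2(1,0)", "use2(1,2)")
   *
   * with useAtomsMap binding "use2(1,0)" to a head literal and "use2(1,2)" to a
   * body literal, the result is the single-clause theory head :- body. Atoms of
   * the form use2(i,0) select rule heads, while use2(i,j) with j > 0 select the
   * body literals of the i-th rule.
   */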
def getInconsistentRules(
answerSet: List[String],
priorTheory: Theory,
use3AtomsMap: Map[String, Literal]
): List[InconstistentRule] = {
// Group literal from same support set clause. example:
// Answer set: List("use2(1,0)","use2(1,4)","use2(2,4)","use3(1,2,3)",
// "use3(1,2,5)","use3(2,3,5)","use3(2,3,7)","use3(6,3,8)")
// group = Map((2,3) -> List(use3(2,3,5), use3(2,3,7)),
// (6,3) -> List(use3(6,3,8)), (1,2) -> List(use3(1,2,3), use3(1,2,5)))
val group = answerSet.groupBy{
x =>
x match {
case Use3Atom(y) => (y._2._1, y._2._2)
case _ => None
}
}.filter{ case (k, v) => k != None }.
asInstanceOf[Map[(Int, Int), List[String]]]
val inconstistentRules = (group map { case (k, v) => InconstistentRule(priorTheory, use3AtomsMap, (k, v)) }).toList
inconstistentRules
}
/**
* Represents an inconsistent rule. All the work is done in the companion object.
*
* @param rule The actual rule that needs to be specialized.
* @param priorTheory The theory in which rule belongs.
   * @param use3Map A mapping of use3 atoms to literals from a
   * rule of this rule's support set.
   * @param index a tuple (i,atoms), where i is the position of
   * the inconsistent rule in the prior theory and atoms is the list of its use3 atoms.
* @param initialRefinement An initial refinement of this rule.
* If it subsumes rule's support set,
* then this refinement will replace rule in
* prior theory, else we'll search some more
* for an appropriate refinement.
* @param finalRefinement The final refinement (one subsuming the whole
* support set) for this rule.
*/
class InconstistentRule private (val rule: Clause = Clause(), val priorTheory: Theory = Theory(),
val use3Map: Map[String, Literal] = Map(), val index: (Int, List[String]) = (0, List()),
val initialRefinement: Clause = Clause(), val finalRefinement: Theory = Theory())
object InconstistentRule {
def apply(priorTheory: Theory, use3Map: Map[String, Literal], index: ((Int, Int), List[String])) = {
require(!priorTheory.isEmpty)
require(use3Map.nonEmpty)
val rule = priorTheory.clauses(index._1._1 - 1)
val SSRule = rule.supportSet.clauses(index._1._2 - 1)
val initialRefinement =
Clause(
head = rule.head,
body = rule.body ++ index._2.map(x => use3Map(x)),
fromWeakExample = rule.fromWeakExample || SSRule.fromWeakExample
)
initialRefinement.addToSupport(rule.supportSet.clauses filter (x => initialRefinement.thetaSubsumes(x)))
new InconstistentRule(rule = rule, initialRefinement = initialRefinement)
}
def apply(rule: Clause, finalRefinement: Theory = Theory()) = {
//Initialize the support set for the final refinement
val _finalRefinement = finalRefinement.isEmpty match {
case true => Theory()
case false =>
finalRefinement.clauses.foreach(
x => x.addToSupport(
rule.supportSet.clauses filter (y => x thetaSubsumes y))
)
finalRefinement
//Theory(finalRefinement.clauses.map(x => Clause.updateField(x, fromWeakExample = rule.fromWeakExample)))
}
new InconstistentRule(rule = rule, finalRefinement = _finalRefinement)
}
}
def subsetsSearch(incRule: InconstistentRule, accum: List[List[InconstistentRule]], examples: Example) = {
logger.info("Minimal refinement search method: Full search")
var done = false; var k = 1; var finalRefinement = Theory()
val loop = new Breaks
loop.breakable {
while (!done) {
for (x <- accum.flatten.toSet.subsets(k)) {
logger.info(s"Example ${examples.time}: Searching support subsets of size" +
s" $k for support of size ${incRule.rule.supportSet.clauses.length}")
val t = Theory((x map (y => y.initialRefinement)).toList) // the initialRefinement field carries one of the tried refs here
finalRefinement = t
if (t.thetaSubsumes(incRule.rule.supportSet)) {
done = true
loop.break
}
}
k = k + 1
}
}
InconstistentRule(incRule.rule, finalRefinement)
}
def setCoverSearch(incRule: InconstistentRule, accum: List[List[InconstistentRule]], examples: Example) = {
logger.info("Minimal refinement search method: Set cover search")
    // Find the support rules that are subsumed by each candidate refinement
val eachSubsumes = (c: InconstistentRule) => (c, incRule.rule.supportSet.clauses.filter(x => c.initialRefinement.thetaSubsumes(x)))
// startWith contains a list of 2-tuples (x,y) where x is a specialization and y is the list
// of support rules that are subsumed by x, sorted by the size of y
val startWith = accum.flatten.map(x => eachSubsumes(x)).sortBy(y => y._2.length)
    // For each support rule c, find a tuple (x,y) from startWith such that y contains c, and add
    // x to a growing set of specializations that will form the final refinement.
var refinement = List[Clause]()
for (c <- incRule.rule.supportSet.clauses) {
val loop = new Breaks
loop.breakable {
for (s <- startWith) {
if (s._2.contains(c)) {
refinement = (refinement :+ s._1.initialRefinement).distinct
loop.break
}
}
}
}
// do a final check to verify that the whole support is subsumed
if (!Theory(refinement).thetaSubsumes(incRule.rule.supportSet)) {
logger.error("Search for minimal refinement (set cover search) returned a" +
" program that does not subsume the whole support set")
System.exit(-1)
}
InconstistentRule(incRule.rule, Theory(refinement))
}
def getRefinedProgram(
incRule: InconstistentRule,
retainedRules: Theory,
newRules: Theory, examples: Example, globals: Globals): InconstistentRule = {
// Search for a refined program that subsumes the support set
def search: InconstistentRule = {
val accum = new ListBuffer[List[InconstistentRule]]
for (ssc <- incRule.rule.supportSet.clauses) {
val file = Utils.getTempFile("search", ".lp")
val (_, _, _, _, defeasibleRule, use3Map) =
ASP.inductionASPProgram(retained = retainedRules.extendUnique(newRules),
findAllRefs = (incRule.rule, ssc), examples = examples.toMapASP, aspInputFile = file, globals = globals)
val answerSets = ASP.solve(Globals.FIND_ALL_REFMS, use3Map, file, examples.toMapASP)
if (answerSets.head != AnswerSet.UNSAT) {
val inc = (answerSets map (x => getInconsistentRules(x.atoms, Theory(incRule.rule), use3Map))).flatten
accum += inc
} else {
ssc.fromWeakExample match {
case true => incRule.rule.removeFromSupport(ssc)
case _ =>
logger.error(s"\n ...While searching minimal refinements:" +
s" Rule \n ${incRule.rule.tostring} cannot be refined using support rule \n ${ssc.tostring} \n")
System.exit(-1)
}
}
}
Globals.glvalues("refinementSearch") match {
case "setCover" => setCoverSearch(incRule, accum.toList, examples)
case "fullSearch" => subsetsSearch(incRule, accum.toList, examples)
}
}
// Use this for the new search for minimal refinement (the one that gives the whole support set)
// Here prior theory is an inconsistent rule as a single-clause theory.
def tryToRefine(priorTheory: Theory, retained: Theory, examples: Example, globals: Globals): List[AnswerSet] = {
val file = Utils.getTempFile("search", ".lp")
val (_, _, defeasiblePrior, use3Map, _, _) =
ASP.inductionASPProgram(priorTheory = priorTheory, retained = retained,
examples = examples.toMapASP, aspInputFile = file, globals = globals)
val answerSets = ASP.solve(Globals.FIND_ALL_REFMS, use3Map, file, examples.toMapASP)
val inc = (answerSets map (x => getInconsistentRules(x.atoms, priorTheory, use3Map))).flatten
answerSets
}
val refinement: InconstistentRule =
// Avoid redundant search if the initial refinement is ok
//if (incRule.initialRefinement.thetaSubsumes(incRule.rule.supportSet)){
if (retainedRules.extend(newRules).extend(Theory(incRule.initialRefinement)).thetaSubsumes(incRule.rule.supportSet)) {
        // finalRefinement stays unpopulated, so the initial refinement is used
incRule
} else {
logger.info(s"Example ${examples.time}: Searching for minimal refinement")
//search
val answerSets = tryToRefine(priorTheory = Theory(incRule.rule), retained = retainedRules.extendUnique(newRules), examples = examples, globals = globals)
//InconstistentRule(incRule.rule, Theory(refinement))
incRule // Just in order for the code to compile for debugging.
}
refinement
}
// Use this for the new search for minimal refinement (the one that gives the whole support set)
// Here prior theory is an inconsistent rule as a single-clause theory.
def refineRule(incRule: InconstistentRule, retained: Theory,
e: Example, withSupport: String = "fullSupport", globals: Globals): Either[String, List[InconstistentRule]] = {
val file = Utils.getTempFile("search", ".lp")
val (_, _, _, use3Map, _, _) = ASP.inductionASPProgram(
priorTheory = Theory(incRule.rule), retained = retained,
examples = e.toMapASP, aspInputFile = file, use3WithWholeSupport = true, withSupport = withSupport, globals = globals)
val answerSets = ASP.solve(Globals.FIND_ALL_REFMS, use3Map, file, e.toMapASP)
val refined =
answerSets match {
case Nil =>
Left("The support set is not subsumed as a whole, but I cannot get a refinement from the solver. Does this ever happen? What do we do in this case???")
case _ =>
answerSets.head match {
case x: AnswerSet if x == AnswerSet.UNSAT =>
incRule.rule.fromWeakExample match { // then the rule must be discarded
case true =>
Right(List[InconstistentRule]())
case _ =>
Left(s"\n Searching for minimal refinements at example ${e.time}:" + s" Rule \n ${incRule.rule.tostring} cannot be refined!")
}
case _ =>
val refs = (answerSets map (x => getInconsistentRules(x.atoms, Theory(incRule.rule), use3Map))).flatten
Right(refs)
}
}
refined
}
def getRefined(incs: List[InconstistentRule], retainedRules: Theory,
newRules: Theory, e: Example, withSupport: String = "fullSupport", globals: Globals) = {
val retained = retainedRules.extendUnique(newRules)
var refined = List[Theory]()
def searchMore(p: InconstistentRule, extra: Theory) = withSupport match {
case "fullSupport" => !retained.extendUnique(extra).thetaSubsumes(p.rule.supportSet)
case "strongOnly" => !retained.extendUnique(extra).thetaSubsumes(p.rule.supportSet.strongRules)
}
//retained.extend(Theory(p.initialRefinement))
for (incRule <- incs) {
if (searchMore(incRule, Theory(incRule.initialRefinement))) {
val r = refineRule(incRule, retained, e, withSupport = withSupport, globals = globals)
val z = r match {
case Left(x) =>
logger.error(x)
if (Globals.glvalues("perfect-fit") == "true") {
throw new RuntimeException(x)
} else {
List[InconstistentRule]()
}
case Right(x) => x
}
val ref = z.asInstanceOf[List[InconstistentRule]] map (x => x.initialRefinement)
if (searchMore(incRule, Theory(ref))) {
logger.error("we're fucked")
if (Globals.glvalues("perfect-fit") == "true") {
throw new RuntimeException("we're fucked")
}
}
refined = refined :+ Theory(ref).compress
} else {
refined = refined :+ Theory(incRule.initialRefinement)
}
}
refined
}
object Use2HeadAtom {
val use2headPattern = "use2\\(([1-9]+)\\,0\\)".r
def unapply(atom: String): Option[(String, Int)] = atom match {
case use2headPattern(i) => Some(atom, i.toInt)
case _ => None
}
}
object Use2BodyAtom {
//val use2bodyPattern = "use2\\(([0-9]+)\\,([1-9]+)\\)".r
val use2bodyPattern = "use2\\(([0-9]+)\\,([1-9]\\d*)\\)".r
//^[1-9]\d*$
def unapply(atom: String): Option[(String, (Int, Int))] = atom match {
case use2bodyPattern(i, j) => Some(atom, (i.toInt, j.toInt))
case _ => None
}
}
object Use3Atom {
val use3Pattern = "use3\\(([0-9]+)\\,([0-9]+)\\,([0-9]+)\\)".r
def unapply(atom: String): Option[(String, (Int, Int, Int))] = atom match {
case use3Pattern(i, j, k) => Some(atom, (i.toInt, j.toInt, k.toInt))
case _ => None
}
}
}
// OLED-master/src/main/scala/logic/Theory.scala
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package logic
import java.io.File
import app.runutils.Globals
import com.typesafe.scalalogging.LazyLogging
import logic.Examples.Example
import utils.{Utils, ASP}
import scala.collection.mutable.ListBuffer
object Theory {
val empty = Theory()
def apply(c: Clause) = new Theory(List(c))
def mergeTheories(T: List[Theory]) = Theory(T flatMap (x => x.clauses))
def compressTheory(l: Iterable[Clause]): List[Clause] = {
val compressed = new ListBuffer[Clause]
val included = (c: Clause) => compressed.toList.exists(x => x.thetaSubsumes(c) && c.thetaSubsumes(x))
for (c <- l) {
if (!included(c)) compressed += c
}
compressed.toList
}
}
case class Theory(clauses: List[Clause] = List()) extends Expression with LazyLogging {
var tps = new ListBuffer[String]
var fps = new ListBuffer[String]
var fns = new ListBuffer[String]
/*
def clearStats() = {
this.tps = new ListBuffer[String]
this.fps = new ListBuffer[String]
this.fns = new ListBuffer[String]
}
def f1 = if (this.stats._6.toDouble.isNaN) 0.0 else this.stats._6.toDouble
*/
def size = this.clauses.size
def stats = {
val tps = this.tps.distinct.length.toFloat
val fps = this.fps.distinct.length.toFloat
val fns = this.fns.distinct.length.toFloat
val precision = tps / (tps + fps)
val recall = tps / (tps + fns)
val fscore = 2 * precision * recall / (precision + recall)
(tps.toInt, fps.toInt, fns.toInt, precision, recall, fscore)
}
def clearStats() = {
this.tps = new ListBuffer[String]
this.fps = new ListBuffer[String]
this.fns = new ListBuffer[String]
}
def showWithStats = (this.clauses map (_.showWithStats)).mkString("\n")
def showWithStats_NoEC = (this.clauses map (_.showWithStats_NoEC)).mkString("\n")
override val tostring =
// if (Core.glvalues("withWeaks") == "true") {
this.clauses.map { x => if (x.fromWeakExample) x.tostring + " (weak rule)" else x.tostring }.mkString("\n")
// } else {
// this.clauses.map { x => x.tostring }.mkString("\n")
// }
val isEmpty = this == Theory.empty
def toPriorTheory = new PriorTheory(retainedRules = this)
def thetaSubsumes(that: Theory): Boolean = {
that.clauses.forall(p => this.clauses.exists(q => q.thetaSubsumes(p)))
}
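  /* Subsumption sketch (hypothetical theories): Theory(List(p :- q)) θ-subsumes
   * Theory(List(p :- q,r)) and Theory(List(p :- q,s)), since every clause of
   * the subsumed theory must be θ-subsumed by some clause of this theory; the
   * check is delegated clause-wise to Clause.thetaSubsumes.
   */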
def withTypePreds(globals: Globals) = clauses.map(_.withTypePreds(globals))
/**
* Returns "initiated" or "terminated". This is used by the streaming version
*/
def getTargetClass = {
val what = this.clauses.map(x => x.head.functor).toSet
if (what.size > 1) {
val msg = s"\nI'm learning both initiated and terminated rules in the same process!\n\nERROR:\n\n${this.tostring}"
throw new RuntimeException(msg)
}
if (what.nonEmpty) what.head else "empty"
}
/**
*
* @return The marked rules and the marked rule preds (e.g. rule(234234)) as a single string ready for ASP use.
* Also a map of ruleId --> rule
*/
def marked(globals: Globals): (String, Map[String, Clause]) = {
val allRefinements = this.clauses flatMap (_.refinements)
val allRules = this.clauses ++ allRefinements
val markedTheory = this.clauses map (_.marked(globals))
val markedRefinements = allRefinements map (_.marked(globals))
val allRulesMarked = markedTheory ++ markedRefinements
val hashCodesClausesMap = (allRules map (x => x.##.toString -> x)).toMap
val rulePredicates = hashCodesClausesMap.keySet.map(x => s"rule($x). ").mkString("\n")
(allRulesMarked.map(_.tostring).mkString("\n") + rulePredicates, hashCodesClausesMap)
}
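  /* Marking sketch: for a theory holding a single clause c with no refinements,
   * `marked(globals)` returns the marked version of c followed by a fact
   * rule(H)., where H is c's hash code, together with the map
   * Map(H.toString -> c). These hash codes are what ties the tps/fps/fns counts
   * returned by the solver back to the clauses being scored (see scoreRules).
   */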
/* Used when learning rules separately. */
def scoreRules(example: Example, globals: Globals, postPruningMode: Boolean = false): Unit = {
val targetClass = getTargetClass
    // If a rule has just been expanded, its refinements are empty, so generate new ones.
if (!postPruningMode) {
// Just to be on the safe side in the distributed case...
/*
if (Globals.glvalues("distributed").toBoolean) {
if (this.clauses.exists(rule => rule.refinements.isEmpty)) {
throw new RuntimeException(s"Found a rule with empty refinements set. That's an error because" +
s" in the distributed setting the refinements' set is generated right after clause construction.")
}
}
*/
this.clauses foreach (rule => if (rule.refinements.isEmpty) rule.generateCandidateRefs(globals))
}
// debug:
//this.clauses.foreach(x => println(x.score))
// Proceed to scoring
val e = (example.annotationASP ++ example.narrativeASP).mkString("\n")
val _marked = marked(globals)
val markedProgram = _marked._1
val markedMap = _marked._2
//val countRules = globals.TIMES_COUNT_RULE
val exmplCountRules = globals.EXAMPLE_COUNT_RULE
//--------------------------------------------------------
// For debugging
//val allRefinements = this.clauses flatMap(_.refinements)
//val allRules = this.clauses ++ allRefinements
//println(s"rules being evaluated: ${allRules.size}")
//--------------------------------------------------------
val show = globals.SHOW_TPS_ARITY_2 + globals.SHOW_FPS_ARITY_2 + globals.SHOW_FNS_ARITY_2 +
globals.SHOW_TIME + globals.SHOW_INTERPRETATIONS_COUNT
val include = {
targetClass match {
case "initiatedAt" => globals.INCLUDE_BK(globals.BK_INITIATED_ONLY_MARKDED)
case "terminatedAt" => globals.INCLUDE_BK(globals.BK_TERMINATED_ONLY_MARKDED)
}
}
val all = e + include + exmplCountRules + markedProgram + show
val f = Utils.getTempFile(s"isConsistent", ".lp")
Utils.writeToFile(f, "append")(p => List(all) foreach p.println)
val path = f.getCanonicalPath
val answerSet = ASP.solve(task = Globals.SCORE_RULES, aspInputFile = new File(path))
f.delete()
answerSet match {
case Nil =>
throw new RuntimeException("Got an empty answer set during rule evaluation (at least times count should be returned)")
case _ =>
val (exampleCounts, coverageCounts) = answerSet.head.atoms.foldLeft(List[String](), List[String]()){ (x, y) =>
val exCount = x._1
val coverageCounts = x._2
if (y.startsWith("tps") || y.startsWith("fps") || y.startsWith("fns")) {
(exCount, coverageCounts :+ y)
} else if (y.startsWith("countGroundings")) {
(exCount :+ y, coverageCounts)
} else {
throw new RuntimeException(s"Don't know what to do with what the solver" +
s" returned.\nExpected tps/2,fps/2,fns/2,countGroundings/1 got\n${answerSet.head.atoms}")
}
}
/*
*
* Don't throw this exception. There are cases where we do not have any groundings.
* For instance consider this example from the maritime domain: We are learning the concept
* terminatedAt(highSpeedIn(Vessel,Area),Time) and an example comes with no area in it.
* Then there are no groundings and, correctly, the "seen examples" from our existing rules
* should not be increased.
* */
//if (exampleCounts.isEmpty) throw new RuntimeException("No example count returned")
// Normally, only one countGroundings/1 atom should be returned, with the number of
// target concept groundings as its argument. If we have more than one target concept
// then we could have more such atoms, but OLED does not handle that.
if (exampleCounts.length > 1)
throw new RuntimeException(s"Only one countGroundings/1 atom was expected, got ${exampleCounts.mkString(" ")} instead.")
// increase the count for seen examples
//val c = exampleCounts.size
val c = exampleCounts.head.split("\\(")(1).split("\\)")(0).toInt
this.clauses foreach { x =>
x.seenExmplsNum += c //times//*100 // interps
x.refinements.foreach(y => y.seenExmplsNum += c)
x.supportSet.clauses.foreach(y => y.seenExmplsNum += c)
}
val parse = (atom: String) => {
val tolit = Literal.parse(atom)
val (what, hashCode, count) = (tolit.predSymbol, tolit.terms.head.tostring, tolit.terms.tail.head.tostring)
(what, hashCode, count)
}
val updateCounts = (what: String, hashCode: String, count: String) => {
val clause = markedMap(hashCode)
what match {
case "tps" => clause.tps += count.toInt
case "fps" => clause.fps += count.toInt
case "fns" => clause.fns += count.toInt
}
}
coverageCounts foreach { x =>
val (what, hashCode, count) = parse(x)
updateCounts(what, hashCode, count)
}
}
}
def scoreRulesNoEC(example: Example, globals: Globals, postPruningMode: Boolean = false): Unit = {
// If a rule has just been expanded its refinements are empty, so generate new
if (!postPruningMode) this.clauses foreach (rule => if (rule.refinements.isEmpty) rule.generateCandidateRefs(globals))
// Proceed to scoring
val e = (example.annotationASP ++ example.narrativeASP).mkString("\n")
val _marked = marked(globals)
val markedProgram = _marked._1
val markedMap = _marked._2
//val countRules = globals.TIMES_COUNT_RULE
val exmplCountRules = globals.EXAMPLE_COUNT_RULE
val show = globals.SHOW_TPS_ARITY_2 + globals.SHOW_FPS_ARITY_2 + globals.SHOW_FNS_ARITY_2 + globals.SHOW_TIME + globals.SHOW_INTERPRETATIONS_COUNT
val include = globals.INCLUDE_BK(globals.BK_RULE_SCORING_MARKDED)
val all = e + include + exmplCountRules + markedProgram + show
val f = Utils.getTempFile(s"isConsistent", ".lp")
Utils.writeToFile(f, "append")(p => List(all) foreach p.println)
val path = f.getCanonicalPath
val answerSet = ASP.solve(task = Globals.SCORE_RULES, aspInputFile = new File(path))
f.delete()
answerSet match {
case Nil =>
throw new RuntimeException("Got an empty answer set during rule evaluation (at least times count should be returned)")
case _ =>
val (exampleCounts, coverageCounts) = answerSet.head.atoms.foldLeft(List[String](), List[String]()){ (x, y) =>
val exCount = x._1
val coverageCounts = x._2
if (y.startsWith("tps") || y.startsWith("fps") || y.startsWith("fns")) {
(exCount, coverageCounts :+ y)
} else if (y.startsWith("countGroundings")) {
(exCount :+ y, coverageCounts)
} else {
throw new RuntimeException(s"Don't know what to do with what the solver" +
s" returned.\nExpected tps/2,fps/2,fns/2,countGroundings/1 got\n${answerSet.head.atoms}")
}
}
/*
*
* Don't throw this exception. There are cases where we do not have any groundings.
* For instance consider this example from the maritime domain: We are learning the concept
* terminatedAt(highSpeedIn(Vessel,Area),Time) and an example comes with no area in it.
* Then there are no groundings and, correctly, the "seen examples" from our existing rules
* should not be increased.
* */
//if (exampleCounts.isEmpty) throw new RuntimeException("No example count returned")
// Normally, only one countGroundings/1 atom should be returned, with the number of
// target concept groundings as its argument. If we have more than one target concept
// then we could have more such atoms, but OLED does not handle that.
if (exampleCounts.length > 1)
throw new RuntimeException(s"Only one countGroundings/1 atom was expected, got ${exampleCounts.mkString(" ")} instead.")
// increase the count for seen examples
//val c = exampleCounts.size
val c = exampleCounts.head.split("\\(")(1).split("\\)")(0).toInt
this.clauses foreach { x =>
x.seenExmplsNum += c //times//*100 // interps
x.refinements.foreach(y => y.seenExmplsNum += c)
x.supportSet.clauses.foreach(y => y.seenExmplsNum += c)
}
val parse = (atom: String) => {
val tolit = Literal.parse(atom)
val (what, hashCode, count) = (tolit.predSymbol, tolit.terms.head.tostring, tolit.terms.tail.head.tostring)
(what, hashCode, count)
}
val updateCounts = (what: String, hashCode: String, count: String) => {
val clause = markedMap(hashCode)
what match {
case "tps" => clause.tps += count.toInt
case "fps" => clause.fps += count.toInt
case "fns" => clause.fns += count.toInt
}
}
coverageCounts foreach { x =>
val (what, hashCode, count) = parse(x)
updateCounts(what, hashCode, count)
}
}
}
/*
* These variables are used for scoring the theory as a whole
* */
var _tps: Int = 0
var _fps: Int = 0
var _fns: Int = 0
def precision: Double = _tps.toFloat / (_tps + _fps)
def recall: Double = _tps.toFloat / (_tps + _fns)
def fscore: Double = {
val s = (2 * precision * recall) / (precision + recall)
if (s.isNaN) 0.0 else s
}
def score = fscore
/* Score this theory as a whole */
def score(example: Example, globals: Globals, postPruningMode: Boolean = false) = {
val e = (example.annotationASP ++ example.narrativeASP).mkString("\n")
val include = globals.INCLUDE_BK(globals.BK_WHOLE_EC)
val exmplCountRules = globals.EXAMPLE_COUNT_RULE
val show = globals.SHOW_TPS_ARITY_1 + globals.SHOW_FPS_ARITY_1 + globals.SHOW_FNS_ARITY_1 + globals.SHOW_INTERPRETATIONS_COUNT
val all =
e + "\n" + include + "\n" + this.clauses.map(x => x.withTypePreds(globals).tostring).mkString("\n") +
"\n" + exmplCountRules + globals.TPS_RULES + globals.FPS_RULES + globals.FNS_RULES + "\n" + show
val f = Utils.getTempFile("isConsistent", ".lp", deleteOnExit = true)
Utils.writeToFile(f, "append")(p => List(all) foreach p.println)
val path = f.getCanonicalPath
val answerSet = ASP.solve(task = Globals.SCORE_RULES, aspInputFile = new File(path))
f.delete()
//val (exampleCounts, tpss, fpss, fnss) = (List[String](), List[String](), List[String](), List[String]())
answerSet match {
case Nil => throw new RuntimeException("Got an empty answer set during rule evaluation (at least times count should be returned)")
case _ =>
val (exampleCounts, tpss, fpss, fnss) = answerSet.head.atoms.foldLeft(List[String](), List[String](), List[String](), List[String]()){ (x, y) =>
val exCount = x._1
val tps = x._2
val fps = x._3
val fns = x._4
if (y.startsWith("tps")) (exCount, tps :+ y, fps, fns)
else if (y.startsWith("fps")) (exCount, tps, fps :+ y, fns)
else if (y.startsWith("fns")) (exCount, tps, fps, fns :+ y)
else if (y.startsWith("exampleGrounding")) (exCount :+ y, tps, fps, fns)
else throw new RuntimeException(s"Don't know what to do with what the solver" + s" returned.\nExpected tps/2,fps/2,fns/2,exampleGrounding/1 got\n${answerSet.head.atoms}")
}
if (exampleCounts.isEmpty) throw new RuntimeException("No example count returned")
val c = exampleCounts.size
this.clauses foreach { x =>
x.seenExmplsNum += c
x.refinements.foreach(y => y.seenExmplsNum += c)
x.supportSet.clauses.foreach(y => y.seenExmplsNum += c)
}
this._tps += tpss.length
this._fps += fpss.length
this._fns += fnss.length
(exampleCounts.length, tpss.length, fpss.length, fnss.length)
}
}
var meanScore = 0.0
/*
def updateMeanScore: Globals = {
val newScore = this.fscore
val newMeanScore = ((GlobalValues.TOP_THEORY_SCORE*GlobalValues.TOP_THEORY_SCORE_COUNT) + newScore) / (GlobalValues.TOP_THEORY_SCORE_COUNT + 1)
GlobalValues.TOP_THEORY_SCORE_COUNT += 1
GlobalValues.TOP_THEORY_SCORE = newMeanScore
this.meanScore = GlobalValues.TOP_THEORY_SCORE
}
/* Score each candidate refinement, based on the utility of adding the refinement in the theory */
def scoreRules2(example: Example, jep: Jep, globals: GlobalValues, postPruningMode: Boolean = false) = {
// First get the score of the current running theory.
// Note that this also updates the seen example counts for each "top" clause
// (i.e. each clause in the running theory), for each bottom clause in the
// support set of each top clause and for each one of the top-clause's candidate
// refinements. So DO NOT update these counts again in what follows.
// Sore the running hypothesis:
this.score(example, jep, globals, postPruningMode)
// Update the mean observed f-score for the running hypothesis:
this.updateMeanScore
// The topTheoryFscore above is new score for each clause C in the running
// hypothesis (since each such clause belongs in the running hypothesis).
// Score all refinements for each clause in the current hypothesis
// Therefore, the new observed mean score for each such C is now (i.e. after the update
// in the previous line):
//----------------
// this.meanScore
//----------------
println(s"top theory score: ${this.meanScore}")
// So, we update the mean observed score for each such "top" clause:
this.clauses.foreach(x => x.updateScoreLearnWholeTs(this.meanScore))
// Next, score each refinement, for each clause in the current hypothesis:
for (rule <- this.clauses) {
if (!postPruningMode) {
if (rule.refinements.isEmpty) rule.generateCandidateRefs
for (ref <- rule.refinements) {
val T = Theory(this.clauses.filter(x => x!=rule) :+ ref)
// Score the alternative hypothesis:
T.score(example,jep,globals,postPruningMode)
// Update the mean observed score for the current clause.
// This score is now the mean observed score of the alternative hypothesis T
// that contains a refinement instead of a parent clause.
T.updateMeanScore
ref.updateScoreLearnWholeTs(T.meanScore)
println(s"ref score: ${ref.meanScoreLearningWholeTheories}")
}
}
}
}
*/
def growNewRuleTest(e: Example, target: String, globals: Globals): Boolean = {
// we already have the target with the input (the target parameter).
// But the one from the input is used only in case of an empty theory. In
// other cases we get the target class by looking at the rules' heads,
// just for some extra safety on whether we're indeed learning separately
    // (check out the exceptions thrown below in case we end up with a mixture
    // of initiatedAt and terminatedAt rules in the theory.)
val targetClass = getTargetClass
def solve(program: String): List[AnswerSet] = {
val f = Utils.getTempFile(s"growNewRuleTest-for-$target", ".lp")
Utils.writeToFile(f, "append")(p => List(program) foreach p.println)
val path = f.getCanonicalPath
//ASP.solve(task = Core.INFERENCE, aspInputFile = new File(path), jep=jep)
ASP.solve(task = Globals.GROW_NEW_RULE_TEST, aspInputFile = new File(path))
}
val (includeBKfile, failedTestDirective, show) = {
targetClass match {
        // If we are learning the initiatedAt part of the theory, then we must start growing
        // a new rule if we have FNs, i.e. no initiatedAt rule in the current hypothesis fires,
        // and fluents are not initiated when they should be.
case "initiatedAt" =>
if (Globals.glvalues("with-inertia").toBoolean) {
(globals.INCLUDE_BK(globals.INITIATED_ONLY_INERTIA), globals.FNS_RULES, globals.SHOW_FNS_ARITY_1)
} else {
(globals.INCLUDE_BK(globals.BK_INITIATED_ONLY), globals.FNS_RULES, globals.SHOW_FNS_ARITY_1)
}
        // If we are learning the terminatedAt part of the theory, then we must start growing
        // a new rule if we have FPs, i.e. no terminatedAt rule in the current hypothesis fires,
        // and fluents are not terminated when they should be.
case "terminatedAt" =>
(globals.INCLUDE_BK(globals.BK_TERMINATED_ONLY), globals.FPS_RULES, globals.SHOW_FPS_ARITY_1)
// In this case no theory has been generated yet. We therefore check if the current example
// satisfies the empty theory with the plain isSAT method. To do that, we use the whole set
// of EC axioms in the BK. Also, coverage directives (normally fps, tns etc) are coverage
// constraints here, forcing the SOLVER to try to satisfy them and getting back an UNSAT program in case of failure.
// Note that we use no #show here.
case "empty" =>
// the target is taken from the method's input here.
(if (target == "initiatedAt") globals.INCLUDE_BK(globals.BK_INITIATED_ONLY)
else globals.INCLUDE_BK(globals.BK_TERMINATED_ONLY),
if (target == "initiatedAt") globals.CONSTRAINT_COVER_ALL_POSITIVES
else globals.CONSTRAINT_EXCLUDE_ALL_NEGATIVES, "") // no #show
}
}
val t = (this.withTypePreds(globals) map (_.tostring)).mkString("\n")
// Getting exmplWithInertia here does not cause problems (in the initiated case). See comments at CaviarUtils.getDataAsChunks
val ex = (e.annotationASP ++ e.narrativeASP).mkString("\n")
val program = ex + includeBKfile + t + failedTestDirective + show
    // Fail if the existing rules produce fns atoms (initiation) or fps atoms (termination).
val failure = (atoms: List[String]) =>
if (targetClass != "empty")
targetClass match {
case "initiatedAt" => atoms.exists(p => p.startsWith("fns"))
case "terminatedAt" => atoms.exists(p => p.startsWith("fps"))
}
      else atoms.head == globals.UNSAT // UNSAT here means the example does not satisfy the empty theory, so a new rule must be generated.
//val timeStart = System.nanoTime()
val answerSet = solve(program)
//val timeEnd = System.nanoTime()
//println(s"growNewRuleTest solving time: ${(timeEnd-timeStart)/1000000000.0}")
answerSet.nonEmpty match {
case true =>
val atoms = answerSet.head.atoms
if (failure(atoms)) true
else false
case _ => false
}
}
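  /* A minimal usage sketch (hypothetical names: `theory`, `e` and `globals` are
   * assumed to be a current Theory, the incoming Example and a Globals instance):
   *
   *   if (theory.growNewRuleTest(e, "initiatedAt", globals)) {
   *     // the current rules leave failure atoms in the answer set, so grow a new rule
   *   }
   *
   * The method returns true iff failure atoms (fns for initiatedAt, fps for
   * terminatedAt, or UNSAT for the empty theory) show up in the answer set.
   */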
def growNewRuleTestNoEC(e: Example, globals: Globals): Boolean = {
def solve(program: String): List[AnswerSet] = {
val f = Utils.getTempFile(s"growNewRuleTest", ".lp")
Utils.writeToFile(f, "append")(p => List(program) foreach p.println)
val path = f.getCanonicalPath
ASP.solve(task = Globals.GROW_NEW_RULE_TEST, aspInputFile = new File(path))
}
val (failedTestDirective, show) = (globals.FNS_RULES, globals.SHOW_FNS_ARITY_1)
val t = (this.withTypePreds(globals) map (_.tostring)).mkString("\n")
val ex = (e.annotationASP ++ e.narrativeASP).mkString("\n")
val program = ex + t + failedTestDirective + show
val failure = (atoms: List[String]) => atoms.exists(p => p.startsWith("fns"))
val answerSet = solve(program)
answerSet.nonEmpty match {
case true =>
val atoms = answerSet.head.atoms
if (failure(atoms)) true
else false
case _ => false
}
}
def growNewRuleTestWholeTheories(e: Example, globals: Globals): Boolean = {
def solve(program: String): List[AnswerSet] = {
val f = Utils.getTempFile(s"growNewRuleTest", ".lp", deleteOnExit = true)
Utils.writeToFile(f, "append")(p => List(program) foreach p.println)
val path = f.getCanonicalPath
//ASP.solve(task = Core.INFERENCE, aspInputFile = new File(path), jep=jep)
ASP.solve(task = Globals.GROW_NEW_RULE_TEST, aspInputFile = new File(path))
}
val (includeBKfile, failedTestDirective, show) = {
(globals.INCLUDE_BK(globals.BK_WHOLE_EC),
List(globals.FPS_RULES, globals.FNS_RULES).mkString("\n"),
List(globals.SHOW_FNS_ARITY_1, globals.SHOW_FPS_ARITY_1).mkString("\n"))
}
val t = (this.withTypePreds(globals) map (_.tostring)).mkString("\n")
val ex = (e.annotationASP ++ e.narrativeASP).mkString("\n")
val program = ex + includeBKfile + t + failedTestDirective + show
    // Fail if any of the existing rules produces FNs or FPs.
val failure = (atoms: List[String]) => atoms.exists(p => p.startsWith("fns") || p.startsWith("fps"))
//val timeStart = System.nanoTime()
val answerSet = solve(program)
//val timeEnd = System.nanoTime()
//println(s"growNewRuleTest solving time: ${(timeEnd-timeStart)/1000000000.0}")
answerSet.nonEmpty match {
case true =>
val atoms = answerSet.head.atoms
if (failure(atoms)) true
else false
case _ => false
}
}
def use_2_split(globals: Globals): (Theory, Map[String, Literal]) = {
/*
val t = (for ((c, i) <- this.clauses zip List.range(1, this.clauses.length + 1))
yield c.use_2_split(i)).map { x => List(x._1, x._2) }.transpose
val defeasibles = Theory((for (x <- t(0)) yield x.asInstanceOf[Theory].clauses).flatten)
val use = t(1).asInstanceOf[List[Map[String, Literal]]].reduce((x, y) => x ++ y)
(defeasibles, use)
*/
this match {
case Theory.empty => (Theory(), Map[String, Literal]())
case _ =>
val t = (for ((c, i) <- this.clauses zip List.range(1, this.clauses.length + 1))
yield c.use_2_split(i, globals)).map { x => List(x._1, x._2) }.transpose
val defeasibles = Theory((for (x <- t(0)) yield x.asInstanceOf[Theory].clauses).flatten)
val use = t(1).asInstanceOf[List[Map[String, Literal]]].reduce((x, y) => x ++ y)
(defeasibles, use)
}
}
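  /* Note on the zip/transpose idiom above (illustrative values only): zipping the
   * clauses with 1-based indices and transposing the List(x._1, x._2) pairs puts
   * all defeasible theories in t(0) and all use-atom maps in t(1), e.g.
   *
   *   List(List(t1, m1), List(t2, m2)).transpose == List(List(t1, t2), List(m1, m2))
   */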
/*
def withStrongSupportsOnly = {
val x = this.strongRules.clauses.map{
p => Clause(head=p.head,body=p.body,fromWeakExample=p.fromWeakExample,supportSet=p.supportSet.strongRules)
}
if (Core.glvalues("withWeaks").toBoolean) Theory(x) else this // don't complicate things in strong-only learning
}
*/
def filterSupports(filterWhat: String) = {
val f = (x: Theory) => filterWhat match {
case "strongRules" => x.strongRules
case "weakRules" => {
val weakRules = x.weakRules
        if (weakRules.nonEmpty) weakRules else x
}
}
val x = this.strongRules.clauses.map{
p =>
Clause(
head = p.head,
body = p.body,
fromWeakExample = p.fromWeakExample,
supportSet = f(p.supportSet))
}
if (Globals.glvalues("withWeaks").toBoolean) Theory(x) else this // don't complicate things in strong-only learning
}
def strongWeakSplit = {
val (strongs, weaks) = this.clauses.foldLeft(List[Clause](), List[Clause]()) {
(x, y) =>
val (strongRules, weakRules) = (x._1, x._2)
y.fromWeakExample match {
case true => (strongRules, weakRules :+ y)
case false => (strongRules :+ y, weakRules)
}
}
(strongs, weaks)
}
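  /* Design note: the foldLeft above makes the accumulation order explicit; a
   * sketch of the equivalent standard idiom (note the tuple order) would be:
   *
   *   val (weaks, strongs) = this.clauses.partition(_.fromWeakExample)
   *   (strongs, weaks)
   */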
/**
    * This is not used anywhere. It generates a defeasible theory from all rules
    * in the prior hypothesis and, for each rule, from all the rules in its
    * support set. It is not necessary to search in such a large program, since
    * specializations are implemented in a rule-by-rule fashion.
*/
  def use_3_split_one(withSupport: String = "fullSupport", globals: Globals) = {
if (this != Theory()) {
val z = this.clauses zip List.range(1, this.clauses.length + 1) map
(x => x._1.use_3_split_one(x._2, withSupport = withSupport, globals = globals)) map (x => List(x._1, x._2, x._3))
val t = z.transpose
val defeasibles = Theory((for (x <- t(0)) yield x.asInstanceOf[Theory].clauses).flatten)
val use3map = t(1).asInstanceOf[List[Map[String, Literal]]].reduce((x, y) => x ++ y)
val use3generates = t(2).asInstanceOf[List[String]]
(defeasibles, use3map, use3generates)
} else {
(Theory(), Map[String, Literal](), List[String]())
}
}
/**
*
* Same thing as above, but this analyses a rule using its whole support.
*
* @todo This needs to be refactored and merged with the one above
*/
def use_3_split_all(withSupport: String = "fullSupport", globals: Globals) = {
if (this != Theory()) {
val z = this.clauses zip List.range(1, this.clauses.length + 1) map
(x => x._1.use_3_split(x._2, withSupport = withSupport, globals = globals)) map (x => List(x._1, x._2, x._3))
val t = z.transpose
val defeasibles = Theory((for (x <- t(0)) yield x.asInstanceOf[Theory].clauses).flatten)
val use3map = t(1).asInstanceOf[List[Map[String, Literal]]].reduce((x, y) => x ++ y)
val use3generates = t(2).asInstanceOf[List[String]]
(defeasibles, use3map, use3generates)
} else {
(Theory(), Map[String, Literal](), List[String]())
}
}
def use_3_split(globals: Globals): (Theory, Map[String, Literal]) = {
val z = this.clauses zip List.range(1, this.clauses.length + 1) map (x => x._1.use_3_split(x._2, globals = globals)) map (x => List(x._1, x._2))
val t = z.transpose
val defeasibles = Theory((for (x <- t(0)) yield x.asInstanceOf[Theory].clauses).flatten)
val use = t(1).asInstanceOf[List[Map[String, Literal]]].reduce((x, y) => x ++ y)
(defeasibles, use)
}
def map(f: (Clause => Any)) = this.clauses map f
def strongRules = Theory(this.clauses.filter(x => !x.fromWeakExample))
def weakRules = Theory(this.clauses.filter(x => x.fromWeakExample))
def extend(that: Theory): Theory = {
/*
def getStrongRules(t: Theory) = Theory(this.clauses.filter(x => !x.fromWeakExample))
def check(t: Theory) = {
if (getStrongRules(this).clauses.length != getStrongRules(t).clauses.length){
throw new RuntimeException("Some strong rules got lost!")
}
}
*/
val t = Theory((this.clauses ++ that.clauses).distinct)
//check(t)
t
}
def extendUnique(that: Theory): Theory = {
val v = for (x <- that.clauses if !this.containsRule(x)) yield x
val t = Theory(this.clauses ++ v)
t
}
def compress: Theory = {
val t = this.clauses.foldLeft(List[Clause]()){
(p, q) =>
if (!q.fromWeakExample)
if (p.exists(x => !x.fromWeakExample && x.thetaSubsumes(q) && q.thetaSubsumes(x))) p
else p :+ q
else if (p.exists(x => x.thetaSubsumes(q) && q.thetaSubsumes(x))) p
else p :+ q
}
Theory(t)
}
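  /* Sketch of the deduplication above: a rule q is dropped if an equivalent rule
   * (mutual theta-subsumption, i.e. a syntactic variant) has already been kept.
   * Strong rules are only checked against previously kept strong rules, whereas
   * weak rules are checked against everything kept so far.
   */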
def containsRule(c: Clause) = {
this.clauses.exists(x => x.thetaSubsumes(c) && c.thetaSubsumes(x))
}
def updateSupports(kernel: Theory, fromWeakExample: Boolean = false) = {
LogicUtils.updateSupport(this, kernel, fromWeakExample = fromWeakExample)
}
def clearSupports(e: Example, globals: Globals) = { // it also removes inconsistent weak rules
// Don't throw away weak rules before trying to refine them
//val consistents =
// this.clauses.filter(x => !x.fromWeakExample) ++ this.clauses.filter(x => x.fromWeakExample).filter(x => x.isConsistent(e))
val consistents = this.clauses
/* // This is too slow
consistents.foreach {
x =>
logger.info(s"Checking consistency for weak rules ${this.clauses.indexOf(x)}")
x.supportSet.clauses.foreach {
y =>
if (!y.isConsistent(e)) {
if (y.fromWeakExample) {
x.removeFromSupport(y)
logger.info("Removed inconsistent (weak) support set rule")
} else {
logger.error(s"Strong support set rule covers negatives")
System.exit(-1)
}
}
}
}
*/
///* // This delegates consistency checking for all rules at once to the ASP solver
if (consistents != Nil) {
val path = ASP.isConsistent_program_Marked(Theory(consistents), e, globals)
val answerSet = ASP.solve(task = Globals.INFERENCE, aspInputFile = new File(path))
if (answerSet != Nil) {
val f = (a: String) => {
val tolit = Literal.parse(a)
val (i, j) = (tolit.terms.head.tostring.toInt, tolit.terms.tail.head.tostring.toInt)
(i, j)
}
val grouped = answerSet.head.atoms.map(x => f(x)).groupBy{ _._1 }.map{ case (k, v) => k -> v.map(y => y._2).distinct }
for (x <- grouped.keySet) {
val rule = this.clauses(x)
val toBeRemoved = grouped(x) map (p => rule.supportSet.clauses(p))
for (rm <- toBeRemoved) {
if (rm.fromWeakExample) {
rule.removeFromSupport(rm)
logger.info(s"Removed inconsistent support rule: \n ${rm.tostring} \n")
} else {
logger.error(s"Strong support set rule covers" +
s" negatives: \n ${rm.tostring} \n ${answerSet.head.atoms}")
throw new RuntimeException(s"Strong support set rule covers" +
s" negatives: \n ${rm.tostring} \n ${answerSet.head.atoms}")
}
}
}
}
}
//*/
val removeRule = (x: Clause) => x.supportSet.isEmpty match {
case false => false
case _ => x.fromWeakExample match {
case true => true
case _ => throw new RuntimeException(s"Strong rule with empty support: \n ${x.tostring} ${x.fromWeakExample}")
}
}
val keep = (x: Theory) => Theory(x.clauses.filter(x => !removeRule(x)))
keep(Theory(consistents))
}
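  /* The marked-atoms protocol assumed above (a sketch of the expected format):
   * each atom returned by the solver carries two integer terms, e.g. terms
   * (2, 5) mean "support rule 5 of theory rule 2 covers a negative". Grouping
   * by the first term collects, per theory rule, the indices of its
   * inconsistent support rules. The actual predicate name is whatever
   * ASP.isConsistent_program_Marked emits.
   */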
}
/*
object PriorTheory {
def apply(pt: PriorTheory) = {
new PriorTheory(retainedRules = pt.merge)
}
}
*/
class PriorTheory(
val retainedRules: Theory = Theory(),
val newRules: Theory = Theory(),
val refinedRules: Theory = Theory()) extends Theory {
def merge = Theory(
this.retainedRules.clauses ++
this.newRules.clauses ++
this.refinedRules.clauses
)
override def updateSupports(kernel: Theory, fromWeakExample: Boolean) = {
LogicUtils.updateSupport(this.newRules, kernel, fromWeakExample)
LogicUtils.updateSupport(this.retainedRules, kernel, fromWeakExample)
LogicUtils.updateSupport(this.refinedRules, kernel, fromWeakExample)
}
override def clearSupports(e: Example, globals: Globals) = {
val news = this.newRules.clearSupports(e, globals)
val ret = this.retainedRules.clearSupports(e, globals)
val ref = this.refinedRules.clearSupports(e, globals)
new PriorTheory(retainedRules = ret, newRules = news, refinedRules = ref)
}
override val isEmpty = this.merge.isEmpty
override val tostring = this.merge.tostring
/*
override def compress: PriorTheory = {
def checkAgainst(currentRules: Theory, allOtherRules: List[Theory]) = {
val allOthers = allOtherRules.foldLeft(List[Clause]()){(x,y) => x ++ y.clauses}
val keep = currentRules.clauses.foldLeft(List[Clause]()){
(p,q) =>
if (!q.fromWeakExample) p :+ q
//if ((p++allOthers).exists(x => !x.fromWeakExample && x.thetaSubsumes(q) && q.thetaSubsumes(x))) p
//else p :+ q
else
if ((p++allOthers).exists(x => x.thetaSubsumes(q) && q.thetaSubsumes(x))) p
else p :+ q
}
Theory(keep)
}
val news = checkAgainst(this.newRules, List(this.refinedRules,this.retainedRules))
val retained = checkAgainst(this.retainedRules, List(news,this.refinedRules))
val refined = checkAgainst(this.refinedRules, List(news, retained))
new PriorTheory(retainedRules=retained,newRules=news,refinedRules=refined)
}
*/
}
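/* Usage sketch (mirrors how revisions are assembled in the MCTS learners in
 * this repo): the three rule groups are merged into a single Theory before
 * scoring, e.g.
 *
 *   val revised = new PriorTheory(retainedRules = retained,
 *                                 newRules = news,
 *                                 refinedRules = Theory(refined)).merge
 */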
/*
case class DefeasibleProgram(kernelSet: Theory = Theory(), priorTheory: Theory = Theory()) {
private val _priorTheory = priorTheory match {
case x: Theory => x
case x: PriorTheory => x.merge
}
private val splitKernel = new DefeasibleKernel
private val splitPrior = new DefeasiblePrior
class DefeasibleKernel {
private def split = kernelSet.use_2_split
val isEmpty = this.split == (Theory.empty, Map())
val defeasibleKS = split._1
val use2AtomsMap = split._2
}
class DefeasiblePrior {
private def split = _priorTheory.use_3_spilt_one
val isEmpty = this.split == (Theory.empty, Map(),List())
val defeasiblePrior = split._1
val use3AtomsMap = split._2
val use3generates = split._3
}
}
*/
OLED | OLED-master/src/main/scala/logic/Variable.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package logic
/** A variable is any term that starts with an upper-case letter */
case class Variable(override val name: String, inOrOutVar: String = "", override val _type: String = "") extends Expression {
require(name.toCharArray()(0).isUpper) // else throws an IllegalArgumentException
override def tostring = name
override def tostringQuote = if (inOrOutVar == "-" || inOrOutVar == "#") "\"" + name + "\"" else name
def asLiteral = Literal(predSymbol = name)
}
OLED | OLED-master/src/main/scala/lomcts/LoMCTS.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package lomcts
import java.io.File
import java.util.UUID
import akka.actor.Actor
import app.runutils.{Globals, RunningOptions}
import app.runutils.IOHandling.InputSource
import com.typesafe.scalalogging.LazyLogging
import iled.ILED
import logic.Examples.Example
import logic.{Literal, PriorTheory, Rules, Theory}
import mcts.{InnerNode, RootNode, TreeNode}
import utils.{ASP, Utils}
import xhail.Xhail
class LoMCTS[T <: InputSource](
inps: RunningOptions,
trainingDataOptions: T,
testingDataOptions: T,
trainingDataFunction: T => Iterator[Example],
testingDataFunction: T => Iterator[Example]) extends Actor with LazyLogging {
private var trainingData = trainingDataFunction(trainingDataOptions)
var continue = true
val upto = 10
val scoreThemFor = 80
var bestTheorySoFar = Theory()
val globals = inps.globals
val exploreRate = 0.005 //1.0/Math.sqrt(2) //
val rootNode = RootNode()
def receive = {
case "start" => run()
}
def run() = {
var iterationCount = 0
while (iterationCount < 2000) {
logger.info(s"Iteration: $iterationCount")
val exmpl = getNextBatch()
// First, generate a new kernel set from the new example and a new theory from it,
// without assuming any prior theory T. If there is no equivalent theory in the first
// level of children of the root node, add T as a node there (so, expand the tree horizontally)
//---------------------------------------------------------------------------------------------
// NOTE: Perhaps a strategy worth exploring is to randomly select a (random) number of existing
// bottom clauses and include them in the new theory generated from the new example.
//---------------------------------------------------------------------------------------------
val newLevelOneNode = generateChildNode(rootNode.theory, exmpl, globals)
if (newLevelOneNode != Theory()) {
if (rootNode.children.forall(p =>
!(p.theory.thetaSubsumes(newLevelOneNode) && newLevelOneNode.thetaSubsumes(p.theory)))) {
logger.info("Added new child at level 1 (tree expanded horizontally).")
val score = scoreNode(newLevelOneNode, exmpl, globals)
val n = InnerNode("new-level-1-node", newLevelOneNode, rootNode)
n.updateRewards(score)
n.incrementVisits()
rootNode.addChild(n)
n.propagateReward(score)
}
}
// Continue to an MCTS round
if (rootNode.children.nonEmpty) {
val bestChild = rootNode.descendToBestChild(exploreRate)
val newNode = generateAndScoreChildren(bestChild, exmpl, globals, iterationCount)
if (newNode != bestChild) {
logger.info(s"\nLeaf node: (MCTS score: ${bestChild.getMCTSScore(exploreRate)} |" +
s" mean f1-score: ${bestChild.theory.fscore} | visits: ${bestChild.visits} | id: ${bestChild.id}):\n${bestChild.theory.tostring}\nwas expanded " +
s"to (MCTS score: ${newNode.getMCTSScore(exploreRate)} |" +
s" mean f1-score: ${newNode.theory.fscore} | visits: ${newNode.visits} | id: ${newNode.id}):\n${newNode.theory.tostring}\n")
bestTheorySoFar = bestChild.theory
} else {
logger.info("Failed to find a better revision")
}
}
iterationCount += 1
}
var finalTheory = bestTheorySoFar
logger.info(s"Final theory:\n${finalTheory.tostring}")
logger.info("Done")
logger.info("Cross-validation...")
val testSet = testingDataFunction(testingDataOptions)
val theory_ = Theory(finalTheory.clauses).compress // generate new theory to clear the stats counter
crossVal(theory_, testSet, "", globals)
val x = theory_.precision
logger.info(s"F1-score on test set: ${theory_.fscore} | (tps, fps, fns) = (${theory_._tps}, ${theory_._fps}, ${theory_._fns})")
}
def crossVal(t: Theory, data: Iterator[Example], handCraftedTheoryFile: String = "", globals: Globals) = {
while (data.hasNext) {
val e = data.next()
evaluateTheory(t, e, handCraftedTheoryFile, globals)
}
//val stats = t.stats
//(stats._1, stats._2, stats._3, stats._4, stats._5, stats._6)
}
def generateAndScoreChildren(fromNode: TreeNode, exmpl: Example, gl: Globals, iterationCount: Int) = {
require(fromNode.isLeafNode())
// This generates minimal revisions from the current leaf node using a fixed number of interpretations
    // to guide the revision generation process. Other strategies worth trying include
    // generating all revisions (including non-minimal ones) from a single example, or
    // additionally using revisions obtained from other examples.
val candidateChildren = (1 to upto).foldLeft(Vector[Theory]()) { (theories, _) =>
val newExmpl = getNextBatch()
val newTheory = generateChildNode(fromNode.theory, newExmpl, gl)
      // This pairwise subsumption check is prohibitively expensive; it needs to be optimized.
val isNew = theories.forall(p => !p.thetaSubsumes(newTheory) && !newTheory.thetaSubsumes(p))
if (isNew) {
// score it on this example, don't waste it
scoreNode(newTheory, newExmpl, gl)
theories :+ newTheory
} else {
theories
}
}
logger.info(s"Generated ${candidateChildren.size} candidate revisions")
if (candidateChildren.nonEmpty) {
// Next we need to use a Hoeffding test to identify the best of these theories.
val candidates = Vector(fromNode.theory) ++ candidateChildren
var hoeffdingSucceeds = false
// score all on the initial example, don't waste it
candidates.foreach(p => scoreNode(p, exmpl, gl))
/*
while (!hoeffdingSucceeds) {
}
*/
      // For now we use the naive strategy of scoring the candidates on a fixed
      // number of interpretations and keeping the best theory. Implementing an
      // actual Hoeffding test involves several complications (apparent when
      // following the code) that remain to be addressed.
logger.info("scoring candidates")
      for (_ <- 0 to scoreThemFor) {
val e = getNextBatch()
candidates.foreach(p => scoreNode(p, e, gl))
}
val sorted = candidates.sortBy(x => -x.fscore)
val best = if (sorted.head.fscore > fromNode.theory.fscore) {
// The depth is used in the id generation of the children nodes.
val depth = fromNode.getDepth() + 1
val id = s"$iterationCount-$depth-1"
val newNode = InnerNode(id, sorted.head, fromNode)
//val score = score1 - score2
newNode.updateRewards(sorted.head.fscore)
newNode.incrementVisits()
fromNode.addChild(newNode)
newNode.propagateReward(sorted.head.fscore)
newNode
} else {
        scoreAndReturnNode(fromNode, exmpl, gl)
}
best
} else {
      scoreAndReturnNode(fromNode, exmpl, gl)
}
}
  def scoreAndReturnNode(node: TreeNode, exmpl: Example, gl: Globals) = {
//val score = scoreNode(node.theory, exmpl, jep, gl)
node.updateRewards(node.theory.fscore)
node.incrementVisits()
node.propagateReward(node.theory.fscore)
node
}
def scoreNode(node: Theory, exmpl: Example, gl: Globals) = {
evaluateTheory(node, exmpl, "", gl)
val score = node.fscore
score
}
def evaluateTheory(theory: Theory, e: Example, handCraftedTheoryFile: String = "", globals: Globals): Unit = {
val varbedExmplPatterns = globals.EXAMPLE_PATTERNS_AS_STRINGS
val coverageConstr = s"${globals.TPS_RULES}\n${globals.FPS_RULES}\n${globals.FNS_RULES}"
val t =
if (theory != Theory()) {
theory.clauses.map(x => x.withTypePreds(globals).tostring).mkString("\n")
} else {
globals.INCLUDE_BK(handCraftedTheoryFile)
}
val show = globals.SHOW_TPS_ARITY_1 + globals.SHOW_FPS_ARITY_1 + globals.SHOW_FNS_ARITY_1
val ex = e.tostring
val program = ex + globals.INCLUDE_BK(globals.BK_CROSSVAL) + t + coverageConstr + show
val f = Utils.getTempFile(s"eval-theory-${UUID.randomUUID().toString}-${System.currentTimeMillis()}", ".lp")
Utils.writeLine(program, f.getCanonicalPath, "overwrite")
val answerSet = ASP.solve(task = Globals.INFERENCE, aspInputFile = f)
if (answerSet.nonEmpty) {
val atoms = answerSet.head.atoms
atoms.foreach { a =>
val lit = Literal.parse(a)
//val inner = lit.terms.head
lit.predSymbol match {
case "tps" => theory._tps += 1
case "fps" => theory._fps += 1
case "fns" => theory._fns += 1
}
}
}
}
def generateChildNode(currentNode: Theory, currentExample: Example, gl: Globals) = {
//logger.info("Generating children nodes")
//println(s"Generating children at example ${currentExample.time}")
val isSat = ILED.isSAT(currentNode, currentExample, ASP.check_SAT_Program, gl)
if (isSat) {
Theory()
} else {
val interpretation = currentExample.annotationASP ++ currentExample.narrativeASP
val infile = Utils.getTempFile("example", ".lp")
Utils.writeToFile(infile, "overwrite") { p => interpretation.foreach(p.println) }
val bk = gl.BK_WHOLE_EC
val (_, varKernel) = Xhail.runXhail(fromFile = infile.getAbsolutePath, kernelSetOnly = true, bkFile = bk, globals = gl)
val aspFile: File = utils.Utils.getTempFile("aspInduction", ".lp")
val (_, use2AtomsMap, defeasible, use3AtomsMap, _, _) =
ASP.inductionASPProgram(kernelSet = Theory(varKernel),
priorTheory = currentNode, examples = currentExample.toMapASP, aspInputFile = aspFile, globals = gl)
val answerSet = ASP.solve("iled", use2AtomsMap ++ use3AtomsMap, aspFile, currentExample.toMapASP)
if (answerSet != Nil) {
val newRules = Rules.getNewRules(answerSet.head.atoms, use2AtomsMap)
ILED.updateSupport(newRules, Theory(varKernel))
val icRules = Rules.getInconsistentRules(answerSet.head.atoms, currentNode, use3AtomsMap)
val retainedRules = Theory(currentNode.clauses.filter(x => icRules.forall(y => y.rule != x)))
//iled.ILED.updateSupport(retainedRules, bottomTheory) //no need for that, each rule has one rule in its support, its bottom clause, this doesn't change
val refinedRules = icRules.map(x => x.initialRefinement)
val newTheory = new PriorTheory(retainedRules, newRules, Theory(refinedRules)).merge
newTheory
/*
if (theories.exists(theory => theory.thetaSubsumes(newTheory) && newTheory.thetaSubsumes(theory))) theories else theories :+ newTheory
} else {
theories
}
*/
} else {
Theory()
}
}
}
def getNextBatch() = {
if (trainingData.nonEmpty) trainingData.next()
else {
trainingData = trainingDataFunction(trainingDataOptions)
trainingData.next()
}
}
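  /* Note: when the training iterator is exhausted it is re-created from the data
   * options, so getNextBatch effectively cycles over the training data
   * indefinitely; the iteration bound in run() is what terminates learning.
   */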
}
OLED | OLED-master/src/main/scala/mcts/HillClimbing.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package mcts
import java.io.File
import java.util.UUID
import app.runners.{MLNDataHandler, OLEDRunner_MLNExperiments}
import app.runners.MLNDataHandler.MLNDataOptions
import app.runutils.Globals
import com.mongodb.casbah.MongoClient
import com.typesafe.scalalogging.LazyLogging
import experiments.datautils.caviar_intervals.MeetingTrainingData
import iled.ILED
import iled.ILED.updateSupport
import logic.Examples.Example
import logic._
import utils.DataUtils.{DataAsExamples, DataAsIntervals}
import utils.{ASP, CaviarUtils, Utils}
import xhail.Xhail
/**
* Created by nkatz on 9/14/17.
*/
object HillClimbing extends App with LazyLogging {
//runCaviarMLN()
runCaviarFull()
def runCaviarFull() = {
Globals.glvalues("perfect-fit") = "false"
val chunkSize = 1
val data = MeetingTrainingData.getMeetingTrainingData(3, randomOrder = false)
//List(MovingTrainingData.getMovingTrainingData(trainSetNum, randomOrder = randomOrder))
val mongoClient = MongoClient()
//val collection = mongoClient("caviar-whole")("examples")
val collection = mongoClient("ctm")("examples")
//def getTrainingData() = CaviarUtils.getDataFromIntervals(collection, "meeting", data.asInstanceOf[DataAsIntervals].trainingSet, chunkSize)
def getTrainingData() = {
val chunked =
if (chunkSize > 1) collection.find().limit(100).map(x => Example(x)).sliding(chunkSize, chunkSize - 1)
else collection.find().limit(100).map(x => Example(x)).sliding(chunkSize, chunkSize)
chunked map { x =>
val merged = x.foldLeft(Example()) { (z, y) =>
new Example(annot = z.annotation ++ y.annotation, nar = z.narrative ++ y.narrative, _time = x.head.time)
}
merged
}
}
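    /* Chunking note: sliding(n, n - 1) produces windows that overlap on a single
     * element, so consecutive chunks share one example, e.g.
     *
     *   List(1, 2, 3, 4, 5).sliding(3, 2).toList == List(List(1, 2, 3), List(3, 4, 5))
     *
     * For chunkSize == 1 the non-overlapping variant is used, since a sliding
     * step of 0 is not allowed.
     */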
def iterateOnce(selectedNode: Theory, bottomTheory: Theory, gl: Globals) = {
val children = generateChildrenNodes(selectedNode, bottomTheory, getTrainingData(), gl).filter(x => x != Theory())
scoreNodes(children, gl)
val f1 = (t: Theory) => t.stats._6
val selectedChildNode = children.sortBy(x => -f1(x)).head // sort by f1-score
logger.info(s"Best theory so far (F1-score ${selectedChildNode.stats._6}):\n${selectedChildNode.tostring}")
selectedChildNode
}
def scoreNodes(children: Vector[Theory], gl: Globals) = {
logger.info("Scoring children nodes")
children.foreach { childNode =>
crossVal(childNode, getTrainingData(), "", gl)
}
//children.foreach(x => println(x.tostring + " " + x.stats._6))
}
//val globals = new Globals("/home/nkatz/dev/iled/datasets/Caviar/meeting", "")
val globals = new Globals("/home/nkatz/dev/iled/datasets/CTM/whole-hierarchy")
val bottomTheory = constructBottomTheory(getTrainingData(), globals)
val iterations = 8
val theory = (1 to iterations).foldLeft(Theory()) { (x, y) =>
logger.info(s"Iteration $y")
iterateOnce(x, bottomTheory, globals)
}
logger.info("Done")
logger.info("Cross-validation...")
val testSet = CaviarUtils.getDataFromIntervals(collection, "meeting", data.asInstanceOf[DataAsIntervals].testingSet, chunkSize, withChunking = false)
val theory_ = Theory(theory.clauses)
crossVal(theory_, testSet, "", globals) // generate new theory to clear the stats counter
logger.info(s"F1-score on test set: ${theory_.stats._6}")
}
def runCaviarMLN() = {
Globals.glvalues("perfect-fit") = "false"
val foldPath = "/home/nkatz/dev/CAVIAR_MLN/CAVIAR_MLN/move/fold_2"
val chunkSize = 50
val opts = new MLNDataOptions(foldPath, chunkSize)
val dataset = getData(opts)
val globals = new Globals("/home/nkatz/dev/iled/datasets/CaviarMLN")
val bottomTheory = constructBottomTheory(dataset, globals)
//println(bottomTheory.tostring)
val iterations = 2
/*
// Just a test. Run XHAIL:
val exmpls = getData(new MLNDataOptions(foldPath, 10000)).next()
val theory = Xhail.findHypothesis(bottomTheory.clauses,
examples = Map("annotation" -> exmpls.annotationASP, "narrative" -> exmpls.narrativeASP),
jep= new Jep(), globals = globals)
logger.info(theory.tostring)
*/
val theory = (1 to iterations).foldLeft(Theory()) { (x, y) =>
logger.info(s"Iteration $y")
iterateOnce(x, bottomTheory, globals, opts)
}
logger.info("Done")
logger.info("Cross-validation...")
val testSet = MLNDataHandler.getTestingData(opts)
val theory_ = Theory(theory.clauses)
crossVal(theory_, testSet, "", globals) // generate new theory to clear the stats counter
logger.info(s"F1-score on test set: ${theory_.stats._6}")
//iterateOnce(Theory(), bottomTheory, jep, globals, opts)
}
def getData(opts: MLNDataOptions) = MLNDataHandler.getTrainingData(opts)
def iterateOnce(selectedNode: Theory, bottomTheory: Theory, gl: Globals, opts: MLNDataOptions) = {
val children = generateChildrenNodes(selectedNode, bottomTheory, getData(opts), gl).filter(x => x != Theory())
scoreNodes(children, gl, opts)
val f1 = (t: Theory) => t.stats._6
val selectedChildNode = children.sortBy(x => -f1(x)).head // sort by f1-score
logger.info(s"Best theory so far (F1-score ${selectedChildNode.stats._6}):\n${selectedChildNode.tostring}")
selectedChildNode
}
def scoreNodes(children: Vector[Theory], gl: Globals, opts: MLNDataOptions) = {
logger.info("Scoring children nodes")
children.foreach { childNode =>
crossVal(childNode, getData(opts), "", gl)
}
//children.foreach(x => println(x.tostring + " " + x.stats._6))
}
def generateChildrenNodes(currentNode: Theory, bottomTheory: Theory, trainingSet: Iterator[Example], gl: Globals) = {
logger.info("Generating children nodes")
trainingSet.foldLeft(Vector[Theory]()){ (theories, newExample) =>
println(s"Generating children at example ${newExample.time}")
val isSat = ILED.isSAT(currentNode, newExample, ASP.check_SAT_Program, gl)
if (isSat) {
theories
} else {
val aspFile: File = utils.Utils.getTempFile("aspInduction", ".lp")
val (_, use2AtomsMap, defeasible, use3AtomsMap, _, _) =
ASP.inductionASPProgram(kernelSet = bottomTheory, priorTheory = currentNode, examples = newExample.toMapASP, aspInputFile = aspFile, globals = gl)
val answerSet = ASP.solve("iled", use2AtomsMap ++ use3AtomsMap, aspFile, newExample.toMapASP)
if (answerSet != Nil) {
val newRules = Rules.getNewRules(answerSet.head.atoms, use2AtomsMap)
ILED.updateSupport(newRules, bottomTheory)
val icRules = Rules.getInconsistentRules(answerSet.head.atoms, currentNode, use3AtomsMap)
val retainedRules = Theory(currentNode.clauses.filter(x => icRules.forall(y => y.rule != x)))
updateSupport(retainedRules, bottomTheory)
//val refinedRules = Rules.getRefined(icRules, retainedRules, newRules, newExample, "fullSupport", jep, gl)
val refinedRules = icRules.map(x => x.initialRefinement)
val newTheory = new PriorTheory(retainedRules, newRules, Theory(refinedRules)).merge
if (theories.exists(theory => theory.thetaSubsumes(newTheory) && newTheory.thetaSubsumes(theory))) theories else theories :+ newTheory
} else {
theories
}
}
} //.map(x => x.initialRefinement)
}
def constructBottomTheory(trainingSet: Iterator[Example], globals: Globals): Theory = {
val infile = Utils.getTempFile("example", ".lp")
val bk = globals.BK_WHOLE_EC
Globals.glvalues("perfect-fit") = "false"
var time = 0
val (accumKernel, accumAnnotation, accumNarrative) =
trainingSet.foldLeft(List[Clause](), List[String](), List[String]()) { (x, y) =>
val ker = x._1
val annotAccum = x._2
val narrativeAccum = x._3
println(y.time.toInt)
if (y.time.toInt <= time) time = y.time.toInt
// generate a kernel set from the current example
val interpretation = y.annotationASP ++ y.narrativeASP
Utils.writeToFile(infile, "overwrite") { p => interpretation.foreach(p.println) }
val (_, varKernel) =
Xhail.runXhail(fromFile = infile.getAbsolutePath, kernelSetOnly = true, bkFile = bk, globals = globals)
logger.info("Compressing bottom theory")
val usefulNewBottomRules = varKernel.foldLeft(List[Clause]()) { (accum, bottomClause) =>
if (ker.forall(p => !bottomClause.thetaSubsumes(p))) {
accum :+ bottomClause
} else {
accum
}
}
//(ker ++ varKernel, annotAccum ++ y.annotation, narrativeAccum ++ y.narrative)
(ker ++ usefulNewBottomRules, annotAccum ++ y.annotation, narrativeAccum ++ y.narrative)
}
//val compressedKernel = Theory(LogicUtils.compressTheory(accumKernel))
val compressedKernel = Theory(accumKernel)
compressedKernel
}
def crossVal(t: Theory, data: Iterator[Example], handCraftedTheoryFile: String = "", globals: Globals) = {
while (data.hasNext) {
val e = data.next()
evaluateTheory(t, e, handCraftedTheoryFile, globals)
}
//val stats = t.stats
//(stats._1, stats._2, stats._3, stats._4, stats._5, stats._6)
}
def evaluateTheory(theory: Theory, e: Example, handCraftedTheoryFile: String = "", globals: Globals): Unit = {
val varbedExmplPatterns = globals.EXAMPLE_PATTERNS_AS_STRINGS
val coverageConstr = s"${globals.TPS_RULES}\n${globals.FPS_RULES}\n${globals.FNS_RULES}"
val t =
if (theory != Theory()) {
theory.clauses.map(x => x.withTypePreds(globals).tostring).mkString("\n")
} else {
globals.INCLUDE_BK(handCraftedTheoryFile)
}
val show = globals.SHOW_TPS_ARITY_1 + globals.SHOW_FPS_ARITY_1 + globals.SHOW_FNS_ARITY_1
val ex = e.tostring
val program = ex + globals.INCLUDE_BK(globals.BK_CROSSVAL) + t + coverageConstr + show
val f = Utils.getTempFile(s"eval-theory-${UUID.randomUUID().toString}-${System.currentTimeMillis()}", ".lp")
Utils.writeLine(program, f.getCanonicalPath, "overwrite")
val answerSet = ASP.solve(task = Globals.INFERENCE, aspInputFile = f)
if (answerSet.nonEmpty) {
val atoms = answerSet.head.atoms
atoms.foreach { a =>
val lit = Literal.parse(a)
val inner = lit.terms.head
lit.predSymbol match {
case "tps" => theory.tps += inner.tostring
case "fps" => theory.fps += inner.tostring
case "fns" => theory.fns += inner.tostring
}
}
}
}
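  /* Sketch of the atoms counted above (the exact inner term depends on the
   * example patterns declared in Globals): the solver returns atoms such as
   * tps(holdsAt(meeting(id1, id2), 3440)), whose inner term is stored in the
   * theory's tps/fps/fns accumulators for precision/recall bookkeeping.
   */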
}
OLED | OLED-master/src/main/scala/mcts/MCTS_FOL.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package mcts
import app.runners.MLNDataHandler
import app.runners.MLNDataHandler.MLNDataOptions
import app.runutils.Globals
import com.typesafe.scalalogging.LazyLogging
import logic.Theory
import mcts.HillClimbing.{constructBottomTheory, generateChildrenNodes, getData, scoreNodes, crossVal}
/**
* Created by nkatz on 9/19/17.
*/
object MCTS_FOL extends LazyLogging {
/* TODO Need to implement the propagation to all theories that subsume the best child */
def main(args: Array[String]) = {
    runCaviarMLN()
}
  def runCaviarMLN() = {
Globals.glvalues("perfect-fit") = "false"
val foldPath = "/home/nkatz/dev/CAVIAR_MLN/CAVIAR_MLN/move/fold_2"
val chunkSize = 50
val opts = new MLNDataOptions(foldPath, chunkSize)
val globals = new Globals("/home/nkatz/dev/iled/datasets/CaviarMLN/move")
val bottomTheory = constructBottomTheory(getData(opts), globals)
val iterations = 4
val exploreRate = 1.0 / Math.sqrt(2) //0.005 //
val f1 = (t: Theory) => t.stats._6
val rootNode = RootNode()
// Generate the 1rst-level children.
generateAndScoreChildren(rootNode, bottomTheory, globals, opts, 0)
    // Descend down the tree a few times and return the best
// theory from the last iteration.
val bestNode = (1 to iterations).foldLeft(rootNode.asInstanceOf[TreeNode]) { (theorySearchedLast, iterCount) =>
logger.info(s"Iteration $iterCount")
val bestChild = rootNode.descendToBestChild(exploreRate)
logger.info(s"Best leaf node selected (MCTS score: ${bestChild.getMCTSScore(exploreRate)} | id: ${bestChild.id}):\n${bestChild.theory.tostring}")
val newNodes = generateAndScoreChildren(bestChild, bottomTheory, globals, opts, iterCount)
val bestChildNode = newNodes.maxBy(x => f1(x.theory))
bestChildNode.propagateReward(f1(bestChildNode.theory))
if (theorySearchedLast.theory == Theory()) {
logger.info(s"Best theory so far (F1-score ${f1(bestChildNode.theory)} | id: ${bestChildNode.id}):\n${bestChildNode.theory.tostring}")
bestChildNode
} else {
if (f1(bestChildNode.theory) > f1(theorySearchedLast.theory)) {
logger.info(s"Best theory so far (F1-score ${f1(bestChildNode.theory)} | id: ${bestChildNode.id}):\n${bestChildNode.theory.tostring}")
bestChildNode //.theory
} else {
logger.info(s"Best theory so far (F1-score ${f1(theorySearchedLast.theory)} | id: ${theorySearchedLast.id}):\n${theorySearchedLast.theory.tostring}")
theorySearchedLast
}
}
}
logger.info("Done")
logger.info("Cross-validation...")
val testSet = MLNDataHandler.getTestingData(opts)
val theory_ = Theory(bestNode.theory.clauses).compress
crossVal(theory_, testSet, "", globals) // generate new theory to clear the stats counter
logger.info(s"F1-score on test set: ${theory_.stats._6}")
}
def generateAndScoreChildren(fromNode: TreeNode, bottomTheory: Theory, gl: Globals, opts: MLNDataOptions, iterationCount: Int) = {
require(fromNode.isLeafNode())
val newTheories = generateChildrenNodes(fromNode.theory, bottomTheory, getData(opts), gl)
scoreNodes(newTheories, gl, opts)
// The depth is used in the id generation of the children nodes.
val depth = fromNode.getDepth() + 1
val newNodes = newTheories.foldLeft(1, Vector[InnerNode]()) { (x, theory) =>
val (newNodeCount, newNodes) = (x._1, x._2)
val id = s"$iterationCount-$depth-$newNodeCount"
val newNode = InnerNode(id, theory, fromNode)
(newNodeCount + 1, newNodes :+ newNode)
}._2
newNodes foreach { node =>
// Add each theory's f1-score to the corresponding node's rewards vector
// and increment the node's visits counter.
node.updateRewards(node.theory.stats._6)
node.incrementVisits()
// Finally, add the new node as a child to the parent node.
fromNode.addChild(node)
}
/** FOR DEBUGGING ONLY */
//println(newNodes.map(x => x.theory.tostring + " " + x.theory.stats._6).foreach(x => println(x+"\n")))
newNodes
}
}
OLED | OLED-master/src/main/scala/mcts/MCTS_FOL_Online.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package mcts
import java.io.File
import java.util.UUID
import app.runners.MLNDataHandler
import app.runners.MLNDataHandler.MLNDataOptions
import app.runutils.Globals
import com.typesafe.scalalogging.LazyLogging
import iled.ILED
import logic.Examples.Example
import logic.{Literal, PriorTheory, Rules, Theory}
import mcts.HillClimbing.{getData, crossVal}
import utils.{ASP, Utils}
import xhail.Xhail
/**
* Created by nkatz on 9/19/17.
*/
object MCTS_FOL_Online extends LazyLogging {
/* TODO Need to implement the propagation to all theories that subsume the best child */
def main(args: Array[String]) = {
    runCaviarMLN()
}
  def runCaviarMLN() = {
/*-----------------------------------------------*/
Globals.glvalues("perfect-fit") = "false"
Globals.glvalues("smallest-nonempty") = "true"
/*-----------------------------------------------*/
val foldPath = "/home/nkatz/dev/CAVIAR_MLN/CAVIAR_MLN/move/fold_2"
val chunkSize = 500
val opts = new MLNDataOptions(foldPath, chunkSize)
val globals = new Globals("/home/nkatz/dev/iled/datasets/CaviarMLN/move")
val exploreRate = 0.005 //1.0/Math.sqrt(2) //
val rootNode = RootNode()
val repeatFor = 1
// Generate the 1rst-level child.
//generateAndScoreChildren(rootNode, trainingData.next(), jep, globals, opts, 0)
var exmplsCount = 1
for (x <- 1 to repeatFor) {
val trainingData = getData(opts)
while (trainingData.hasNext) {
val exmpl = trainingData.next()
// First, generate a new kernel set from the new example and a new theory from it,
// without assuming any prior theory T. If there is no equivalent theory in the first
// level of children of the root node, add T as a node there (so, expand the tree horizontally)
val newLevelOneNode = generateChildNode(rootNode.theory, exmpl, globals)
if (newLevelOneNode != Theory()) {
if (rootNode.children.forall(p => !(p.theory.thetaSubsumes(newLevelOneNode) && newLevelOneNode.thetaSubsumes(p.theory)))) {
logger.info("Added new child at level 1 (tree expanded horizontally).")
val score = scoreNode(newLevelOneNode, exmpl, globals, opts)
val n = InnerNode("new-level-1-node", newLevelOneNode, rootNode)
n.updateRewards(score)
n.incrementVisits()
rootNode.addChild(n)
n.propagateReward(score)
}
}
if (rootNode.children.nonEmpty) {
val bestChild = rootNode.descendToBestChild(exploreRate)
//logger.info(s"Best leaf node selected (MCTS score: ${bestChild.getMCTSScore(exploreRate)} | id: ${bestChild.id}):\n${bestChild.theory.tostring}")
val newNode = generateAndScoreChildren(bestChild, exmpl, globals, opts, exmplsCount)
if (newNode != bestChild) {
logger.info(s"\nLeaf node: (MCTS score: ${bestChild.getMCTSScore(exploreRate)} |" +
s" mean f1-score: ${bestChild.theory.fscore} | visits: ${bestChild.visits} | id: ${bestChild.id}):\n${bestChild.theory.tostring}\nwas expanded " +
s"to (MCTS score: ${newNode.getMCTSScore(exploreRate)} |" +
s" mean f1-score: ${newNode.theory.fscore} | visits: ${newNode.visits} | id: ${newNode.id}):\n${newNode.theory.tostring}\n")
}
}
exmplsCount += 1
}
}
var finalTheory = rootNode.descendToBestChild(exploreRate).theory
if (finalTheory == Theory()) finalTheory = rootNode.descendToBestChild(exploreRate).parentNode.theory
logger.info(s"Final theory:\n${finalTheory.tostring}")
logger.info("Done")
logger.info("Cross-validation...")
val testSet = MLNDataHandler.getTestingData(opts)
val theory_ = Theory(finalTheory.clauses).compress // generate new theory to clear the stats counter
crossVal(theory_, testSet, "", globals)
logger.info(s"F1-score on test set: ${theory_.stats._6} | (tps, fps, fns) = (${theory_.stats._1}, ${theory_.stats._2}, ${theory_.stats._3})")
}
def generateAndScoreChildren(fromNode: TreeNode, exmpl: Example, gl: Globals, opts: MLNDataOptions, iterationCount: Int) = {
require(fromNode.isLeafNode())
val childTheory = generateChildNode(fromNode.theory, exmpl, gl)
// The depth is used in the id generation of the children nodes.
val depth = fromNode.getDepth() + 1
if (childTheory != Theory()) {
val score1 = scoreNode(childTheory, exmpl, gl, opts)
val score2 = scoreNode(fromNode.theory, exmpl, gl, opts)
if (score1 > score2) {
val id = s"$iterationCount-$depth-1"
val newNode = InnerNode(id, childTheory, fromNode)
val score = score1 - score2
newNode.updateRewards(score)
newNode.incrementVisits()
fromNode.addChild(newNode)
newNode.propagateReward(score)
newNode
} else {
        scoreAndReturnNode(fromNode, exmpl, gl, opts)
}
} else {
      scoreAndReturnNode(fromNode, exmpl, gl, opts)
}
}
  def scoreAndReturnNode(node: TreeNode, exmpl: Example, gl: Globals, opts: MLNDataOptions) = {
val score = scoreNode(node.theory, exmpl, gl, opts)
node.updateRewards(score)
node.incrementVisits()
node.propagateReward(score)
node
}
def scoreNode(node: Theory, exmpl: Example, gl: Globals, opts: MLNDataOptions) = {
//logger.info("Scoring child node")
evaluateTheory(node, exmpl, "", gl)
//evaluateTheory(parentNode, exmpl, jep, "", gl)
val score = node.fscore
score
}
def evaluateTheory(theory: Theory, e: Example, handCraftedTheoryFile: String = "", globals: Globals): Unit = {
val varbedExmplPatterns = globals.EXAMPLE_PATTERNS_AS_STRINGS
val coverageConstr = s"${globals.TPS_RULES}\n${globals.FPS_RULES}\n${globals.FNS_RULES}"
val t =
if (theory != Theory()) {
theory.clauses.map(x => x.withTypePreds(globals).tostring).mkString("\n")
} else {
globals.INCLUDE_BK(handCraftedTheoryFile)
}
val show = globals.SHOW_TPS_ARITY_1 + globals.SHOW_FPS_ARITY_1 + globals.SHOW_FNS_ARITY_1
val ex = e.tostring
val program = ex + globals.INCLUDE_BK(globals.BK_CROSSVAL) + t + coverageConstr + show
val f = Utils.getTempFile(s"eval-theory-${UUID.randomUUID().toString}-${System.currentTimeMillis()}", ".lp")
Utils.writeLine(program, f.getCanonicalPath, "overwrite")
val answerSet = ASP.solve(task = Globals.INFERENCE, aspInputFile = f)
if (answerSet.nonEmpty) {
val atoms = answerSet.head.atoms
atoms.foreach { a =>
val lit = Literal.parse(a)
//val inner = lit.terms.head
lit.predSymbol match {
case "tps" => theory._tps += 1
case "fps" => theory._fps += 1
case "fns" => theory._fns += 1
}
}
}
}
def generateChildNode(currentNode: Theory, currentExample: Example, gl: Globals) = {
//logger.info("Generating children nodes")
//println(s"Generating children at example ${currentExample.time}")
val isSat = ILED.isSAT(currentNode, currentExample, ASP.check_SAT_Program, gl)
if (isSat) {
Theory()
} else {
val interpretation = currentExample.annotationASP ++ currentExample.narrativeASP
val infile = Utils.getTempFile("example", ".lp")
Utils.writeToFile(infile, "overwrite") { p => interpretation.foreach(p.println) }
val bk = gl.BK_WHOLE_EC
val (_, varKernel) = Xhail.runXhail(fromFile = infile.getAbsolutePath, kernelSetOnly = true, bkFile = bk, globals = gl)
val aspFile: File = utils.Utils.getTempFile("aspInduction", ".lp")
val (_, use2AtomsMap, defeasible, use3AtomsMap, _, _) =
ASP.inductionASPProgram(kernelSet = Theory(varKernel),
priorTheory = currentNode, examples = currentExample.toMapASP, aspInputFile = aspFile, globals = gl)
val answerSet = ASP.solve("iled", use2AtomsMap ++ use3AtomsMap, aspFile, currentExample.toMapASP)
if (answerSet != Nil) {
val newRules = Rules.getNewRules(answerSet.head.atoms, use2AtomsMap)
ILED.updateSupport(newRules, Theory(varKernel))
val icRules = Rules.getInconsistentRules(answerSet.head.atoms, currentNode, use3AtomsMap)
val retainedRules = Theory(currentNode.clauses.filter(x => icRules.forall(y => y.rule != x)))
//iled.ILED.updateSupport(retainedRules, bottomTheory) //no need for that, each rule has one rule in its support, its bottom clause, this doesn't change
val refinedRules = icRules.map(x => x.initialRefinement)
val newTheory = new PriorTheory(retainedRules, newRules, Theory(refinedRules)).merge
newTheory
/*
if (theories.exists(theory => theory.thetaSubsumes(newTheory) && newTheory.thetaSubsumes(theory))) theories else theories :+ newTheory
} else {
theories
}
*/
} else {
Theory()
}
}
}
}
OLED | OLED-master/src/main/scala/mcts/MathExchange.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package mcts
import app.runutils.Globals
import com.mongodb.casbah.Imports.{BasicDBList, BasicDBObject}
import com.mongodb.casbah.MongoClient
import logic.Examples.Example
import logic.Theory
import com.mongodb.casbah.Imports._
import com.typesafe.scalalogging.LazyLogging
import mcts.HillClimbing._
/**
* Created by nkatz on 9/22/17.
*/
/**
* This is just a test to run MCTS with the MathExchange data
*/
object MathExchange extends App with LazyLogging {
Globals.glvalues("perfect-fit") = "false"
val chunkSize = 10
val globals = new Globals("/home/nkatz/dev/MathExchange-for-OLED")
val data = {
val mongoClient = MongoClient()
val collection = mongoClient("MathExchange")("examples")
val exmpls = collection.find().foldLeft(List[Example]()){ (accum, dbObj) =>
val time = dbObj.asInstanceOf[BasicDBObject].get("time").toString
val annotation = dbObj.get("annotation").asInstanceOf[BasicDBList].toList.map(x => x.toString)
val narrative = dbObj.get("narrative").asInstanceOf[BasicDBList].toList.map(x => x.toString)
accum :+ new Example(annot = annotation, nar = narrative, _time = time)
}
val chunked = exmpls.sliding(chunkSize, chunkSize - 1).toList
chunked map { x =>
val merged = x.foldLeft(Example()) { (z, y) =>
new Example(annot = z.annotation ++ y.annotation, nar = z.narrative ++ y.narrative, _time = x.head.time)
}
merged
}
}
val bottomTheory = HillClimbing.constructBottomTheory(data.toIterator, globals)
println(bottomTheory.tostring)
val iterations = 10
val exploreRate = 1.0 / Math.sqrt(2)
val f1 = (t: Theory) => t.stats._6
val rootNode = RootNode()
generateAndScoreChildren(rootNode, bottomTheory, globals, data, 0)
val bestNode = (1 to iterations).foldLeft(rootNode.asInstanceOf[TreeNode]) { (theorySearchedLast, iterCount) =>
logger.info(s"Iteration $iterCount")
val bestChild = rootNode.descendToBestChild(exploreRate)
logger.info(s"Best leaf node selected (MCTS score: ${bestChild.getMCTSScore(exploreRate)} | id: ${bestChild.id}):\n${bestChild.theory.tostring}")
val newNodes = generateAndScoreChildren(bestChild, bottomTheory, globals, data, iterCount)
val bestChildNode = newNodes.maxBy(x => f1(x.theory))
bestChildNode.propagateReward(f1(bestChildNode.theory))
if (theorySearchedLast.theory == Theory()) {
logger.info(s"Best theory so far (F1-score ${f1(bestChildNode.theory)} | id: ${bestChildNode.id}):\n${bestChildNode.theory.tostring}")
bestChildNode
} else {
if (f1(bestChildNode.theory) > f1(theorySearchedLast.theory)) {
logger.info(s"Best theory so far (F1-score ${f1(bestChildNode.theory)} | id: ${bestChildNode.id}):\n${bestChildNode.theory.tostring}")
bestChildNode //.theory
} else {
logger.info(s"Best theory so far (F1-score ${f1(theorySearchedLast.theory)} | id: ${theorySearchedLast.id}):\n${theorySearchedLast.theory.tostring}")
theorySearchedLast
}
}
}
logger.info("Done")
logger.info("Cross-validation...")
val theory_ = Theory(bestNode.theory.clauses).compress
crossVal(theory_, data.toIterator, "", globals) // generate new theory to clear the stats counter
logger.info(s"F1-score on test set: ${theory_.stats._6}")
def generateAndScoreChildren(fromNode: TreeNode, bottomTheory: Theory, gl: Globals, data: List[Example], iterationCount: Int) = {
require(fromNode.isLeafNode())
val newTheories = generateChildrenNodes(fromNode.theory, bottomTheory, data.toIterator, gl)
scoreNodes(newTheories, gl, data)
// The depth is used in the id generation of the children nodes.
val depth = fromNode.getDepth() + 1
val newNodes = newTheories.foldLeft(1, Vector[InnerNode]()) { (x, theory) =>
val (newNodeCount, newNodes) = (x._1, x._2)
val id = s"$iterationCount-$depth-$newNodeCount"
val newNode = InnerNode(id, theory, fromNode)
(newNodeCount + 1, newNodes :+ newNode)
}._2
newNodes foreach { node =>
// Add each theory's f1-score to the corresponding node's rewards vector
// and increment the node's visits counter.
node.updateRewards(node.theory.stats._6)
node.incrementVisits()
// Finally, add the new node as a child to the parent node.
fromNode.addChild(node)
}
/** FOR DEBUGGING ONLY */
//println(newNodes.map(x => x.theory.tostring + " " + x.theory.stats._6).foreach(x => println(x+"\n")))
newNodes
}
def scoreNodes(children: Vector[Theory], gl: Globals, data: List[Example]) = {
logger.info("Scoring children nodes")
children.foreach { childNode =>
crossVal(childNode, data.toIterator, "", gl)
}
//children.foreach(x => println(x.tostring + " " + x.stats._6))
}
}
OLED | OLED-master/src/main/scala/mcts/TransformCTM.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package mcts
import com.mongodb.casbah.Imports.{BasicDBList, BasicDBObject}
import com.mongodb.casbah.MongoClient
import com.mongodb.casbah.commons.MongoDBObject
import com.mongodb.casbah.Imports._
/**
* Created by nkatz on 9/28/17.
*/
object TransformCTM extends App {
object MongoEntry {
def apply() = {
new MongoEntry(Nil, Nil, 0)
}
}
class MongoEntry(val annotation: List[String], val narrative: List[String], val time: Int)
val count = 0
val mongoClient = MongoClient()
//val collection = mongoClient("caviar-whole")("examples")
val collection = mongoClient("ctm")("examples")
collection.createIndex(MongoDBObject("example" -> 1))
val data = collection.find().sort(MongoDBObject("example" -> 1)).foldLeft(Vector[MongoEntry](), 0) { (x, obj) =>
val (accum, count) = (x._1, x._2)
val annotation = obj.asInstanceOf[BasicDBObject].get("pos").asInstanceOf[BasicDBList].toList.map(x => x.toString)
val narrative = obj.asInstanceOf[BasicDBObject].get("nar").asInstanceOf[BasicDBList].toList.map(x => x.toString)
(accum :+ new MongoEntry(annotation, narrative, count), count + 1)
}
data._1.foreach(x => println(x.annotation + "\n" + x.narrative + "\n" + x.time + "\n"))
mongoClient.dropDatabase("ctm")
data._1.foreach { x =>
val entry = MongoDBObject("time" -> x.time) ++ ("annotation" -> x.annotation) ++ ("narrative" -> x.narrative)
collection.insert(entry)
}
println("Done")
}
OLED | OLED-master/src/main/scala/mcts/TreeNode.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package mcts
import logic.Theory
/**
* Created by nkatz on 9/19/17.
*/
trait TreeNode {
var visits = 0
var rewards: Vector[Double] = Vector[Double]()
var children: Vector[InnerNode] = Vector[InnerNode]()
val theory: Theory = Theory()
val id: String = ""
def updateRewards(x: Double): Unit = this.rewards = this.rewards :+ x
def updateVisits(v: Int): Unit = this.visits = v
def incrementVisits(): Unit = this.visits = this.visits + 1
def meanReward(): Double = this.rewards.sum / this.visits
def addChild(x: InnerNode): Unit = this.children = this.children :+ x
def getBestChild(exploreRate: Double): InnerNode = this.children.maxBy(x => x.getMCTSScore(exploreRate))
def isLeafNode() = this.children.isEmpty
val isRootNode: Boolean = false
/* Abstract methods */
def getMCTSScore(exploreRate: Double): Double
def getDepth(): Int
def getAncestorsPath(): Vector[TreeNode]
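  /* Backpropagation step of MCTS: the reward of a newly evaluated node is added
   * to the reward vector of every ancestor up to (and including) the root, and
   * each ancestor's visit count is incremented. */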
def propagateReward(reward: Double) = {
val ancestors = getAncestorsPath()
ancestors foreach { node =>
node.updateRewards(reward)
node.incrementVisits()
}
}
}
/**
*
* The id of a node is a string of the form 2-3-12, where 2 is the iteration number,
* 3 is the "depth" of the node and 12 is a counter of the the other nodes generated at this round.
*
*/
case class RootNode(override val id: String, override val theory: Theory) extends TreeNode {
override val isRootNode = true
this.visits = 1 // increment its visits upon generation.
override def getMCTSScore(exploreRate: Double): Double = 0.0
override def getDepth(): Int = 0
override def getAncestorsPath() = Vector[TreeNode]()
def descendToBestChild(exploreRate: Double) = {
var reachedLeaf = false
var bestChild = this.getBestChild(exploreRate)
while (!reachedLeaf) {
if (!bestChild.isLeafNode()) {
bestChild = bestChild.getBestChild(exploreRate)
} else {
reachedLeaf = true
}
}
bestChild
}
}
object RootNode {
def apply() = {
new RootNode("0", Theory())
}
}
case class InnerNode(override val id: String, override val theory: Theory, parentNode: TreeNode) extends TreeNode {
override val isRootNode = false
override def getMCTSScore(exploreRate: Double) =
meanReward() + exploreRate * Math.sqrt(2 * Math.log(this.parentNode.visits) / this.visits)
override def getDepth() = {
var reachedRoot = false
var parent = this.parentNode
var depth = 1
while (!reachedRoot) {
parent match {
case _: InnerNode =>
depth = depth + 1
parent = parent.asInstanceOf[InnerNode].parentNode
case _: RootNode => reachedRoot = true
}
}
depth
}
override def getAncestorsPath() = {
var reachedRoot = false
var parent = this.parentNode
var ancestors = Vector[TreeNode]() //Vector(parent)
while (!reachedRoot) {
parent match {
case _: InnerNode =>
ancestors = ancestors :+ parent
parent = parent.asInstanceOf[InnerNode].parentNode
case _: RootNode =>
ancestors = ancestors :+ parent
reachedRoot = true
}
//ancestors = ancestors :+ parent
}
ancestors
}
}
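
// Illustrative only (not part of the original source): a minimal sketch of growing a
// two-node tree with the classes above and backing a simulation reward up to the root.
// The id "1-1-1" follows the iteration-depth-counter convention described above; the
// empty Theory() mirrors RootNode.apply().
object TreeNodeExample extends App {
  val root = RootNode()
  val child = InnerNode("1-1-1", Theory(), root)
  root.addChild(child)
  child.propagateReward(0.5) // updates rewards/visits of the ancestors (here, only the root)
  child.updateRewards(0.5) // the node's own statistics are updated separately
  child.incrementVisits()
  // UCT-style selection: mean reward plus an exploration term (see getMCTSScore above).
  println(root.getBestChild(exploreRate = 1.0).id)
}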
| 4,120 | 24.128049 | 115 | scala |
OLED | OLED-master/src/main/scala/mcts/parallel/Eval.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package mcts.parallel
import java.util.UUID
import app.runutils.Globals
import logic.Examples.Example
import logic.{Literal, Theory}
import utils.{ASP, Utils}
/**
* Created by nkatz on 9/22/17.
*/
object Eval {
def crossVal(t: Theory, data: Iterator[Example], globals: Globals) = {
while (data.hasNext) {
val e = data.next()
val s = t.clauses.map(x => x.withTypePreds(globals).tostring).mkString("\n")
evaluateTheory(t, s, e, globals)
}
}
def evaluateTheory(theory: Theory, stringTheory: String, e: Example, globals: Globals): Unit = {
val varbedExmplPatterns = globals.EXAMPLE_PATTERNS_AS_STRINGS
val coverageConstr = s"${globals.TPS_RULES}\n${globals.FPS_RULES}\n${globals.FNS_RULES}"
val show = globals.SHOW_TPS_ARITY_1 + globals.SHOW_FPS_ARITY_1 + globals.SHOW_FNS_ARITY_1
val ex = e.tostring
val program = ex + globals.INCLUDE_BK(globals.BK_CROSSVAL) + stringTheory + coverageConstr + show
val f = Utils.getTempFile(s"eval-theory-${UUID.randomUUID().toString}-${System.currentTimeMillis()}", ".lp")
Utils.writeLine(program, f.getCanonicalPath, "overwrite")
val answerSet = ASP.solve(task = Globals.INFERENCE, aspInputFile = f)
if (answerSet.nonEmpty) {
val atoms = answerSet.head.atoms
atoms.foreach { a =>
val lit = Literal.parse(a)
val inner = lit.terms.head
        lit.predSymbol match {
          case "tps" => theory.tps += inner.tostring
          case "fps" => theory.fps += inner.tostring
          case "fns" => theory.fns += inner.tostring
          case _ => // ignore any other atoms in the answer set (guards against a MatchError)
        }
}
}
}
}
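
// Illustrative only (not part of the original source): a minimal sketch of scoring a
// theory with Eval.crossVal. The BK path is a placeholder; Globals is assumed to be
// constructible from a directory with the required background knowledge files (as in
// mcts.parallel.Tests), and theory.tps/fps/fns are the mutable collections populated
// by evaluateTheory above.
object EvalExample {
  def run(theory: Theory, data: Iterator[Example], bkPath: String): Unit = {
    val globals = new Globals(bkPath)
    Eval.crossVal(theory, data, globals)
    println(s"tps: ${theory.tps.size}, fps: ${theory.fps.size}, fns: ${theory.fns.size}")
  }
}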
| 2,296 | 34.890625 | 112 | scala |
OLED | OLED-master/src/main/scala/mcts/parallel/ScorerMaster.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package mcts.parallel
import akka.actor.{Actor, Props}
import app.runutils.Globals
import app.runutils.IOHandling.InputSource
import logic.Examples.Example
import logic.Theory
/**
* Created by nkatz on 9/22/17.
*/
class ScorerMaster[T <: InputSource](
globals: Globals,
options: T,
dataFunction: T => Iterator[Example]) extends Actor {
private var theoriesCount = 0
private var scoredTheories = List[Theory]()
private def stringTheory(t: Theory) = t.clauses.map(x => x.withTypePreds(globals).tostring).mkString("\n")
private val cores = Runtime.getRuntime.availableProcessors
private val f1 = (t: Theory) => t.stats._6
def receive = {
case theories: Vector[Theory] =>
theoriesCount = theories.length
scoredTheories = List[Theory]()
      val jobsPerCore = math.ceil(theories.length.toDouble / cores).toInt
var i = 0
//theories.map(x => stringTheory(x)).grouped(jobsPerCore) foreach { jobs =>
theories.grouped(jobsPerCore) foreach { jobs =>
jobs foreach { theory =>
context.actorOf(Props(new ScorerWorker(globals, options, dataFunction)), name = s"scorer-slave-$i") ! theory
i += 1
}
}
case theory: Theory =>
scoredTheories = scoredTheories :+ theory
theoriesCount -= 1
if (theoriesCount == 0) {
//val bestTheory = scoredTheories.maxBy( x => f1(x) )
val bestTheory = scoredTheories.sortBy(x => -f1(x)).head
println(s"${bestTheory.tostring} ${f1(bestTheory)}")
}
}
}
| 2,233 | 29.60274 | 118 | scala |
OLED | OLED-master/src/main/scala/mcts/parallel/ScorerWorker.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package mcts.parallel
import akka.actor.{Actor, PoisonPill}
import app.runutils.Globals
import app.runutils.IOHandling.InputSource
import com.typesafe.scalalogging.LazyLogging
import logic.Examples.Example
import logic.Theory
/**
* Created by nkatz on 9/22/17.
*/
class ScorerWorker[T <: InputSource](globals: Globals, options: T,
dataFunction: T => Iterator[Example]) extends Actor with LazyLogging {
def receive = {
case theory: Theory =>
logger.info(s"Scoring\n${theory.tostring}\n")
Eval.crossVal(theory, dataFunction(options), globals)
sender ! theory
//self ! PoisonPill
}
}
| 1,320 | 29.72093 | 74 | scala |
OLED | OLED-master/src/main/scala/mcts/parallel/Tests.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package mcts.parallel
import akka.actor.{ActorSystem, Props}
import app.runners.MLNDataHandler.MLNDataOptions
import app.runutils.Globals
import mcts.HillClimbing.{constructBottomTheory, generateChildrenNodes, getData}
import mcts.RootNode
/**
* Created by nkatz on 9/22/17.
*/
object Tests extends App {
Globals.glvalues("perfect-fit") = "false"
val foldPath = "/home/nkatz/dev/CAVIAR_MLN/CAVIAR_MLN/move/fold_2"
val chunkSize = 50
val opts = new MLNDataOptions(foldPath, chunkSize)
val globals = new Globals("/home/nkatz/dev/BKExamples/BK-various-taks/CaviarMLN/move")
val bottomTheory = constructBottomTheory(getData(opts), globals)
val rootNode = RootNode()
val newTheories = generateChildrenNodes(rootNode.theory, bottomTheory, getData(opts), globals)
val system = ActorSystem("ActorSystem")
val scorer = system.actorOf(Props(new ScorerMaster(globals, opts, getData)))
scorer ! newTheories
}
| 1,634 | 28.727273 | 96 | scala |
OLED | OLED-master/src/main/scala/metric/AtomMetric.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package metric
import lomrf.logic._
/**
 * An atomic metric is a distance for atomic formulas that measures the
* structural distance of atoms by ignoring the variables.
*
* @param matcher a matcher function
*/
case class AtomMetric(matcher: Matcher) extends StructureMetric[AtomicFormula] {
/**
   * Distance for atoms. The function must obey the following properties:
*
* {{{
* 1. d(x, y) >= 0 for all x, y and d(x, y) = 0 if and only if x = y
* 2. d(x, y) = d(y, x) for all x, y
* 3. d(x, y) + d(y, z) >= d(x, z) for all x, y, z (triangle inequality)
* }}}
*
* @param xAtom an atom
* @param yAtom another atom
* @return a distance for the given atoms
*/
override def distance(xAtom: AtomicFormula, yAtom: AtomicFormula): Double =
if (xAtom.signature != yAtom.signature) 1
else if (xAtom.constants.isEmpty) 0 // in case no constants exist, distance should be zero
else termSeqDistance(xAtom.terms, yAtom.terms)
/**
* Distance for term sequences.
*
* @param termSeqA a term sequence
* @param termSeqB another term sequence
* @return a distance in the interval [0, 1] for the given term sequences
*/
@inline private def termSeqDistance(termSeqA: IndexedSeq[Term], termSeqB: IndexedSeq[Term]): Double =
(termSeqA zip termSeqB).map { case (a, b) => termDistance(a, b) }.sum / (2d * termSeqA.count(!_.isVariable))
/**
* Distance for individual terms.
*
   * @note If the given terms are term functions, then the distance between
   * their corresponding argument sequences is measured.
*
* @param xTerm a term
* @param yTerm another term
* @return a distance in the interval [0, 1] for the given terms.
*/
@inline private def termDistance(xTerm: Term, yTerm: Term): Double = (xTerm, yTerm) match {
case (x: Constant, y: Constant) if x.symbol == y.symbol => 0
case (_: Variable, _: Variable) => 0
case (x: TermFunction, y: TermFunction) if x.signature == y.signature => termSeqDistance(x.terms, y.terms)
case _ => 1
}
}
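
// Illustrative only (not part of the original source): a worked example of the distance
// above for same-signature atoms. For P(x, A) vs P(y, B) the variable pair contributes 0,
// the constant pair contributes 1, and each atom has one constant, so the distance is
// 1 / (2 * 1) = 0.5; atoms with different signatures (cf. metric.Test) are at distance 1.
object AtomMetricExample extends App {
  val m = AtomMetric(HungarianMatcher)
  val d = m.distance(
    AtomicFormula("P", Vector(Variable("x"), Constant("A"))),
    AtomicFormula("P", Vector(Variable("y"), Constant("B"))))
  println(d) // 0.5
}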
| 2,771 | 35.96 | 112 | scala |
OLED | OLED-master/src/main/scala/metric/Matcher.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package metric
import breeze.optimize.linear.KuhnMunkres
/**
* Matcher is any object that solves an assignment problem. The problem consists of finding
* a maximum cost matching (or a minimum cost perfect matching) in a bipartite graph. The input
* graph is usually represented as a cost matrix. Zero values define the absence of edges.
*
* === General Formulation ===
*
* Each problem instance has a number of agents and a number of tasks. Any agent can be assigned to
* any task, incurring a cost that may vary depending on the agent-task assignment. It is required
* that all tasks are assigned to exactly one agent in such a way that the total cost is minimized.
* In case the numbers of agents and tasks are equal and the total cost of the assignment for all tasks
* is equal to the sum of the costs for each agent then the problem is called the linear assignment problem.
*
* @see https://en.wikipedia.org/wiki/Assignment_problem
*/
trait Matcher extends (Seq[Seq[Double]] => (Array[Int], Double))
/**
* The Hungarian matcher is a combinatorial optimization algorithm that solves the assignment problem in
* polynomial time O(n^3).
*
* @see https://en.wikipedia.org/wiki/Hungarian_algorithm
*/
object HungarianMatcher extends Matcher {
/**
* It solves the assignment problem for the given cost matrix. The cost
* matrix represents the costs for each edge in the graph.
*
* @param costMatrix the bipartite graph cost matrix
* @return the cost of the optimal assignment
*/
override def apply(costMatrix: Seq[Seq[Double]]): (Array[Int], Double) = {
val unmatched = math.abs(costMatrix.length - costMatrix.head.length)
val maxDimension = math.max(costMatrix.length, costMatrix.head.length)
KuhnMunkres.extractMatching(costMatrix) match {
case (matches, cost) => matches.toArray -> (cost + unmatched) / maxDimension
}
}
}
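
// Illustrative only (not part of the original source): the HungarianMatcher above on a
// tiny 2x2 cost matrix. The optimal assignment maps 0 -> 0 and 1 -> 1, so the
// normalized cost is (0 + 0) / 2 = 0.0.
object HungarianMatcherExample extends App {
  val costMatrix = Seq(
    Seq(0.0, 1.0),
    Seq(1.0, 0.0))
  val (matching, cost) = HungarianMatcher(costMatrix)
  println(s"matching: ${matching.mkString(", ")}, cost: $cost")
}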
/**
* The Hausdorff matcher is based on the Hausdorff distance. The Hausdorff distance is the longest distance
* you can be forced to travel by an adversary that chooses a point in one set, from where you then must travel
* to the other set. In other words, it is the greatest of all the distances from a point in one set to the
* closest point in another set.
*
* @note The Hausdorff matcher can be used for solving the assignment problem, but the solution is not
* guaranteed to be the optimal one. Moreover, the matching is not guaranteed to be one to one.
* @see https://en.wikipedia.org/wiki/Hausdorff_distance
* Distance Between Herbrand Interpretations: A Measure for Approximations
* to a Target Concept (1997)
*/
object HausdorffMatcher extends Matcher {
/**
* It solves the assignment problem for a given cost matrix. The cost
* matrix represents the costs for each edge in the graph.
*
* @param costMatrix the bipartite graph cost matrix
* @return the cost of the assignment
*/
override def apply(costMatrix: Seq[Seq[Double]]): (Array[Int], Double) =
Array.empty[Int] -> math.max(costMatrix.map(_.min).max, costMatrix.transpose.map(_.min).max)
}
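
// Illustrative only (not part of the original source): the HausdorffMatcher above on the
// same 2x2 cost matrix. It returns no explicit matching (the array is empty); the cost is
// the larger of the row-minima maximum and the column-minima maximum, here 0.0.
object HausdorffMatcherExample extends App {
  val costMatrix = Seq(
    Seq(0.0, 1.0),
    Seq(1.0, 0.0))
  val (_, cost) = HausdorffMatcher(costMatrix)
  println(cost)
}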
| 3,847 | 42.727273 | 112 | scala |
OLED | OLED-master/src/main/scala/metric/Metric.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package metric
import lomrf.logic.AtomicFormula
import lomrf.mln.model.Evidence
/**
* A metric for atomic formulas is defined by a distance function over atoms
* and a distance function over sequences of atoms.
*/
trait Metric[A <: AtomicFormula] {
/**
* @return the absolute normalized distance
*/
protected def distance(x: Double, y: Double): Double =
if (x == 0 && y == 0) 0
else math.abs(x - y) / (x + y)
/**
   * Distance for atoms. The function may obey the following properties:
*
* {{{
* 1. d(x, y) >= 0 for all x, y and d(x, y) = 0 if and only if x = y
* 2. d(x, y) = d(y, x) for all x, y
* 3. d(x, y) + d(y, z) >= d(x, z) for all x, y, z (triangle inequality)
* }}}
*
* @see [[lomrf.logic.AtomicFormula]]
* @param xAtom an atom
* @param yAtom another atom
* @return a distance for the given atoms
*/
def distance(xAtom: A, yAtom: A): Double
/**
* Distance over sequences of atoms.
*
* @param xAtomSeq a sequence of atoms
* @param yAtomSeq another sequence of atoms
* @return a distance for the given sequences of atoms
*/
def distance(xAtomSeq: IndexedSeq[A], yAtomSeq: IndexedSeq[A]): Double
/**
* Append evidence information to the metric.
*
* @note It should be extended by metrics that can
* exploit evidence information (data driven).
*
* @param evidence an evidence database
* @return an updated metric
*/
def ++(evidence: Evidence): Metric[A] = this
/**
* Append information from atom sequences to the metric.
*
* @note It should be extended by metrics that can
* exploit atom sequences (data driven).
*
* @param atomSeqSeq a sequence of atom sequences.
* @return an updated metric
*/
def ++(atomSeqSeq: Seq[Seq[AtomicFormula]]): Metric[A] = this
}
/**
* A structure metric for atomic formulas is defined by a distance function over
* atoms and a distance function over sequences of atoms by specifying a matcher.
*
* @note It should be extended by metrics that compare the structure of
* the given atomic formulas.
*
* @tparam A the type of atomic formula
*/
trait StructureMetric[A <: AtomicFormula] extends Metric[A] {
// Matcher used for finding a mapping between atoms sequences
val matcher: Matcher
/**
* Distance over sequences of atoms.
*
* @param xAtomSeq a sequence of atoms
* @param yAtomSeq another sequence of atoms
* @return a distance for the given sequences of atoms
*/
final def distance(xAtomSeq: IndexedSeq[A], yAtomSeq: IndexedSeq[A]): Double = {
// Swap atom sequences
val (longAtomSeq, shortAtomSeq) =
if (xAtomSeq.length >= yAtomSeq.length) (xAtomSeq, yAtomSeq)
else (yAtomSeq, xAtomSeq)
// Compute the distance matrix for each pair of atoms
val distanceMatrix = longAtomSeq.map(x => shortAtomSeq.map(distance(x, _)))
// Compute a matching and a total cost
val (_, unweightedDistance) = matcher(distanceMatrix)
unweightedDistance
}
}
| 3,774 | 30.198347 | 82 | scala |
OLED | OLED-master/src/main/scala/metric/Test.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package metric
import lomrf.logic.{AtomicFormula, Constant, Variable}
/**
* Created by nkatz on 18/12/19.
*/
object Test extends App {
val metric = AtomMetric(HungarianMatcher)
val d = metric.distance(
IndexedSeq(AtomicFormula("A", Vector(Variable("x"), Constant("R")))),
IndexedSeq(AtomicFormula("B", Vector(Variable("y"), Constant("E"))))
)
println(d)
}
| 1,077 | 28.944444 | 73 | scala |
OLED | OLED-master/src/main/scala/oled/distributed/Dispatcher.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.distributed
import akka.actor.{Actor, Props}
import com.typesafe.scalalogging.LazyLogging
import logic.{Clause, LogicUtils, Theory}
import Structures.{FinalTheoryMessage, Initiated, Terminated}
import app.runutils.IOHandling.InputSource
import app.runutils.{Globals, RunningOptions}
import logic.Examples.Example
import oled.functions.DistributedOLEDFunctions.crossVal
/**
* Created by nkatz on 3/14/17.
*
*
* This actor starts two top-level actors to coordinate learning
* the initiated and the terminated part of the theory respectively.
*
*/
class Dispatcher[T <: InputSource](
val dataOptions: List[(T, T => Iterator[Example])],
val inputParams: RunningOptions,
val tasksNumber: Int,
testingDataOptions: T,
testingDataFunction: T => Iterator[Example]) extends Actor with LazyLogging {
private var counter = tasksNumber
private var initTheory = List[Clause]()
private var termTheory = List[Clause]()
private var initTrainingTime = ""
private var termTrainingTime = ""
private var theory = List[Clause]() // this is for future use with single-predicate learning
private var theoryTrainingTime = ""
private var initTotalMsgNum = 0
private var initTotalMsgSize = 0L
private var termTotalMsgNum = 0
private var termTotalMsgSize = 0L
def updateMessages(m: FinalTheoryMessage, what: String) = {
what match {
case "init" =>
initTotalMsgNum = m.totalMsgNum
initTotalMsgSize = m.totalMsgSize
case "term" =>
termTotalMsgNum = m.totalMsgNum
termTotalMsgSize = m.totalMsgSize
case _ => logger.info("UNKNOWN MESSAGE!")
}
}
def receive = {
case "go" =>
context.actorOf(Props(new TopLevelActor(dataOptions, inputParams, new Initiated)), name = "InitTopLevelActor") ! "go"
context.actorOf(Props(new TopLevelActor(dataOptions, inputParams, new Terminated)), name = "TermTopLevelActor") ! "go"
/*---------------------------------------------------------------------------*/
// For debugging (trying to see if the sub-linear speed-up is due to blocking)
/*---------------------------------------------------------------------------*/
case "go-no-communication" =>
context.actorOf(Props(new TopLevelActor(dataOptions, inputParams, new Initiated)), name = "InitTopLevelActor") ! "go-no-communication"
context.actorOf(Props(new TopLevelActor(dataOptions, inputParams, new Terminated)), name = "TermTopLevelActor") ! "go-no-communication"
case msg: FinalTheoryMessage =>
msg.targetPredicate match {
case x: Initiated =>
this.initTheory = msg.theory
this.initTrainingTime = msg.trainingTime
updateMessages(msg, "init")
case x: Terminated =>
this.termTheory = msg.theory
this.termTrainingTime = msg.trainingTime
updateMessages(msg, "term")
case _ =>
this.theory = msg.theory
this.theoryTrainingTime = msg.trainingTime
//updateMessages(msg)
}
counter -= 1
if (counter == 0) {
logger.info(s"\n\nThe initiated part of the theory is\n${Theory(this.initTheory).showWithStats}\nTraining" +
s" time: $initTrainingTime\nTotal messages: $initTotalMsgNum\nTotal message size: $initTotalMsgSize")
logger.info(s"\n\nThe terminated part of the theory is\n${Theory(this.termTheory).showWithStats}\nTraining" +
s" time: $termTrainingTime\nTotal messages: $termTotalMsgNum\nTotal message size: $termTotalMsgSize")
/*
* Cross-validation...
* */
val merged_ = Theory(this.initTheory ++ this.termTheory)
val compressed = Theory(LogicUtils.compressTheory(merged_.clauses))
/*------------------*/
// DEBUGGING-TESTING
/*------------------*/
//val filtered = Theory(compressed.clauses.filter(x => x.tps > 50))
val filtered = compressed
val data = testingDataFunction(testingDataOptions)
val (tps, fps, fns, precision, recall, fscore) =
crossVal(filtered, data = data, handCraftedTheoryFile = inputParams.evalth, globals = inputParams.globals, inps = inputParams)
val time = Math.max(this.initTrainingTime.toDouble, this.termTrainingTime.toDouble)
val theorySize = filtered.clauses.foldLeft(0)((x, y) => x + y.body.length + 1)
logger.info(s"\ntps: $tps\nfps: $fps\nfns: $fns\nprecision:" +
s" $precision\nrecall: $recall\nf-score: $fscore\ntraining time: " +
s"$time\ntheory size: $theorySize\n" +
s"Total number of messages: ${initTotalMsgNum + termTotalMsgNum}\n" +
s"Total message size: ${initTotalMsgSize + termTotalMsgSize}")
logger.info(s"\nDone. Theory found:\n ${filtered.showWithStats}")
logger.info(s"Mean time per batch: ${Globals.timeDebug.sum / Globals.timeDebug.length}")
logger.info(s"Total batch time: ${Globals.timeDebug.sum}")
context.system.terminate()
}
}
def showTheory(t: Theory) = {
val showClause = (c: Clause) => {
s"score (${if (c.head.functor == "initiatedAt") "precision" else "recall"}):" +
s"${c.score}, tps: ${c.tps}, fps: ${c.fps}, fns: ${c.fns} Evaluated on: ${c.seenExmplsNum} examples\n$$c.tostring}"
}
t.clauses.map(x => showClause(x)).mkString("\n")
}
/*
def crossVal() = {
val merged_ = Theory(this.initTheory ++ this.termTheory)
val compressed = Theory(LogicUtils.compressTheory(merged_.clauses))
/*------------------*/
// DEBUGGING-TESTING
/*------------------*/
//val filtered = Theory(compressed.clauses.filter(x => x.tps > 50))
val filtered = compressed
val crossValJep = new Jep()
val data = testingDataFunction(testingDataOptions)
val (tps,fps,fns,precision,recall,fscore) = crossVal(filtered, crossValJep, data = data, handCraftedTheoryFile = inps.evalth, globals = inps.globals, inps = inps)
val time = Math.max(this.initTrainingTime.toDouble, this.termTrainingTime.toDouble)
val theorySize = filtered.clauses.foldLeft(0)((x,y) => x + y.body.length + 1)
logger.info(s"\ntps: $tps\nfps: $fps\nfns: $fns\nprecision:" +
s" $precision\nrecall: $recall\nf-score: $fscore\ntraining time: " +
s"$time\ntheory size: $theorySize\n" +
s"Total number of messages: ${initTotalMsgNum+termTotalMsgNum}\n" +
s"Total message size: ${initTotalMsgSize+termTotalMsgSize}")
logger.info(s"\nDone. Theory found:\n ${filtered.showWithStats}")
logger.info(s"Mean time per batch: ${Globals.timeDebug.sum/Globals.timeDebug.length}")
logger.info(s"Total batch time: ${Globals.timeDebug.sum}")
crossValJep.close()
}
*/
}
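
// Illustrative only (not part of the original source): a minimal sketch of wiring the
// Dispatcher above. The caller supplies the per-node input sources, the data functions
// and the runner options; tasksNumber = 2 because the dispatcher awaits one
// FinalTheoryMessage for the initiated and one for the terminated part of the theory.
object DispatcherExample {
  def start[T <: InputSource](
      dataOptions: List[(T, T => Iterator[Example])],
      params: RunningOptions,
      testingOptions: T,
      testingData: T => Iterator[Example]): Unit = {
    val system = akka.actor.ActorSystem("OLED-distributed")
    val dispatcher = system.actorOf(
      Props(new Dispatcher(dataOptions, params, 2, testingOptions, testingData)),
      name = "Dispatcher")
    dispatcher ! "go" // spawns the Initiated/Terminated top-level actors
  }
}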
| 7,415 | 39.747253 | 166 | scala |
OLED | OLED-master/src/main/scala/oled/distributed/Node.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.distributed
import akka.actor.{Actor, PoisonPill}
import app.runutils.IOHandling.InputSource
import app.runutils.RunningOptions
import com.madhukaraphatak.sizeof.SizeEstimator
import logic.Examples.Example
import logic.{Clause, Theory}
import oled.distributed.Structures._
import org.slf4j.LoggerFactory
import oled.functions.DistributedOLEDFunctions._
/**
* Created by nkatz on 2/15/17.
*/
/* Represents a processing node in OLED's distributed setting. */
class Node[T <: InputSource](
val otherNodesNames: List[String],
val targetConcept: TargetConcept,
val inputParameters: RunningOptions,
val trainingDataOptions: T,
val trainingDataFunction: T => Iterator[Example]) extends Actor {
import context.become
private val initorterm = targetConcept match {
case x: Initiated => "initiatedAt"
case x: Terminated => "terminatedAt"
}
// Get the training data from the current database
def getTrainData = trainingDataFunction(trainingDataOptions)
private var data = Iterator[Example]()
private var currentTheory = List[Clause]()
// This variable stores the replies from other nodes in response to a StatsRequest from this node
private var statsReplies = List[StatsReply]()
private var statsRepliesCount = 0
// Control learning iterations over the data
private var repeatFor = inputParameters.repeatFor
/* FOR LOGGING-DEBUGGING */
def showCurrentClauseUUIDs = s"(${this.currentTheory.length}): ${this.currentTheory.map(x => x.uuid).mkString(" ")}"
/* FOR LOGGING-DEBUGGING */
def showCurrentExpansionCandidatesUUIDS =
s"(${getCurrentExpansionCandidates.length}): ${getCurrentExpansionCandidates.map(x => x.uuid).mkString(" ")}"
/* FOR LOGGING-DEBUGGING */
def showAlreadySpecialized = s"(${this.specializedSoFar.length}): ${this.specializedSoFar.mkString(" ")}"
/* LOGGING-DEBUGGING */
val showDebugMsgInLogs = false
/* LOGGING-DEBUGGING */
def showClausesDebugMsg = {
if (showDebugMsgInLogs) {
s"\nCurrent theory contains:" +
s" $showCurrentClauseUUIDs\nExpansion candidates: $showCurrentExpansionCandidatesUUIDS\nAlready specialized: $showAlreadySpecialized\n"
} else { "" }
}
def getCurrentExpansionCandidates = {
this.currentExpansionCandidates.filter(p => !this.specializedSoFar.contains(p.uuid))
}
// Monitor current state (that's just for debugging)
var state = "starting"
// for logging
def showState = s"[in ${this.state} state] "
// for logging
val NORMAL_STATE = "normal"
// for logging
val EXPANSION_NODE_WAITING_STATE = "expansion node waiting"
// for logging
val STATS_REQUEST_SENDER_WAITING_STATE = "stats request sender waiting"
// for logging
val STATS_REQUEST_RECEIVER_WAITING_STATE = "stats request receiver waiting"
// for logging
val EXPANSION_NODE_NON_PRIORITY_STATE = "expansion node non-priority"
// for logging
def logNormalState = this.state = NORMAL_STATE
def logExpansionNodeState = this.state = EXPANSION_NODE_WAITING_STATE
def logRequestSenderState = this.state = STATS_REQUEST_SENDER_WAITING_STATE
def logRequestReceiverState = this.state = STATS_REQUEST_RECEIVER_WAITING_STATE
def logNonPriorityState = this.state = EXPANSION_NODE_NON_PRIORITY_STATE
// This variable stores the uuid's of clauses that have been already specialized.
// When this node finds an expansion candidate, it only proceeds to the necessary actions
// to specialize that candidate, if its uuid is not found in this list. This is to avoid
// a situation when this node infers that a clause C must be specialized, while it has
// also received a similar request for C from another node. In that case, if C gets
// specialized in the other node, there is no point in trying to specialize it again.
private var specializedSoFar = List[String]()
private var currentExpansionCandidates = List[Clause]()
var finishedAndSentTheory = false
/*
   * The logger for this class. Getting a logger this way, instead of mixing in the LazyLogging trait, allows one to
   * name the logger after a particular class instance, which is helpful for tracing messages
* between different instances of the same class.
* */
private val slf4jLogger = LoggerFactory.getLogger(self.path.name)
def logger_info(msg: String) = this.slf4jLogger.info(s"$showState $msg $showClausesDebugMsg")
  def logger_debug(msg: String) = this.slf4jLogger.debug(s"$showState $msg $showClausesDebugMsg")
private var messages = List[Long]()
def updateMessages(m: AnyRef) = {
val size = SizeEstimator.estimate(m)
messages = messages :+ size
}
def receive = {
// Start processing data. This message is received from the top level actor
case "go" => start()
//case "go-no-communication" => runNoCommunication()
case _: ShutDown => self ! PoisonPill
}
def getNextBatch = {
if (data.isEmpty) Iterator[Example]()
else Utils.getNextBatch(data, inputParameters.processBatchBeforeMailBox)
}
def start() = {
this.repeatFor -= 1
logger_info(s"$showState Getting training data from db ${this.inputParameters.train}")
// Get the training data into a fresh iterator
this.data = getTrainData
if (this.data.isEmpty) {
slf4jLogger.error(s"DB ${inputParameters.train} is empty.")
System.exit(-1)
}
// start at normal state
become(normalState)
// and send the first batch to self
self ! getNextBatch
}
def normalState: Receive = {
/*
     * This method encapsulates the normal behaviour of a Node instance. In its normal state a node can do one of the following:
*
* 1. Receive a batch of examples for processing. In this case it sends to itself the
* result of this processing in the form of a BatchProcessResult instance.
*
* 2. Receive a A BatchProcessResult instance (from itself). In this case it sends
* all newly generated clauses to other nodes and requests stats for all clauses
* that are about to be expanded.
*
* 3. Receive one or more new clauses generated at some other node. In this case it simply adds these
* clauses to its current theory
*
* 4. Receive a request for stats for some candidates for expansion. In this case the node sends
* the requested stats and goes into a waiting state, where it waits until it has a reply on
* whether the candidates were actually expanded or not.
*
* */
// for debugging, to check the actor's state
case p: Ping => logger_info(s"Pinged by ${p.senderName}")
case _: ShutDown => self ! PoisonPill
case "start-over" =>
logger_info(s"$showState Starting a new training iteration (${this.repeatFor - 1} iterations remaining.)")
start() // re-starts according to the repeatFor parameter
case chunk: Iterator[Example] =>
// Receive a small example batch for processing. This is received from self.
// If the batch is non-empty it is processed in the regular way, otherwise, if the batch is empty,
// that means that the training data have been exhausted, so (based on the repeatFor parameter)
// we either proceed to a new training iteration, or we wrap things up
// (post-pruning if onlinePruning is not on etc).
logNormalState
if (chunk.isEmpty) {
logger_info(s"$showState Finished the data")
if (this.repeatFor > 0) {
self ! "start-over"
} else if (this.repeatFor == 0) {
this.finishedAndSentTheory = true
logger_info(s"$showState Sending the theory to the top-level actor")
context.parent ! new NodeDoneMessage(self.path.name)
} else {
throw new RuntimeException("This should never have happened (repeatfor is now negative?)")
}
} else {
self ! processBatch(chunk, this.slf4jLogger)
}
case result: BatchProcessResult =>
logNormalState
// Receive the result of processing a data batch.
// If new clauses are generated, send them to other nodes
if (result.newClauses.nonEmpty) {
this.currentTheory = this.currentTheory ++ result.newClauses
val copies = result.newClauses.map(x => Utils.copyClause(x))
logger_info(s"Generated new clauses, sending them over...")
Utils.getOtherActors(context, otherNodesNames) foreach { x =>
val cls = new NewClauses(copies, self.path.name)
updateMessages(cls)
x ! cls
}
}
// Handle expansion candidates
if (result.toExpandClauses.nonEmpty) {
this.currentExpansionCandidates = result.toExpandClauses.filter(p => !this.specializedSoFar.contains(p.uuid))
logger_info(s"Found candidates for expansion. Requesting a specialization ticket from the top-level actor")
// Switch to a waiting state
become(expansionNodeWaitingState)
// ...and request a specialization ticket from the top-level actor
val ticket = new SpecializationTicketRequest(self.path.name, otherNodesNames)
updateMessages(ticket)
context.parent ! ticket
}
// When everything is done, send a request to self to process next batch. This request will be processed in time.
      // For example, if the node has entered a waiting state, the new batch will be processed after all the logic in
// the waiting state has been executed.
if (!this.finishedAndSentTheory) self ! getNextBatch
case nc: NewClauses =>
logNormalState
// Receive one or more new clauses. Simply add them to the current
// theory for processing. This message is sent from another node
// Add the copies to the current theory
this.currentTheory = this.currentTheory ++ nc.newClauses
logger_info(s"Received new clauses from ${nc.senderName}")
/*This may be useful but needs some thinking to work*/
/*
if (!inputParameters.compressNewRules) {
this.currentTheory = this.currentTheory ++ nc.newClauses
logger_info(s"Received new clauses from ${nc.senderName}")
} else {
val bottomClauses = this.currentTheory.map(x => x.supportSet.clauses.head)
for (clause <- nc.newClauses) {
val bottom = clause.supportSet.clauses.head
if (!bottomClauses.exists(x => x.thetaSubsumes(bottom) && bottom.thetaSubsumes(x))) {
this.currentTheory = this.currentTheory :+ clause
logger_info(s"Received new clause from ${nc.senderName} (compressNewClause is on)")
} else {
logger_info(s"Received new clause from ${nc.senderName} but it was dropped (compressNewClause is on)")
}
}
}
*/
case request: StatsRequest =>
logNormalState
// When a processing node receives a StatsRequest, it sends a StatsReply to the
      // sender of the request, containing all necessary information (counts) for
      // the sender to calculate updated Hoeffding bound heuristics. It then enters a
      // statsRequestReceiverWaitingState where it waits for the verdict on whether each
// candidate for expansion has been expanded or not.
handleStatsRequest(request)
become(statsRequestReceiverWaitingState)
case msg: AnotherNodeIsSpecializing =>
//logNormalState
//logger.info(s"$showState Node ${msg.specializingNodeName} has found expansion candidates. Switching to a statsRequestReceiverWaitingState")
//become(statsRequestReceiverWaitingState)
/*
* This is a request for the learnt theory from the coordinating TopLevelActor
* */
case msg: TheoryRequestMessage =>
val msgNum = this.messages.length
val msgSize = this.messages.sum
sender ! new NodeTheoryMessage(Theory(this.currentTheory), msgNum, msgSize, self.path.name)
    case x: SpecializationTicket => logger_info("Received a SpecializationTicket while in normal state; this shouldn't have happened")
}
def expansionNodeWaitingState: Receive = {
/*
* A node enters this state whenever it detects some candidate for expansion clauses. It then notifies the
* top-level actor and enters this state, where it waits either for a specialization ticket (meaning that
* the top-level actor has prioritized this actor to proceed with handling its candidates), or a "no-go" response,
* meaning that the top-level actor has prioritized some other actor to proceed with its own expansion candidates.
* Note that in the latter case, that "other actor" has also detected some expansion candidates simultaneously, or
* in very close temporal proximity with this actor. The top-level actor received both requests almost simultaneously,
* and it randomly prioritized one of the requesting actors to proceed. In detail, the behaviour of a node while in this
* state is determined by the types of message it receives and the way its handles these messages:
*
* 1. Receive a SpecializationTicket from the top-level actor. In this case this node has been prioritized by the top-level actor
* to proceed with handling its expansion candidates. Then this node sends out a StatsRequest to the other nodes requesting counts for
* its candidates. It then switches to a statsRequestSenderWaitingState, where it waits for the replies from the other nodes.
*
     * 2. Receive an AnotherNodeIsSpecializing message from the top-level actor. This is just a "letting-you-know" message that another node
     * has been prioritized over this one to send out its stats request. In that case, this node simply switches to an expansionNodeNonPriorityState
     * where it can handle the stats request from the prioritized node, rather than back to a normal state, as it would have done
     * if it were in a statsRequestReceiverWaitingState. Once the prioritized node's expansion round finishes, the node switches back
     * to this state and waits for its own specialization ticket.
* */
// for debugging, to check the actor's state
case p: Ping => logger_info(s"Pinged by ${p.senderName}")
case _: ShutDown => self ! PoisonPill
case go: SpecializationTicket =>
logExpansionNodeState
val candidates = getCurrentExpansionCandidates
if (candidates.nonEmpty) {
logger_info(s"Just got a specialization ticket.")
// send a StatsRequest msg to all other nodes
val otherNodes = Utils.getOtherActors(context, otherNodesNames)
val x = new StatsRequest(candidates map (_.uuid), self.path.name)
println(SizeEstimator.estimate(x))
otherNodes foreach { x =>
val request = new StatsRequest(candidates map (_.uuid), self.path.name)
updateMessages(request)
x ! request
}
/*
otherNodesNames foreach { name =>
val actor = Utils.getActorByName(context, name)
actor ! new StatsRequest(candidates map (_.uuid), self.path.name)
println(s"${self.path.name} sent the stats request to $name")
}
*/
// Then enter a waiting state. Wait until all replies from other actors are collected.
this.statsRepliesCount = otherNodes.length // clear the reply counter
this.statsReplies = List[StatsReply]() // clear the reply storage
become(statsRequestSenderWaitingState)
} else {
//logger_info(s"Got a specialization ticket, but all candidates have already been specialized.")
this.currentExpansionCandidates = Nil
context.parent ! new ExpansionAbortMsg(self.path.name)
become(normalState)
}
case nogo: AnotherNodeIsSpecializing =>
logExpansionNodeState
logger_info(s"Postponing expansion, queued by node ${nogo.specializingNodeName}.")
become(expansionNodeNonPriorityState)
/*
case request: StatsRequest =>
logExpansionNodeState
become(expansionNodeNonPriorityState)
logger_info("THIS SHOULD NEVER HAVE HAPPENED")
handleStatsRequest(request)
*/
case request: StatsRequest =>
logExpansionNodeState
//become(expansionNodeNonPriorityState)
//logger_info(s"RECEIVED A StatsRequest FROM ${request.senderName}, SWITCHING TO NON-PRIORITY STATE & FW THE REQUEST THERE")
become(expansionNodeNonPriorityState)
self ! request
//handleStatsRequest(request)
}
def expansionNodeNonPriorityState: Receive = {
/*
     * A node enters this state when it is in an expansionNodeWaitingState and it receives an AnotherNodeIsSpecializing
     * message from the top-level actor. The latter is just a "letting-you-know" message that another node has been
* prioritized over this one to send out its stats request. In this case, this node serves the prioritized node by
* providing replies to the received stats request and it waits for the verdicts on the prioritized node's candidates
* (expansion/no expansion). A node's behaviour while in this state is as follows:
*
* 1. Receive a StatsRequest. This is sent out from the prioritized node. This node treats the request in the regular way,
* responding with a StatsReply to the sender of the StatsRequest. It does not switch to statsRequestReceiverWaitingState.
* It remains in this state, since all functionality of the statsRequestReceiverWaitingState (receiving an ExpansionReply
     * from the sender of the StatsRequest, and replacing any specialized clauses in this.currentTheory) is available
* in the current state too. Remaining in the current state allows this node to serve the node currently prioritized for
* expansion, while waiting for its turn to get its own SpecializationTicket from the top-level actor.
*
* 2. Receive an ExpansionReply from the node currently prioritized for expansion, containing a verdict on its candidates
* (expansion/no expansion). When this node receives an ExpansionReply it handles it normally by replacing the specialized
* clauses in its current theory (see the handleExpansionReply method for details). After that, this node switches
* back to a expansionNodeWaitingState, where it will either get its own turn to proceed with its candidates, or it will
* get another AnotherNodeIsSpecializing, leading the node back to this state. Eventually, its Specialization ticket will
* be processed, leading the node back to normalState.
*
* */
// for debugging, to check the actor's state
case p: Ping => logger_info(s"Pinged by ${p.senderName}")
case _: ShutDown => self ! PoisonPill
case request: StatsRequest =>
logNonPriorityState
// Handle the stats request in the regular way and send back the requested counts.
handleStatsRequest(request)
case msg: AnotherNodeIsSpecializing =>
logNonPriorityState
// do nothing here, just process the next message from the mailbox
logger_info(s"Got an AnotherNodeIsSpecializing message. Node ${msg.specializingNodeName} is prioritized for clause expansion")
case reply: ExpansionReply =>
logNonPriorityState
// Handle the expansion reply in the regular way
handleExpansionReply(reply)
become(expansionNodeWaitingState)
case _: PrioritizedNodeAbortedExpansion => become(expansionNodeWaitingState)
}
def statsRequestSenderWaitingState: Receive = {
/*
* This method encapsulates the behaviour of a processing node while it is waiting for replies on
* a stats request it has sent out to other actors. While in this state, the node can only process
* a StatsReply type of message. The actor waits in this state until all replies from all nodes
* are collected. Then, for each clause in the candidates list (the list of expansion candidates that
     * are input to this method) the node re-calculates expansion heuristics with the accumulated counts
     * from all nodes for this clause. Finally, it sends an ExpansionReply to all other nodes (which in the
     * meantime have been waiting for these expansion replies in statsRequestReceiverWaitingState).
* The expansion reply contains a list of intact rules and a list of expanded rules. Each receiver
* of the expansion reply updates its current theory accordingly.
* */
// ---------------------------------------------------------------------------------------------------------------
// This is just for debugging. Receiving a stats request while in a statsRequestSenderWaitingState
// means that another node is also in the same state, which is a deadlock, since both actors (this and the other)
// are waiting for StatsReplies, which will never arrive...
// ---------------------------------------------------------------------------------------------------------------
// for debugging, to check the actor's state
case p: Ping => logger_info(s"Pinged by ${p.senderName}")
case _: ShutDown => self ! PoisonPill
case x: StatsRequest =>
logRequestSenderState
throw new RuntimeException("XXXXXXXXXX: received a stats" + " request while in a statsRequestSenderWaitingState. Something's really wrong!")
case reply: StatsReply =>
logRequestSenderState
logger_info(s"Received a StatsReply from node ${reply.sender}: $reply")
this.statsRepliesCount -= 1
this.statsReplies = this.statsReplies :+ reply
if (this.statsRepliesCount == 0) {
// all replies have been received from all nodes.
// Time to decide which candidates will eventually be expanded.
val (delta, ties, minseen) = (inputParameters.delta, inputParameters.breakTiesThreshold, inputParameters.minSeenExmpls)
val checked = getCurrentExpansionCandidates map (clause => Utils.expand_?(clause, this.statsReplies, delta, ties, minseen, showState, self.path.name, inputParameters, slf4jLogger))
val (expanded, notExpanded) = checked.foldLeft(List[Clause](), List[Clause]()){ (x, y) =>
val (expandAccum, notExpandAccum) = (x._1, x._2)
val (expandedFlag, clause) = (y._1, y._2)
if (expandedFlag) (expandAccum :+ clause, notExpandAccum) else (expandAccum, notExpandAccum :+ clause)
}
// replace the expanded in the current theory
if (expanded.nonEmpty) {
expanded.foreach { expanded =>
val theoryWithout = this.currentTheory.filter(p => p.uuid != expanded.parentClause.uuid)
if (this.currentTheory.length == theoryWithout.length) {
// then the parent clause of the expanded one is not found in current theory, which is an error...
//throw new RuntimeException(s"$showState Cannot find parent clause in current theory")
}
val theoryWith = theoryWithout :+ expanded
// Remember the uuid's of expanded clauses
this.specializedSoFar = this.specializedSoFar :+ expanded.parentClause.uuid
this.currentTheory = theoryWith
}
}
// Clear the currentExpansionCandidates variable
this.currentExpansionCandidates = Nil
        // finally, send the reply to all other actors (which are currently in a statsRequestReceiverWaitingState)
val otherNodes = Utils.getOtherActors(context, otherNodesNames)
otherNodes foreach { x =>
val reply = new ExpansionReply(notExpanded, expanded.map(p => Utils.copyClause(p)), self.path.name)
updateMessages(reply)
x ! reply
}
// notify the top-level actor
context.parent ! new ExpansionFinished(notExpanded.map(_.uuid), expanded.map(_.parentClause.uuid), self.path.name, otherNodesNames)
// ...and switch back to normal state to continue processing.
// Before switching, send to self a request to process a new batch. This request will be processed
        // once all the messages that have accumulated in the mailbox while the node was in the
        // statsRequestSenderWaitingState have been processed (so the actor continues processing).
if (!this.finishedAndSentTheory) self ! getNextBatch
become(normalState)
}
}
def statsRequestReceiverWaitingState: Receive = {
// for debugging, to check the actor's state
case p: Ping => logger_info(s"Pinged by ${p.senderName}")
case _: ShutDown => self ! PoisonPill
case reply: ExpansionReply =>
logRequestReceiverState
handleExpansionReply(reply)
// when the replacement of expanded clauses finishes, switch back to normal state.
// Also send a request to process a new batch (once in normal state and the mailbox is empty).
become(normalState)
if (!this.finishedAndSentTheory) self ! getNextBatch
}
/* Handle a stats request */
def handleStatsRequest(request: StatsRequest) = {
logger_info(s"Received a StatsRequest from node ${request.senderName}")
val statsObject =
      (for (uuid <- request.candidatesIds) yield {
        // The original code threw a RuntimeException when a candidate with this uuid
        // was missing from the current theory:
        // this.currentTheory.find(c => c.uuid == uuid).getOrElse(
        //   throw new RuntimeException(s"$showState Could not find expansion candidate with uuid $uuid in the current theory"))
        this.currentTheory.find(c => c.uuid == uuid).getOrElse(Clause())
      }
).map(a => a.uuid -> Stats(ClauseStats(a.tps, a.fps, a.fns, a.seenExmplsNum),
a.refinements.map(r => r.uuid -> ClauseStats(r.tps, r.fps, r.fns, r.seenExmplsNum)).toMap)).toMap
val reply = new StatsReply(statsObject, self.path.name)
updateMessages(reply)
Utils.getActorByName(context, request.senderName) ! reply
}
/* Handle an expansion reply */
def handleExpansionReply(reply: ExpansionReply) = {
logger_info(s"Received an ExpansionReply from node ${reply.senderName}")
// We don't do anything for intact clauses. But we need to replace the expanded clauses in the
// current theory.
if (reply.expandedClauses.nonEmpty) {
reply.expandedClauses.foreach { expanded =>
val theoryWithout = this.currentTheory.filter(p => p.uuid != expanded.parentClause.uuid)
if (this.currentTheory.length == theoryWithout.length) {
// then the parent clause of the expanded one is not found in current theory, which is an error...
//throw new RuntimeException(s"$showState Cannot find parent clause in current theory")
}
val theoryWith = theoryWithout :+ expanded
// Remember the uuid's of expanded clauses
this.specializedSoFar = this.specializedSoFar :+ expanded.parentClause.uuid
this.currentTheory = theoryWith
}
}
}
/*
*
* Process a small batch of examples. This method returns two lists:
* The first contains all new rules that were created from the input
* batch, while the second list contains all rules that are about to be
* expanded.
* */
def processBatch(exmpls: Iterator[Example], logger: org.slf4j.Logger): BatchProcessResult = {
def filterTriedRules(newRules: List[Clause]) = {
val out = newRules.filter{ newRule =>
val bottomClauses = this.currentTheory.map(x => x.supportSet.clauses.head)
val bottom = newRule.supportSet.clauses.head
!bottomClauses.exists(x => x.thetaSubsumes(bottom) && bottom.thetaSubsumes(x))
}
if (out.length != newRules.length) logger.info("Dropped new clause (repeated bottom clause)")
out
}
val out = utils.Utils.time {
var newTopTheory = Theory(this.currentTheory)
val (newRules_, expansionCandidateRules_) = exmpls.foldLeft(List[Clause](), List[Clause]()) { (x, e) =>
var (newRules, expansionCandidateRules) = (x._1, x._2)
val startNew = newTopTheory.growNewRuleTest(e, initorterm, this.inputParameters.globals)
if (startNew) {
newRules = generateNewRules(newTopTheory, e, initorterm, this.inputParameters.globals, this.otherNodesNames)
          // Just to be on the safe side, keep only rules for the current target predicate.
          newRules = newRules.filter(x => x.head.functor == this.initorterm)
newTopTheory = Theory(newTopTheory.clauses ++ newRules)
}
if (newTopTheory.clauses.nonEmpty) {
newTopTheory.scoreRules(e, this.inputParameters.globals)
}
for (rule <- newTopTheory.clauses) {
val (delta, ties, seen) = (inputParameters.delta, inputParameters.breakTiesThreshold, inputParameters.minSeenExmpls)
if (shouldExpand(rule, delta, ties, seen)) {
expansionCandidateRules = expansionCandidateRules :+ rule
}
}
(newRules, expansionCandidateRules)
}
(newRules_, expansionCandidateRules_)
}
val (newRules, expansionCandidateRules, time) = (out._1._1, out._1._2, out._2)
//Globals.timeDebug = Globals.timeDebug :+ time
if (inputParameters.compressNewRules) {
new BatchProcessResult(filterTriedRules(newRules), expansionCandidateRules)
} else {
new BatchProcessResult(newRules, expansionCandidateRules)
}
}
}
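
// Illustrative only (not part of the original source): a minimal sketch of spawning a
// pair of learning nodes for the initiated target concept, mirroring what the
// coordinating TopLevelActor does. In the real setting the nodes are children of the
// TopLevelActor, which sends them the "go" message and collects their theories.
object NodeExample {
  def spawn[T <: InputSource](
      system: akka.actor.ActorSystem,
      opts: List[T],
      dataFunction: T => Iterator[Example],
      params: RunningOptions): Unit = {
    val names = List("node-1", "node-2")
    names.zip(opts).foreach {
      case (name, o) =>
        val otherNodes = names.filter(_ != name)
        system.actorOf(
          akka.actor.Props(new Node(otherNodes, new Initiated, params, o, dataFunction)),
          name = name)
    }
  }
}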
| 29,551 | 46.896272 | 188 | scala |
OLED | OLED-master/src/main/scala/oled/distributed/PingActor.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.distributed
import akka.actor.Actor
/**
* Created by nkatz on 3/15/17.
*
* This is just a debugging tool.
* It pings a set of actors to check their state
*
*/
class PingActor(learningActorsNames: List[String]) extends Actor {
var startTime = System.currentTimeMillis()
val actorRefs = Utils.getOtherActors(context, learningActorsNames)
def receive = {
case "go" =>
while (true) {
val now = System.currentTimeMillis()
if (now - startTime > 10000) {
actorRefs.foreach(_ ! "ping")
startTime = System.currentTimeMillis()
}
}
}
}
| 1,319 | 27.695652 | 72 | scala |
OLED | OLED-master/src/main/scala/oled/distributed/Structures.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.distributed
import logic.{Clause, Theory}
/**
* Created by nkatz on 2/23/17.
*/
object Structures {
class NodeDoneMessage(val sender: String)
class NodeTheoryMessage(val theory: Theory, val msgNum: Int, val msgSize: Long, val sender: String)
class TheoryRequestMessage(val sender: String)
/*
* This message represents the result of processing a small batch of examples.
* It contains a (potentially empty) set of new clauses, generated from this
* batch and a (potentially empty) set of clauses that are candidates for expansion.
* Such a message is sent from a Node instance to its self.
* */
class BatchProcessResult(val newClauses: List[Clause], val toExpandClauses: List[Clause])
/*
* This class represents a message sent to all nodes
* from a node that generates one or more new clauses.
* */
class NewClauses(val newClauses: List[Clause], val senderName: String)
/*
* This class represents a message sent to all nodes from a node that has one
   * or more clauses that are about to be expanded to their best-scoring
* specialization. This message serves as a request to the other nodes to send
* back (to the sender of the msg) their counts for the particular expansion candidates.
* The message contains (in addition to the sender's name for debugging), a list
* of the expansion candidates' uuid's that suffice for each receiver node to track the candidates.
* */
class StatsRequest(val candidatesIds: List[String], val senderName: String)
/*
* This class represents a message sent from a node that checks if a clause expansion is still valid,
* after it has received additional counts for that clause from all other nodes. This message is a reply
* sent to all the other nodes and it contains both the candidates that were eventually expanded and those
  * that were not. The receiver actors of such a message should act accordingly (retain all the non-expanded
* clauses and replace the parents of the expanded ones with the expanded clauses in their current theories).
* */
class ExpansionReply(val intactClauses: List[Clause], val expandedClauses: List[Clause], val senderName: String)
//class ExpandedClause(parentId: String, refinement: Clause)
/*
* This class encapsulates the stats sent from all other
* nodes as a response to an expansion request. statsMap is
* a (k,v) map where k is the uuid for each clause that is candidate
* for expansion and v is a stats object for that candidate.
* So, in response to one StatsRequest from a particular
* node (requesting the stats for a set of expansion candidates),
* each other node sends one StatsReply, containing the stats for each such candidate.
* The sender field is the id (name) of the Node actor that sends the StatsReply
* */
class StatsReply(val statsMap: Map[String, Stats], val sender: String) {
// utility getter method
def getClauseStats(id: String, blocking: Boolean = true) = {
if (blocking) {
//statsMap.getOrElse(id, throw new RuntimeException(s"Clause $id not found in stats map"))
        statsMap.getOrElse(id, Stats())
} else {
statsMap.getOrElse(id, Stats())
}
}
}
/* just a container. parentStats is an object carrying the counts for a parent clause and refinementsStats is
* a (k,v) map where k is the uuid of a parent clause's refinement and v is an object carrying the counts for
* that refinement. */
case class Stats(parentStats: ClauseStats = ClauseStats(), refinementsStats: Map[String, ClauseStats] = Map[String, ClauseStats]())
case class ClauseStats(tps: Int = 0, fps: Int = 0, fns: Int = 0, Nexmls: Int = 0)
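  /* An illustrative helper (an addition, not used by the original code): it
   * aggregates a clause's per-node counts into a single ClauseStats total, which
   * is conceptually what a learning node does when it merges the StatsReply
   * messages received from its peers. */
  def sumStats(perNodeCounts: Iterable[ClauseStats]): ClauseStats =
    perNodeCounts.foldLeft(ClauseStats()) { (acc, s) =>
      ClauseStats(acc.tps + s.tps, acc.fps + s.fps, acc.fns + s.fns, acc.Nexmls + s.Nexmls)
    }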
class SpecializationTicket(val senderName: String)
class SpecializationTicketRequest(val senderName: String, val otherNodeNames: List[String])
class AnotherNodeIsSpecializing(val specializingNodeName: String)
class ExpansionFinished(val intactClausesIds: List[String], val expandedClausesIds: List[String], val senderName: String, val otherNodeNames: List[String])
class QueuedExpandingNode(val senderName: String, val otherNodesNames: List[String])
class FinalTheoryMessage(val theory: List[Clause], val trainingTime: String,
val totalMsgNum: Int, val totalMsgSize: Long,
val targetPredicate: TargetConcept)
class ExpansionAbortMsg(val abortingNodeName: String)
class PrioritizedNodeAbortedExpansion(val abortingNodeName: String)
class Ping(val senderName: String)
class ShutDown(val senderName: String)
abstract class TargetConcept
case class Initiated() extends TargetConcept { override def toString() = "initiated" }
case class Terminated() extends TargetConcept { override def toString() = "terminated" }
case class WhatEver() extends TargetConcept { override def toString() = "" }
}
| 5,582 | 43.309524 | 157 | scala |
OLED | OLED-master/src/main/scala/oled/distributed/TopLevelActor.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.distributed
import akka.actor._
import app.runutils.IOHandling.InputSource
import app.runutils.RunningOptions
import com.madhukaraphatak.sizeof.SizeEstimator
import logic.Examples.Example
import logic.{Clause, Theory}
import oled.distributed.Structures._
import org.slf4j.LoggerFactory
/**
* Created by nkatz on 2/15/17.
*/
class TopLevelActor[T <: InputSource](
val dataOptions: List[(T, T => Iterator[Example])],
val inputParams: RunningOptions,
val targetConcept: TargetConcept) extends Actor {
import context._
var actorsPoolSize = 0
var nodesCounter = 0
var startTime = 0L
var endTime = 0L
/* This function starts the learning Nodes. */
def getActorsPool() = {
val NodeActorNames = (1 to dataOptions.length).toList map (i => s"Node-$i-${targetConcept.toString}")
val nodeActorsPool = (NodeActorNames zip this.dataOptions) map { node =>
      val (nodeName, nodeOptions, nodeDataFunction) = (node._1, node._2._1, node._2._2)
      val otherActors = NodeActorNames.filter(_ != nodeName)
      context.actorOf(Props(new Node(otherActors, targetConcept, inputParams, nodeOptions, nodeDataFunction)), name = nodeName)
}
nodeActorsPool
}
val actorsPool: List[ActorRef] = getActorsPool()
val actorNames: List[String] = actorsPool.map(x => x.path.name)
private var queuedExpandingNodes = scala.collection.mutable.Queue[QueuedExpandingNode]()
var nodeHavingTheSlot = "" // that's only for logging
def getOtherActorNames(actorName: String) = actorNames.filter(name => name != actorName)
def getOtherActorRefs(a: String) = getOtherActorNames(a) map (actorName => context.actorSelection(s"${self.path}/$actorName"))
private var finalTheories = List[Theory]() // these should all be the same
/*
* The logger for this class. Getting a logger this way instead of mixin-in the LazyLogging trait allows to
* name the logger after a particular class instance, which is helpful for tracing messages
* between different instances of the same class.
* */
private val logger = LoggerFactory.getLogger(self.path.name)
private var messages = List[Long]()
def updateMessages(m: AnyRef) = {
val size = SizeEstimator.estimate(m)
messages = messages :+ size
}
private var childrenMsgNums = List[Int]()
private var childrenMsgSizes = List[Long]()
def receive = {
case "go" =>
this.actorsPoolSize = actorsPool.length
this.nodesCounter = actorsPool.length
Thread.sleep(4000)
this.startTime = System.nanoTime()
actorsPool foreach (a => a ! "go")
case "go-no-communication" =>
this.actorsPoolSize = actorsPool.length
this.nodesCounter = actorsPool.length
Thread.sleep(4000)
this.startTime = System.nanoTime()
actorsPool foreach (a => a ! "go-no-communication")
become(replyHandler)
/*--------------------------------------------------------------------------------------*/
// For debugging
//context.actorOf(Props( new PingActor(this.actorNames) ), name = "Pinging-Actor") ! "go"
/*--------------------------------------------------------------------------------------*/
case request: SpecializationTicketRequest =>
become(requestHandler)
// re-send the message to self to be processed
self ! request
}
def replyHandler: Receive = {
// Upon receiving a reply, the flow continues by either sending a specialization
// ticket to the next enqueued node, or (if the queue is empty), by freeing the specialization slot
// and becoming a requestHandler to serve further expansion requests (see comment at the handleReply() method).
case reply: ExpansionFinished =>
handleReply()
    // When an expansion request arrives while this actor is in a replyHandler state (and therefore
    // another node is specializing), the request is enqueued to be processed when the expansion slot opens.
case request: SpecializationTicketRequest =>
this.queuedExpandingNodes.enqueue(new QueuedExpandingNode(request.senderName, request.otherNodeNames))
logger.info(s"Node ${request.senderName} is enqueued for expansion. The queue now is ${this.queuedExpandingNodes.map(x => x.senderName).mkString(" ")}")
context.actorSelection(s"${self.path}/${request.senderName}") ! new AnotherNodeIsSpecializing(this.nodeHavingTheSlot)
// This message is received by an enqueued expansion node that eventually received
// its specialization ticket, but all of its candidates have already been specialized
// in the meantime. The aborting node has already switched to normal state to continue processing
// so no message is required to be sent to it. The flow continues by either sending a specialization
// ticket to the next enqueued node, or (if the queue is empty), by freeing the specialization slot
// and becoming a requestHandler to serve further expansion requests (see comment at the handleReply() method).
case abort: ExpansionAbortMsg =>
logger.info(s"Node ${abort.abortingNodeName} aborted expansion (all candidates already specialized).")
val others = getOtherActorRefs(abort.abortingNodeName)
others foreach (_ ! new PrioritizedNodeAbortedExpansion(abort.abortingNodeName))
handleReply()
case msg: NodeDoneMessage =>
acceptNewDoneMsg(msg)
case msg: NodeTheoryMessage =>
acceptNewLearntTheory(msg)
}
def requestHandler: Receive = {
// When in a requestHandler state, the expansion slot (one expanding node at a time) is free,
// so simply sent a SpecializationTicket upon receiving a request when at this state.
case request: SpecializationTicketRequest =>
logger.info(s"Received a specialization ticket request from ${request.senderName}")
processNewRequest(request.senderName)
    // When a node supervised by this top-level actor reports that it is done, register it
case msg: NodeDoneMessage =>
acceptNewDoneMsg(msg)
// When all nodes supervised by this top-level actor are done, wrap things up
case msg: NodeTheoryMessage =>
acceptNewLearntTheory(msg)
}
def processNewRequest(requestingNodeName: String) = {
val others = getOtherActorRefs(requestingNodeName)
this.nodeHavingTheSlot = requestingNodeName // that's only for logging
others foreach (_ ! new AnotherNodeIsSpecializing(requestingNodeName))
become(replyHandler)
context.actorSelection(s"${self.path}/$requestingNodeName") ! new SpecializationTicket(self.path.name)
logger.info(s"Sent the ticket to $requestingNodeName")
}
def handleReply() = {
if (this.queuedExpandingNodes.nonEmpty) {
val nextInQueue = this.queuedExpandingNodes.dequeue()
logger.info(s"Sending specialization ticket to queued node ${nextInQueue.senderName}")
processNewRequest(nextInQueue.senderName)
} else {
this.nodeHavingTheSlot = "none" // that's only for logging
become(requestHandler)
}
}
def acceptNewDoneMsg(msg: NodeDoneMessage) = {
this.actorsPoolSize -= 1
logger.info(s"Node ${msg.sender} is done. ${this.actorsPoolSize} nodes remaining")
if (this.actorsPoolSize == 0) {
logger.info("All processing nodes are done")
val theoryRequest = new TheoryRequestMessage(self.path.name)
this.actorsPool foreach (a => a ! theoryRequest)
}
}
def acceptNewLearntTheory(msg: NodeTheoryMessage) = {
this.nodesCounter -= 1
logger.info(s"Node ${msg.sender} sent:\n${msg.theory.clauses.map(x => x.showWithStats + s"evaluated on ${x.seenExmplsNum} exmpls | refs: ${x.refinements.length}").mkString("\n")}")
this.finalTheories = this.finalTheories :+ msg.theory
this.childrenMsgNums = this.childrenMsgNums :+ msg.msgNum
this.childrenMsgSizes = this.childrenMsgSizes :+ msg.msgSize
if (this.nodesCounter == 0) {
this.endTime = System.nanoTime()
this.actorsPool.foreach(_ ! PoisonPill)
val totalTime = (this.endTime - this.startTime) / 1000000000.0
logger.info(s"Total training time: $totalTime sec")
val totalMsgNum = childrenMsgNums.sum + messages.length
val totalMsgSize = childrenMsgSizes.sum + messages.sum
context.parent ! new FinalTheoryMessage(getFinalTheory(), totalTime.toString, totalMsgNum, totalMsgSize, targetConcept)
}
}
/*
def getFinalTheory() = {
val uuids = this.finalTheories.head.clauses.map(_.uuid)
val withAccumScores = uuids.foldLeft(List[Clause]()) { (accumed, newUUID) =>
val copies = this.finalTheories.flatMap(theory => theory.clauses.filter(p => p.uuid == newUUID))
if (copies.length != this.finalTheories.length) throw new RuntimeException("Produced non-identical theories")
val (tps, fps, fns, exmpls) = copies.foldLeft(0, 0, 0, 0) { (x, y) =>
(x._1 + y.tps, x._2 + y.fps, x._3 + y.fns, x._4 + y.seenExmplsNum)
}
copies.head.tps = tps
copies.head.fps = fps
copies.head.fns = fns
copies.head.seenExmplsNum = exmpls
accumed :+ copies.head
}
// Poor-man's pruning. Not online, not time-consuming (and a bit cheating) offline,
// just a quick filtering to see how we're doing
val filteredTheory = withAccumScores.filter(p =>
p.seenExmplsNum > inputParams.minSeenExmpls && p.score > inputParams.postPruningThreshold)
filteredTheory
}
*/
def getFinalTheory() = {
this.finalTheories.head.clauses.foldLeft(List[Clause]()){ (accum, clause) =>
val clauseCopies = this.finalTheories.tail.flatMap(theory => theory.clauses.filter(c => c.uuid == clause.uuid))
if (clauseCopies.length + 1 != this.finalTheories.length) {
logger.info(s"\nCLAUSE\n${clause.tostring} (uuid: ${clause.uuid}) \nIS NOT FOUND IS SOME FINAL THEORY")
}
val sumCounts = clauseCopies.foldLeft(clause.tps, clause.fps, clause.fns, clause.seenExmplsNum) { (x, y) =>
(x._1 + y.tps, x._2 + y.fps, x._3 + y.fns, x._4 + y.seenExmplsNum)
}
clause.tps = sumCounts._1
clause.fps = sumCounts._2
clause.fns = sumCounts._3
clause.seenExmplsNum = sumCounts._4
if (clause.seenExmplsNum > inputParams.minEvalOn && clause.score >= inputParams.pruneThreshold) {
accum :+ clause
} else {
accum
}
}
}
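  /* Worked example (the numbers are illustrative): if a clause carries
   * (tps, fps, fns) = (10, 2, 1) in the first node's theory and its copy in a
   * second node's theory reports (5, 1, 0), the merged clause carries (15, 3, 1)
   * and is kept only if it also passes the minEvalOn / pruneThreshold filter above. */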
/* Modify to handle data in intervals. */
/*
def getActorsPool() = {
val taskNames = (1 to this.taskIntervals.length).map(i => s"dataset-$i").toList
val NodeActorNames = taskNames map (db => s"Node-$db-${targetConcept.toString}")
val globalsPool = taskNames.map(t => new Globals(inputParams.entryPath, t))
val nodeActorsPool = (NodeActorNames, globalsPool, this.taskIntervals).zipped.toList map { x =>
val (nodeName, global, nodeIntervals) = (x._1, x._2, x._3)
val otherActors = NodeActorNames.filter(_ != nodeName)
context.actorOf(Props(
new Node(otherActors, masterDB, targetConcept, global, getDataFunction, inputParams, nodeIntervals)
), name = nodeName)
}
nodeActorsPool
}
*/
}
| 11,696 | 41.534545 | 184 | scala |
OLED | OLED-master/src/main/scala/oled/distributed/Utils.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.distributed
import java.util.UUID
import akka.actor.{ActorContext, ActorSelection}
import app.runutils.RunningOptions
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.commons.MongoDBObject
import com.mongodb.casbah.{MongoClient, MongoCollection}
import logic.Clause
import logic.Examples.Example
import oled.distributed.Structures.{ClauseStats, StatsReply}
import utils.DataUtils.DataAsIntervals
import utils.Database
import oled.functions.DistributedOLEDFunctions._
/**
* Created by nkatz on 2/15/17.
*/
object Utils {
def getCaviarData(mc: MongoClient, dbName: String, chunkSize: Int): Iterator[List[String]] = {
val collection = mc(dbName)("examples")
collection.find().map(x => Example(x)).grouped(chunkSize).map(x =>
x.foldLeft(List[String]())((z, y) => z ++ y.annotation ++ y.narrative))
}
//, dataSize: Double = Double.PositiveInfinity
/* utility function for retrieving data */
def getDataFromDB(dbName: String, HLE: String, chunkSize: Int,
intervals: DataAsIntervals = DataAsIntervals()): Iterator[Example] = {
// No worry about removing prior annotation from the examples, since in any case inertia
// is not used during learning. Even if a pair is passed where in both times
// there is positive annotation, the first positive example will be covered by
    // the initialTime axiom, while the second positive will be covered by abduction (no inertia).
val mc = MongoClient()
val collection = mc(dbName)("examples")
if (intervals.isEmpty) {
//collection.createIndex(MongoDBObject("time" -> 1))
val data = collection.find().sort(MongoDBObject("time" -> 1)).map { x =>
val e = Example(x)
new Example(annot = e.annotation filter (_.contains(HLE)), nar = e.narrative, _time = e.time)
}
val dataChunked = data.grouped(chunkSize)
val dataIterator = dataChunked.map { x =>
val merged = x.foldLeft(Example()) { (z, y) =>
new Example(annot = z.annotation ++ y.annotation, nar = z.narrative ++ y.narrative, _time = x.head.time)
}
merged
}
dataIterator
} else {
utils.CaviarUtils.getDataFromIntervals(collection, HLE, intervals.trainingSet, chunkSize)
}
}
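  /* A minimal usage sketch (the database name "caviar" and the target HLE
   * "meeting" are hypothetical, for illustration only): */
  def getDataFromDBExample(): Unit = {
    val exmpls = getDataFromDB("caviar", "meeting", chunkSize = 10)
    exmpls.take(2).foreach(e => println(e.time))
  }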
def intervalsToDB(dbToReadFrom: String, intervals: DataAsIntervals, HLE: String,
chunkSize: Int, withChunking: Boolean = true) = {
val dbToWriteTo = s"d-oled-DB-${UUID.randomUUID()}"
val mongoClient = MongoClient()
val collectionWriteTo = mongoClient(dbToWriteTo)("examples")
val collectionReadFrom = mongoClient(dbToReadFrom)("examples")
println(s"Inserting data to $dbToWriteTo")
for (interval <- intervals.trainingSet) {
val batch = collectionReadFrom.find("time" $gte interval.startPoint $lte interval.endPoint).
sort(MongoDBObject("time" -> 1))
val examples = batch.map(x => Example(x)) //.toList
val HLExmpls = examples map { x =>
val a = x.annotation filter (_.contains(HLE))
new Example(annot = a, nar = x.narrative, _time = x.time)
}
val chunked = if (withChunking) HLExmpls.sliding(chunkSize, chunkSize - 1) else HLExmpls.sliding(HLExmpls.length)
val out = chunked map { x =>
val merged = x.foldLeft(Example()) { (z, y) =>
new Example(annot = z.annotation ++ y.annotation, nar = z.narrative ++ y.narrative, _time = x.head.time)
}
merged
}
out.foreach{ e =>
val entry = MongoDBObject("time" -> e._time.toInt) ++ ("annotation" -> e.annotation) ++ ("narrative" -> e.narrative)
collectionWriteTo.insert(entry)
}
}
dbToWriteTo
}
def getExmplIteratorSorted(collection: MongoCollection) = {
collection.find().sort(MongoDBObject("time" -> 1))
}
  // Stub, left unimplemented in the original code: shuffled iteration over the
  // examples. Callers should fall back to getExmplIteratorSorted for now.
  def getExmplIteratorShuffle(collection: MongoCollection) = {
  }
// Utility function, returns a list of other Node actors
def getOtherActors(context: ActorContext, otherNodesNames: List[String]): List[ActorSelection] = {
otherNodesNames map (actorName => context.actorSelection(s"${context.parent.path}/$actorName"))
}
def getActorByName(context: ActorContext, name: String) = {
context.actorSelection(s"${context.parent.path}/$name")
}
// Utility function, returns a new small example batch for processing
def getNextBatch(data: Iterator[Example], processBatchBeforeMailBox: Int) = {
data.take(processBatchBeforeMailBox)
}
/*
* Decide if a clause will be expanded or not, after taking into account the new counts
* from all nodes. clause is the clause in question, replies is a list of StatsReply objects
* received from all nodes and the remaining parameters are for calculating the hoeffding bound.
* This method returns a (b, c) tuple, where b is true of false, according to whether the input
* clause will be expanded or not and c either the input clause (if b = false) or its best
* specialization (if b = true).
* */
def expand_?(clause: Clause, replies: List[StatsReply], delta: Double,
breakTiesThreshold: Double, minSeenExmpls: Int,
currentNodeState: String, nodeName: String, params: RunningOptions, logger: org.slf4j.Logger) = {
    // A StatsReply is a reply from a node, so it should contain stats for any
    // requested clause. If a clause id is not found in a reply, r.getClauseStats
    // falls back to an empty Stats() object.
val repliesGroupedByNode = (for (r <- replies) yield (r.sender, r.getClauseStats(clause.uuid))).toMap
// update the counts per node for each node, for this clause and for each one of its refinements
repliesGroupedByNode.keys foreach { node =>
updateCountsPerNode(clause, node, repliesGroupedByNode, currentNodeState, nodeName)
}
// Re-check the clause for expansion
expandRule(clause, delta, breakTiesThreshold, minSeenExmpls, nodeName, params, logger)
}
  /*
   * Updates, in place, the counts kept for clause `clause` with respect to node
   * `nodeName`, using the stats received from that node: the clause's countsPerNode
   * map and accumulated totals are refreshed, and likewise for each of its
   * refinements. The replies map is a (k,v) map where k is a node id and v is a
   * stats object sent from node k for the clause.
   * */
def updateCountsPerNode(clause: Clause, nodeName: String, replies: Map[String, Structures.Stats], currentNodeState: String, currentlyOnNode: String): Unit = {
val receivedStats = replies.getOrElse(nodeName,
throw new RuntimeException(s"$currentNodeState Could not find node's name $nodeName as key in the nodes-stats map. The map is $replies")
)
val parentClauseStats = receivedStats.parentStats
val refinementsStats = receivedStats.refinementsStats
clause.countsPerNode(nodeName) = parentClauseStats // update the countsPerNode map
clause.updateTotalCounts(currentlyOnNode) // Update the accumulated counts variables
// just to be on the safe side...
if (refinementsStats.size != clause.refinements.length) {
//throw new RuntimeException(s"$currentNodeState Problem with refinements reply!")
}
clause.refinements.foreach{ ref =>
/*
val refStats = refinementsStats.getOrElse(ref.uuid,
throw new RuntimeException(s"$currentNodeState Refinement ${ref.uuid} not found in the returned stats map"))
*/
val refStats = refinementsStats.getOrElse(ref.uuid, ClauseStats())
ref.countsPerNode(nodeName) = refStats // update the refinement's countsPerNode map
ref.updateTotalCounts(currentlyOnNode) // Update the accumulated counts variables
}
}
def copyClause(c: Clause) = {
def basicopy(clause: Clause) = {
val copy_ = Clause(head = clause.head, body = clause.body, uuid = clause.uuid)
//copy_.uuid = clause.uuid
copy_.tps = clause.tps
copy_.fps = clause.fps
copy_.fns = clause.fns
copy_.seenExmplsNum = clause.seenExmplsNum
copy_.countsPerNode = clause.countsPerNode
//copy_.generatedAtNode = clause.generatedAtNode
// don't copy these, there's no need (nothing changes in the parent clause or the support set) and copying
// it makes it messy to retrieve ids in other nodes
copy_.parentClause = clause.parentClause
copy_.supportSet = clause.supportSet
copy_
}
val copy = basicopy(c)
val refinementsCopy = c.refinements.map(ref => basicopy(ref))
copy.refinements = refinementsCopy
copy
}
}
| 9,174 | 40.894977 | 160 | scala |
OLED | OLED-master/src/main/scala/oled/functions/CoreFunctions.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.functions
import java.text.DecimalFormat
import app.runutils.{Globals, RunningOptions}
import com.mongodb.casbah.MongoClient
import logic.Examples.Example
import logic._
import utils._
import utils.Implicits._
/**
* Created by nkatz on 20/6/2017.
*/
trait CoreFunctions {
/**
* This trait contains functionality used by all versions of OLED.
*
*/
def filterTriedRules(T: Theory, newRules: Iterable[Clause], logger: org.slf4j.Logger) = {
val out = newRules.filter { newRule =>
val bottomClauses = T.clauses.map(x => x.supportSet.clauses.head)
val bottom = newRule.supportSet.clauses.head
//!bottomClauses.exists(x => x.thetaSubsumes(bottom) && bottom.thetaSubsumes(x))
!bottomClauses.exists(x => x.thetaSubsumes(bottom)) // this suffices for it to be excluded.
}
if (out.size != newRules.size) logger.info("Dropped new clause (repeated bottom clause)")
out
}
/* Determines if a fluent holds initially in a mini-batch.
* This is used in the functionality that supports learning
* strongly initiated fluents. If we know that a fluent is strongly
* initiated then we don't learn new rules from mini-batches where the fluent holds initially.*/
def holdsInitially(e: Example) = {
if (e.annotation.isEmpty) {
false
} else {
val timeAll = e.narrative.map(x => Literal.parse(x).terms.last.tostring.toInt).sorted
val labelsTime = e.annotation.map(x => Literal.parse(x).terms.last.tostring.toInt).sorted
timeAll.head == labelsTime.head
}
}
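  /* Worked example (the atom forms are illustrative): with
   * narrative = List("happensAt(walking(id1),10)", "happensAt(active(id2),20)") and
   * annotation = List("holdsAt(moving(id1,id2),10)"), the earliest narrative and
   * annotation time points coincide (10), so the fluent is taken to hold initially. */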
  def startNewRules_withInertia(e: Example) = {
    if (e.annotation.isEmpty) {
      false
    } else {
      // TODO (stub in the original code): try to abduce initiation & termination
      // atoms with inertia and, if some abducibles are returned, construct new
      // rules from them. Until then no new rules are started here, so we return
      // false to keep the method well-typed.
      false
    }
  }
/* Online pruning. */
def pruneRules(topTheory: Theory, inps: RunningOptions, logger: org.slf4j.Logger) = {
// This doesn't work
/*
val pruned = topTheory.clauses.foldLeft(List[Clause]()){ (keep, clause) =>
val epsilon = Utils.hoeffding(inps.delta, clause.seenExmplsNum)
val meanPruningScore = clause.meanScorePruning(inps.pruneThreshold)
//if (this.pruningThreshold - meanScore > epsilon && clause.seenExmplsNum > minSeenExmpls) {
if (meanPruningScore > epsilon && clause.seenExmplsNum > inps.minSeenExmpls*10) {
logger.info(s"\nPruned clause:\n${clause.tostring}\nMean score" +
s" so far: ${clause.meanScorePruning(inps.pruneThreshold)} | tps: ${clause.tps} fps: ${clause.fps}, fns: ${clause.fns}")
keep
} else {
keep :+ clause
}
}
Theory(pruned)
*/
val (keep, prune) = topTheory.clauses.foldLeft(List[Clause](), List[Clause]()) { (accum, clause) =>
if (clause.seenExmplsNum > inps.pruneAfter && clause.score < inps.pruneThreshold) {
(accum._1, accum._2 :+ clause)
} else {
(accum._1 :+ clause, accum._2)
}
}
prune.foreach(x => logger.info(s"\nPruned clause\n${x.showWithStats}"))
Theory(keep)
}
// Simply remove clauses with tps=0 (have been over-specialized, no chance to recover)
def pruneRulesNaive(topTheory: Theory, inps: RunningOptions, logger: org.slf4j.Logger) = {
val (keep, prune) = topTheory.clauses.foldLeft(List[Clause](), List[Clause]()) { (accum, clause) =>
if (clause.tps == 0) {
//val ruleSize = clause.body.length
//if (ruleSize >= 4 && clause.score <= 0.4 ) {
(accum._1, accum._2 :+ clause)
} else {
(accum._1 :+ clause, accum._2)
}
}
prune.foreach(x => logger.info(s"\nPruned clause\n${x.showWithStats}"))
Theory(keep)
}
def generateNewBottomClauses(topTheory: Theory, e: Example, initorterm: String, globals: Globals) = {
val terminatedOnly = if (initorterm == "terminatedAt") true else false
val specialBKfile = if (initorterm == "initiatedAt") globals.BK_INITIATED_ONLY else globals.BK_TERMINATED_ONLY
val (_, varKernel) =
LogicUtils.generateKernel(e.toMapASP, learningTerminatedOnly = terminatedOnly, bkFile = specialBKfile, globals = globals)
val bottomTheory = topTheory.clauses flatMap (x => x.supportSet.clauses)
val goodKernelRules = varKernel.filter(newBottomRule => !bottomTheory.exists(supportRule => newBottomRule.thetaSubsumes(supportRule)))
goodKernelRules
}
def generateNewBottomClausesNoEC(topTheory: Theory, e: Example, globals: Globals) = {
val specialBKfile = globals.BK_WHOLE
val (_, varKernel) = LogicUtils.generateKernel(e.toMapASP, bkFile = specialBKfile, globals = globals)
val bottomTheory = topTheory.clauses flatMap (x => x.supportSet.clauses)
val goodKernelRules = varKernel.filter(newBottomRule => !bottomTheory.exists(supportRule => newBottomRule.thetaSubsumes(supportRule)))
goodKernelRules
}
def getnerateNewBottomClauses_withInertia(topTheory: Theory, e: Example, targetClass: String, globals: Globals) = {
val specialBKfile = globals.ABDUCE_WITH_INERTIA
val (_, varKernel) = LogicUtils.generateKernel(e.toMapASP, bkFile = specialBKfile, globals = globals)
val filteredKernel = varKernel.filter(p => p.head.functor == targetClass)
val bottomTheory = topTheory.clauses flatMap (x => x.supportSet.clauses)
val goodKernelRules = filteredKernel.filter(newBottomRule => !bottomTheory.exists(supportRule => newBottomRule.thetaSubsumes(supportRule)))
goodKernelRules map { x =>
val c = Clause(head = x.head, body = List())
c.addToSupport(x)
c
}
}
def showInfo(c: Clause, c1: Clause, c2: Clause, hoeffding: Double, observedDiff: Double, n: Int, inps: RunningOptions) = {
def format(x: Double) = {
val defaultNumFormat = new DecimalFormat("0.############")
defaultNumFormat.format(x)
}
val showRefs = inps.showRefs
val weightLearn = inps.weightLean
if (showRefs) {
if (!weightLearn) {
s"\n===========================================================\n" +
s"\nClause (score: ${c.score} | tps: ${c.tps} fps: ${c.fps} fns: ${c.fns})\n\n${c.tostring}\n\nwas refined to" +
s" (new score: ${c1.score} | tps: ${c1.tps} fps: ${c1.fps} fns: ${c1.fns})\n\n${c1.tostring}\n\nε: $hoeffding, ΔG: $observedDiff, examples used: $n" +
//s"\nall refs: \n\n ${c.refinements.sortBy(z => -z.score).map(x => x.tostring+" "+" | score "+x.score+" | similarity "+similarity(x)).mkString("\n")}" +
s"\nall refs: \n\n ${c.refinements.sortBy(z => (-z.score, z.body.length + 1)).map(x => x.tostring + " | score " + x.score + " (tps|fps|fns): " + (x.tps, x.fps, x.fns)).mkString("\n")}" +
s"\n===========================================================\n"
} else {
s"\n===========================================================\n" +
s"\nClause (score: ${c.score} | tps: ${c.tps} fps: ${c.fps} fns: ${c.fns} | MLN-weight: ${format(c.weight)})\n\n${c.tostring}\n\nwas refined to" +
s" (new score: ${c1.score} | tps: ${c1.tps} fps: ${c1.fps} fns: ${c1.fns} | MLN-weight: ${format(c1.weight)})\n\n${c1.tostring}\n\nε: $hoeffding, ΔG: $observedDiff, examples used: $n" +
//s"\nall refs: \n\n ${c.refinements.sortBy(z => -z.score).map(x => x.tostring+" "+" | score "+x.score+" | similarity "+similarity(x)).mkString("\n")}" +
s"\nall refs: \n\n ${c.refinements.sortBy(z => (-z.score, z.body.length + 1)).map(x => x.tostring + " | score " + x.score + " (tps|fps|fns): " + (x.tps, x.fps, x.fns) + "| MLN-weight: " + format(x.weight)).mkString("\n")}" +
s"\n===========================================================\n"
}
} else {
s"\n===========================================================\n" +
s"\nClause (score: ${c.score} | tps: ${c.tps} fps: ${c.fps} fns: ${c.fns} | MLN-weight: ${format(c.weight)} | WM weight: ${format(c.w_pos)})\n\n${c.tostring}\n\nwas refined to" +
s" (new score: ${c1.score} | tps: ${c1.tps} fps: ${c1.fps} fns: ${c1.fns} | MLN-weight: ${format(c1.weight)} | WM weight: ${format(c1.w_pos)})\n\n${c1.tostring}\n\nε: $hoeffding, ΔG: $observedDiff, examples used: $n" +
//s"\nall refs: \n\n ${c.refinements.sortBy(z => -z.score).map(x => x.tostring+" "+" | score "+x.score+" | similarity "+similarity(x)).mkString("\n")}" +
//s"\nall refs: \n\n ${c.refinements.sortBy(z => (-z.score,z.body.length+1)).map(x => x.tostring+" | score "+x.score+" (tps|fps|fns): "+(x.tps,x.fps,x.fns)).mkString("\n")}" +
s"\n===========================================================\n"
}
}
def crossVal(
t: Theory,
data: Iterator[Example],
handCraftedTheoryFile: String = "",
globals: Globals,
inps: RunningOptions) = {
while (data.hasNext) {
val e = data.next()
//println(e.time)
evaluateTheory(t, e, handCraftedTheoryFile, globals)
}
val stats = t.stats
(stats._1, stats._2, stats._3, stats._4, stats._5, stats._6)
}
/* Evaluate a theory on a single batch */
def eval(t: Theory, exmpl: Example, inps: RunningOptions, inputTheoryFile: String = "") = {
evaluateTheory(t, exmpl, handCraftedTheoryFile = inputTheoryFile, inps.globals)
val stats = t.stats
t.clearStats()
(stats._1, stats._2, stats._3, stats._4, stats._5, stats._6)
}
def evaluateTheory(theory: Theory, e: Example, handCraftedTheoryFile: String = "", globals: Globals): Unit = {
val varbedExmplPatterns = globals.EXAMPLE_PATTERNS_AS_STRINGS
val coverageConstr = s"${globals.TPS_RULES}\n${globals.FPS_RULES}\n${globals.FNS_RULES}"
val t =
if (theory != Theory()) {
theory.clauses.map(x => x.withTypePreds(globals).tostring).mkString("\n")
} else {
globals.INCLUDE_BK(handCraftedTheoryFile)
}
val show = globals.SHOW_TPS_ARITY_1 + globals.SHOW_FPS_ARITY_1 + globals.SHOW_FNS_ARITY_1
val ex = e.tostring
val program = ex + globals.INCLUDE_BK(globals.BK_CROSSVAL) + t + coverageConstr + show
val f = Utils.getTempFile("isConsistent", ".lp")
Utils.writeLine(program, f, "overwrite")
val answerSet = ASP.solve(task = Globals.INFERENCE, aspInputFile = f)
f.delete()
if (answerSet.nonEmpty) {
val atoms = answerSet.head.atoms
atoms.foreach { a =>
val lit = Literal.parse(a)
val inner = lit.terms.head
lit.predSymbol match {
case "tps" => theory.tps += inner.tostring
case "fps" => theory.fps += inner.tostring
case "fns" => theory.fns += inner.tostring
}
}
}
}
/*
*
* Helper method to compute the running mean for various statistics.
   * Currently not used anywhere and in need of fixing: the vars below should be
   * manually updated with new values. The vars were updated like this every time
   * an example was processed:
*
* //----------------------------------------------------------------
* // Getting some runtime stats
* //val lastProcTime = res._2
* //val newMeanProcTime = getRunningMean("time",lastProcTime)
* //val newMeanGrndSize = getRunningMean("groundPrg",GlobalValues.grnd.toDouble)
* //logger.debug(s"\nExmpl ${this.exmplCounter} Mean processing time so far: $newMeanProcTime sec\nMean grnd prg size: $newMeanGrndSize")
* // Updating global vars for the next computation of running means
* //this.timeRunningMean = newMeanProcTime
* //this.groundRunningMean = newMeanGrndSize
* //this.exmplCounter += 1
* //----------------------------------------------------------------
* */
private def getRunningMean(what: String, newValue: Double) = {
// These are vars used to compute runtime statistics
// (running averages over the stream), like mean
// processing time per example and mean size of the
// ground program per example
var timeRunningMean = 0.0
var groundRunningMean = 0.0
var exmplCounter = 0
// The running average can be computed by
// ((prevAvg*n) + newValue)/(n+1)
// where n is the number of seen data points
      def runningMean(prevAvg: Double, newVal: Double, n: Int) = ((prevAvg * n) + newVal) / (n + 1)
what match {
case "time" => runningMean(timeRunningMean, newValue, exmplCounter)
case "groundPrg" => runningMean(groundRunningMean, newValue, exmplCounter)
}
}
}
| 13,146 | 45.45583 | 234 | scala |
OLED | OLED-master/src/main/scala/oled/functions/DistributedOLEDFunctions.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.functions
import app.runutils.{Globals, RunningOptions}
import logic.Examples.Example
import logic.{Clause, Literal, Theory}
import oled.distributed.Structures.ClauseStats
/**
* Created by nkatz on 6/21/17.
*/
/**
*
* This object contains functionality used by the distributed version of OLED only.
*
*/
object DistributedOLEDFunctions extends CoreFunctions {
def generateNewRules(topTheory: Theory, e: Example, initorterm: String, globals: Globals, otherNodeNames: List[String]) = {
val bcs_ = generateNewBottomClauses(topTheory, e, initorterm, globals)
val bcs = bcs_.filter(p => p.head.functor.contains(initorterm))
bcs map { x =>
val c = Clause(head = x.head, body = List())
c.addToSupport(x)
otherNodeNames.foreach{ node =>
c.countsPerNode(node) = new ClauseStats(0, 0, 0, 0)
}
// In the distributed setting, refinements must be generated right after the construction of a clause,
// in order to copy them in the clause copies that will be sent to other nodes (ensure same uuid's etc.)
c.generateCandidateRefs(globals)
c
}
}
def showInfo(c: Clause, c1: Clause, c2: Clause, hoeffding: Double, observedDiff: Double, n: Int, showRefs: Boolean, onNode: String) = {
val score = (clause: Clause) => clause.distScore
if (showRefs) {
s"\n===========================================================\n" +
s"\nClause (score: ${score(c)} | ${c.showCountsPerNode(onNode)}\n\n${c.tostring}\n\nwas refined to" +
s" (new score: ${score(c1)} | ${c1.showCountsPerNode(onNode)}\n\n${c1.tostring}\n\nε: $hoeffding, ΔG: $observedDiff, examples used: $n" +
s"\nall refs (total tp/fp/fn counts):\n\n${
c.refinements.sortBy(z => (-score(z), z.body.length + 1)).map(x => x.tostring + " | " +
"score " + score(x) + x.showCountsPerNode(onNode)).mkString("\n")
}" +
s"\n===========================================================\n"
} else {
s"\n===========================================================\n" +
s"\nClause (score: ${score(c)} | ${c.showCountsPerNode(onNode)}\n\n${c.tostring}\n\nwas refined to" +
s" (new score: ${score(c1)} | ${c1.showCountsPerNode(onNode)}\n\n${c1.tostring}\n\nε: $hoeffding, ΔG: $observedDiff, examples used: $n" +
//s"\nall refs (total tp/fp/fn counts):\n\n${c.refinements.sortBy(z => (-score(z), z.body.length+1)).map(x => x.tostring+" | " +
// "score "+score(x)+x.showCountsPerNode(onNode)).mkString("\n")}" +
s"\n===========================================================\n"
}
}
/*
* The rest is for clause expansion in the distributed setting. This should be refactored,
  * since it is almost the same as the functions used in the monolithic setting.
* */
private def score(clause: Clause) = clause.distScore
def rightWay(parentRule: Clause, delta: Double, breakTiesThreshold: Double, minSeenExmpls: Int, minTpsRequired: Int = 0) = {
val (observedDiff, best, secondBest) = parentRule.distributedMeanDiff
val epsilon = utils.Utils.hoeffding(delta, parentRule.getTotalSeenExmpls)
val passesTest = if (epsilon < observedDiff) true else false
val tie = if (observedDiff < epsilon && epsilon < breakTiesThreshold && parentRule.getTotalSeenExmpls >= minSeenExmpls) true else false
val couldExpand = if (minTpsRequired != 0) (passesTest || tie) && best.getTotalTPs > minTpsRequired else passesTest || tie
(couldExpand, epsilon, observedDiff, best, secondBest)
}
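  /* Illustrative only: the actual bound is computed by utils.Utils.hoeffding.
   * A Hoeffding bound of the kind used in rightWay above typically has the form
   * epsilon = sqrt(R^2 * ln(1/delta) / (2*n)), with range R = 1 for scores in [0,1].
   * A minimal sketch under that assumption: */
  private def hoeffdingBoundSketch(delta: Double, n: Int): Double =
    math.sqrt(math.log(1.0 / delta) / (2 * n))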
def expandRule(parentRule: Clause, delta: Double, breakTiesThreshold: Double,
minSeenExmpls: Int, nodeName: String, params: RunningOptions, logger: org.slf4j.Logger) = {
val minTpsRequired = params.minTpsRequired
val (couldExpand, epsilon, observedDiff, best, secondBest) = rightWay(parentRule, delta, breakTiesThreshold, minSeenExmpls, minTpsRequired)
if (couldExpand) {
// This is the extra test that I added at Feedzai
val extraTest =
if (secondBest != parentRule) (score(best) > score(parentRule)) && (score(best) - score(parentRule) > epsilon)
else score(best) > score(parentRule)
if (extraTest) {
val refinedRule = best
logger.info(DistributedOLEDFunctions.showInfo(parentRule, best, secondBest, epsilon, observedDiff, parentRule.seenExmplsNum, params.showRefs, nodeName))
refinedRule.seenExmplsNum = 0 // zero the counter
refinedRule.totalSeenExmpls = 0 // zero the counter
refinedRule.supportSet = parentRule.supportSet // only one clause here
// In the distributed setting, refinements must be generated right after the construction of a clause,
// in order to copy them in the clause copies that will be sent to other nodes (ensure same uuid's etc.)
refinedRule.generateCandidateRefs(params.globals)
(true, refinedRule)
} else {
logger.info(s"Hoeffding test failed (clause ${parentRule.uuid}) not expanded")
(false, parentRule)
}
} else {
logger.info(s"Hoeffding test failed (clause ${parentRule.uuid}) not expanded")
(false, parentRule)
}
}
def shouldExpand(parentRule: Clause, delta: Double, breakTiesThreshold: Double, minSeenExmpls: Int) = {
val (couldExpand, epsilon, observedDiff, best, secondBest) = rightWay(parentRule, delta, breakTiesThreshold, minSeenExmpls)
if (couldExpand) {
val extraTest =
if (secondBest != parentRule) (score(best) > score(parentRule)) && (score(best) - score(parentRule) > epsilon)
else score(best) > score(parentRule)
if (extraTest) {
true
} else {
false
}
} else {
false
}
}
}
| 6,429 | 45.258993 | 160 | scala |
OLED | OLED-master/src/main/scala/oled/functions/NonBlockingOLEDFunctions.scala | /*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.functions
import app.runutils.{Globals, RunningOptions}
import logic.Examples.Example
import logic.{Clause, Theory}
import oled.distributed.Structures.ClauseStats
/**
* Created by nkatz on 7/10/17.
*/
object NonBlockingOLEDFunctions extends CoreFunctions {
def generateNewRules(topTheory: Theory, e: Example, initorterm: String, globals: Globals, otherNodeNames: List[String]) = {
val bcs_ = generateNewBottomClauses(topTheory, e, initorterm, globals)
val bcs = bcs_.filter(p => p.head.functor.contains(initorterm))
bcs map { x =>
val c = Clause(head = x.head, body = List())
c.addToSupport(x)
otherNodeNames.foreach{ node =>
c.countsPerNode(node) = new ClauseStats(0, 0, 0, 0)
}
// In the distributed setting, refinements must be generated right after the construction of a clause,
// in order to copy them in the clause copies that will be sent to other nodes (ensure same uuid's etc.)
c.generateCandidateRefs(globals)
c
}
}
def showInfo(c: Clause, c1: Clause, c2: Clause, hoeffding: Double, observedDiff: Double, n: Int, showRefs: Boolean) = {
val score = (clause: Clause) => clause.distScore
if (showRefs) {
s"\n===========================================================\n" +
s"\nClause (score: ${score(c)})\n\n${c.tostring}\n\nwas refined to" +
s" (new score: ${score(c1)})\n\n${c1.tostring}\n\nε: $hoeffding, ΔG: $observedDiff, examples used: $n" +
//s"\nall refs: \n\n ${c.refinements.sortBy(z => -z.score).map(x => x.tostring+" "+" | score "+x.score+" | similarity "+similarity(x)).mkString("\n")}" +
s"\nall refs: \n\n ${c.refinements.sortBy(z => (-score(z), z.body.length + 1)).map(x => x.tostring + " | score " + score(x)).mkString("\n")}" +
s"\n===========================================================\n"
} else {
s"\n===========================================================\n" +
s"\nClause (score: ${score(c)})\n\n${c.tostring}\n\nwas refined to" +
s" (new score: ${score(c1)})\n\n${c1.tostring}\n\nε: $hoeffding, ΔG: $observedDiff, examples used: $n" +
//s"\nall refs: \n\n ${c.refinements.sortBy(z => -z.score).map(x => x.tostring+" "+" | score "+x.score+" | similarity "+similarity(x)).mkString("\n")}" +
//s"\nall refs: \n\n ${c.refinements.sortBy(z => (-z.score,z.body.length+1)).map(x => x.tostring+" | score "+x.score+" (tps|fps|fns): "+(x.tps,x.fps,x.fns)).mkString("\n")}" +
s"\n===========================================================\n"
}
}
/*
* The rest is for clause expansion in the distributed setting. This should be refactored,
  * since it is almost the same as the functions used in the monolithic setting.
* */
private def score(clause: Clause) = clause.distScore
def rightWay(parentRule: Clause, delta: Double, breakTiesThreshold: Double, minSeenExmpls: Int, minTpsRequired: Int = 0) = {
val (observedDiff, best, secondBest) = parentRule.distributedMeanDiff
val epsilon = utils.Utils.hoeffding(delta, parentRule.getTotalSeenExmpls)
val passesTest = if (epsilon < observedDiff) true else false
val tie = if (observedDiff < epsilon && epsilon < breakTiesThreshold && parentRule.getTotalSeenExmpls >= minSeenExmpls) true else false
val couldExpand = if (minTpsRequired != 0) (passesTest || tie) && best.getTotalTPs > minTpsRequired else passesTest || tie
(couldExpand, epsilon, observedDiff, best, secondBest)
}
def expandRule(parentRule: Clause, delta: Double, breakTiesThreshold: Double,
minSeenExmpls: Int, nodeName: String, params: RunningOptions,
logger: org.slf4j.Logger) = {
val minTpsRequired = params.minTpsRequired
val (couldExpand, epsilon, observedDiff, best, secondBest) = rightWay(parentRule, delta, breakTiesThreshold, minSeenExmpls, minTpsRequired)
if (couldExpand) {
// This is the extra test that I added at Feedzai
val extraTest =
if (secondBest != parentRule) (score(best) > score(parentRule)) && (score(best) - score(parentRule) > epsilon)
else score(best) > score(parentRule)
if (extraTest) {
val refinedRule = best
logger.info(NonBlockingOLEDFunctions.showInfo(parentRule, best, secondBest, epsilon, observedDiff, parentRule.seenExmplsNum, params.showRefs))
refinedRule.seenExmplsNum = 0 // zero the counter
refinedRule.totalSeenExmpls = 0 // zero the counter
refinedRule.supportSet = parentRule.supportSet // only one clause here
// In the distributed setting, refinements must be generated right after the construction of a clause,
// in order to copy them in the clause copies that will be sent to other nodes (ensure same uuid's etc.)
refinedRule.generateCandidateRefs(params.globals)
(true, refinedRule)
} else {
logger.info(s"Hoeffding test failed (clause ${parentRule.uuid}) not expanded")
(false, parentRule)
}
} else {
logger.info(s"Hoeffding test failed (clause ${parentRule.uuid}) not expanded")
(false, parentRule)
}
}
def shouldExpand(parentRule: Clause, delta: Double, breakTiesThreshold: Double, minSeenExmpls: Int) = {
val (couldExpand, epsilon, observedDiff, best, secondBest) = rightWay(parentRule, delta, breakTiesThreshold, minSeenExmpls)
if (couldExpand) {
val extraTest =
if (secondBest != parentRule) (score(best) > score(parentRule)) && (score(best) - score(parentRule) > epsilon)
else score(best) > score(parentRule)
if (extraTest) {
true
} else {
false
}
} else {
false
}
}
}
| 6,371 | 47.272727 | 183 | scala |