code (string, lengths 5-1M) | repo_name (string, lengths 5-109) | path (string, lengths 6-208) | language (string, 1 value) | license (string, 15 values) | size (int64, 5-1M)
---|---|---|---|---|---|
package basic
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import scala.concurrent.duration._
class BasicExampleSimulationSucceeds extends Simulation {
val httpProtocol = http
.baseUrl("http://google.com")
val scn = scenario("Scenario name")
.exec(
http("Get google")
.get("/")
.check(status.is(200)))
setUp(scn.inject(atOnceUsers(1)))
.protocols(httpProtocol)
.assertions(global.successfulRequests.percent.is(100))
}
| Pravoru/gatling-remote-sbt | src/sbt-test/gatling-remote-sbt/runTests/src/test/scala/basic/BasicExampleSimulationSucceeds.scala | Scala | mit | 487 |
package smtlib
package theories
import parser.Terms._
import Operations._
object Ints {
object IntSort {
def apply(): Sort = {
Sort(Identifier(SSymbol("Int")))
}
def unapply(sort: Sort): Boolean = sort match {
case Sort(Identifier(SSymbol("Int"), Seq()), Seq()) => true
case _ => false
}
}
object NumeralLit {
def apply(value: BigInt): Term = SNumeral(value)
def unapply(term: Term): Option[BigInt] = term match {
case SNumeral(value) => Some(value)
case _ => None
}
}
object Divisible {
def apply(n: BigInt, t: Term): Term =
FunctionApplication(
QualifiedIdentifier(Identifier(SSymbol("divisible"), Seq(SNumeral(n)))),
Seq(t)
)
def unapply(term: Term): Option[(BigInt, Term)] = term match {
case FunctionApplication(
QualifiedIdentifier(
Identifier(SSymbol("divisible"), Seq(SNumeral(n))),
None
), Seq(t)) => Some((n, t))
case _ => None
}
}
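// Added note (illustration, not in the original file): Divisible(n, t) builds the
// indexed application that SMT-LIB prints as ((_ divisible n) t); apply and
// unapply are intended to be inverses of each other.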
object Neg extends Operation1 { override val name = "-" }
object Add extends Operation2 { override val name = "+" }
object Sub extends Operation2 { override val name = "-" }
object Mul extends Operation2 { override val name = "*" }
object Div extends Operation2 { override val name = "div" }
object Mod extends Operation2 { override val name = "mod" }
object Abs extends Operation1 { override val name = "abs" }
object LessThan extends Operation2 { override val name = "<" }
object LessEquals extends Operation2 { override val name = "<=" }
object GreaterThan extends Operation2 { override val name = ">" }
object GreaterEquals extends Operation2 { override val name = ">=" }
}
}
| manoskouk/scala-smtlib | src/main/scala/smtlib/theories/Ints.scala | Scala | mit | 1,709 |
package models
import play.api.db._
import anorm._
import anorm.SqlParser._
import play.api.Play.current
import org.apache.commons.codec.digest.DigestUtils._
case class User(email: String, password: String)
object User {
val simple = {
get[String]("user.email") ~
get[String]("user.password") map {
case email~pass => User(email, pass)
}
}
def authenticate(email: String, password: String): Option[User] = {
findByEmail(email).filter { user => user.password == hash(password, user.email) }
}
private def hash(pass: String, salt: String): String = sha256Hex(salt.padTo(256, '0') + pass)
def findByEmail(email: String): Option[User] = {
DB.withConnection { implicit connection =>
SQL("SELECT * FROM user WHERE email = {email}").on(
'email -> email
).as(simple.singleOpt)
}
}
def findAll: Seq[User] = {
DB.withConnection { implicit connection =>
SQL("select * from user").as(simple *)
}
}
def create(user: User) {
DB.withConnection { implicit connection =>
SQL("INSERT INTO user VALUES ({email}, {pass})").on(
'email -> user.email,
'pass -> hash(user.password, user.email)
).executeUpdate()
}
}
}
| blendlabs/play20-stateless-auth | app/models/User.scala | Scala | apache-2.0 | 1,227 |
package lightning.model
import argonaut.Argonaut.casecodec3
import scalaz.Equal.equalA
case class Dependency(from: Node, to: Node, label: DependencyLabel)
object Dependency {
implicit val codec = casecodec3(Dependency.apply, Dependency.unapply)("from", "to", "label")
implicit val equal = equalA[Dependency]
}
| lancewalton/lightning | model/src/main/scala/lightning/model/Dependency.scala | Scala | mit | 317 |
package com.github.sebruck
import java.io.IOException
import java.net.ServerSocket
import redis.embedded.RedisServer
import scala.annotation.tailrec
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}
trait EmbeddedRedis {
@tailrec
private[sebruck] final def getFreePort: Int = {
Try(new ServerSocket(0)) match {
case Success(socket) =>
val port = socket.getLocalPort
socket.close()
port
case Failure(_: IOException) => getFreePort
case Failure(e) => throw e
}
}
type Port = Int
def withRedis[T](port: Int = getFreePort)(f: Port => T): T = {
val redisServer = new RedisServer(port)
redisServer.start()
val result = f(port)
redisServer.stop()
result
}
def withRedisAsync[T](port: Int = getFreePort)(f: Port => Future[T])(
implicit ec: ExecutionContext): Future[T] = {
val redisServer = new RedisServer(port)
redisServer.start()
f(port).map { result =>
redisServer.stop()
result
}
}
def startRedis(port: Int = getFreePort): RedisServer = {
val redisServer = new RedisServer(port)
redisServer.start()
redisServer
}
def stopRedis(redisServer: RedisServer): Unit = redisServer.stop()
}
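// Added usage sketch (not part of the original file): a test can mix in
// EmbeddedRedis and wrap its body in withRedis, which picks a free port,
// starts an embedded server and stops it once the block returns. The object
// name and connection string below are purely illustrative.
object EmbeddedRedisUsageExample extends EmbeddedRedis {
  def demo(): String = withRedis() { port =>
    s"redis://localhost:$port" // a real test would connect a Redis client here
  }
}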
| Sebruck/scalatest-embedded-redis | src/main/scala/com/github/sebruck/EmbeddedRedis.scala | Scala | mit | 1,283 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.tools.commands
import com.beust.jcommander.{JCommander, Parameters}
import com.typesafe.scalalogging.LazyLogging
import org.locationtech.geomesa.tools.commands.DescribeCommand._
import org.locationtech.geomesa.utils.geotools.RichAttributeDescriptors.RichAttributeDescriptor
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
import scala.collection.JavaConversions._
class DescribeCommand(parent: JCommander) extends CommandWithCatalog(parent) with LazyLogging {
override val command = "describe"
override val params = new DescribeParameters
def execute() = {
logger.info(s"Describing attributes of feature '${params.featureName}' from catalog table '$catalog'...")
try {
val sft = ds.getSchema(params.featureName)
val sb = new StringBuilder()
sft.getAttributeDescriptors.foreach { attr =>
sb.clear()
val name = attr.getLocalName
// TypeName
sb.append(name)
sb.append(": ")
sb.append(attr.getType.getBinding.getSimpleName)
if (sft.getDtgField.exists(_ == name)) sb.append(" (ST-Time-index)")
if (sft.getGeometryDescriptor == attr) sb.append(" (ST-Geo-index)")
if (attr.isIndexed) sb.append(" (Indexed)")
if (attr.getDefaultValue != null) sb.append("- Default Value: ").append(attr.getDefaultValue)
println(sb.toString())
}
} catch {
case npe: NullPointerException =>
logger.error(s"Error: feature '${params.featureName}' not found. Check arguments...", npe)
case e: Exception =>
logger.error(s"Error describing feature '${params.featureName}': " + e.getMessage, e)
}
}
}
object DescribeCommand {
@Parameters(commandDescription = "Describe the attributes of a given feature in GeoMesa")
class DescribeParameters extends FeatureParams {}
}
| vpipkt/geomesa | geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/commands/DescribeCommand.scala | Scala | apache-2.0 | 2,367 |
import octocatcher.gpodder.api.Directory._
import scala.concurrent.ExecutionContext.Implicits._
import org.specs2.mutable._
class DirectorySpec extends Specification {
"Directory" should {
"getTopTags" in {
getTopTags(5).onSuccess {
//case x => x
case response => println(response)
}
//true must_== true
}
}
}
| fthomas/octo-catcher | src/test/scala/gpodder/api/DirectorySpec.scala | Scala | apache-2.0 | 360 |
package com.atomist.source.file
import java.io.{ByteArrayInputStream, File}
import java.nio.file.attribute.PosixFileAttributeView
import java.nio.file.{FileSystems, Files, Paths}
import com.atomist.source._
import com.atomist.source.file.ClassPathArtifactSource.{classPathResourceToFile, toArtifactSource}
import com.atomist.source.filter.{AtomistIgnoreFileFilter, GitDirFilter, GitignoreFileFilter}
import com.atomist.util.BinaryDecider.isBinaryContent
import org.apache.commons.io.FileUtils
import org.scalatest._
import scala.collection.JavaConverters._
class FileSystemArtifactSourceTest extends FlatSpec with Matchers {
import FileSystemArtifactSourceTest._
val fWriter = new FileSystemArtifactSourceWriter
"FileSystemArtifactSource" should "handle classpath directory not found" in {
an[ArtifactSourceException] should be thrownBy toArtifactSource("this is complete nonsense")
}
it should "find single file and verify contents" in {
val classpathSource = toArtifactSource("java-source/HelloWorldService.java")
val artifacts = classpathSource.artifacts
val files = artifacts.filter(_.isInstanceOf[FileArtifact])
artifacts.size should be > 0
val aFile = files.head.asInstanceOf[FileArtifact]
aFile.contentLength should be > 0L
aFile.content should have size aFile.contentLength
isBinaryContent(aFile.content) shouldBe false
if (PosixSupported)
aFile.mode should be(FileArtifact.DefaultMode)
}
it should "find single image file" in {
val classpathSource = toArtifactSource("spring-boot/web-template/src/main/resources/atomist-logo-horiz.png")
val artifacts = classpathSource.artifacts
val files = artifacts.filter(_.isInstanceOf[FileArtifact])
artifacts.size should be > 0
val aFile = files.head.asInstanceOf[FileArtifact]
aFile.contentLength should be > 0L
isBinaryContent(aFile.content) shouldBe true
}
it should "find single binary file" in {
val classpathSource = toArtifactSource("binary.dat")
val artifacts = classpathSource.artifacts
val files = artifacts.filter(_.isInstanceOf[FileArtifact])
artifacts.size should be > 0
val aFile = files.head.asInstanceOf[FileArtifact]
aFile.contentLength should be > 0L
isBinaryContent(aFile.content) shouldBe true
}
it should "find single binary executable file" in {
val classpathSource = toArtifactSource("binary-executable.dat")
val artifacts = classpathSource.artifacts
val files = artifacts.filter(_.isInstanceOf[FileArtifact])
artifacts.size should be > 0
val aFile = files.head.asInstanceOf[FileArtifact]
aFile.contentLength should be > 0L
isBinaryContent(aFile.content) shouldBe true
if (PosixSupported)
aFile.mode should be(FileArtifact.ExecutableMode)
}
it should "find directory" in {
val artifacts = AtomistTemplatesSource.artifacts
artifacts.exists(_.name contains "web-template") shouldBe true
}
it should "find empty directory" in {
val artifacts = AtomistTemplatesSource.artifacts
artifacts.exists(_.name contains "empty-dir") shouldBe true
}
it should "find all files via flatten" in {
validateTargetDirectory(AtomistTemplatesSource)
}
it should "find file under directory starting with /" in {
val dir = Files.createTempDirectory(s"tmp_${System.currentTimeMillis}").toFile
dir.deleteOnExit()
val subDir = Files.createDirectory(Paths.get(dir.getPath, "src"))
val tempFile = Files.createFile(Paths.get(subDir.toString, "tmp.txt")).toFile
FileUtils.copyToFile(new ByteArrayInputStream("contents".getBytes), tempFile)
val fid = SimpleFileSystemArtifactSourceIdentifier(dir)
val as = FileSystemArtifactSource(fid)
as.findFile(s"/src/${tempFile.getName}") shouldBe defined
FileUtils.deleteQuietly(dir)
as.findDirectory(dir.toString) shouldBe empty
}
// TODO some of these tests are more generic ArtifactSource tests
it should "be able to cache" in {
val classpathSource = AtomistTemplatesSource
classpathSource.allFiles.exists(_.isCached) shouldBe false
validateTargetDirectory(classpathSource)
val cachedCopy = classpathSource.cached
cachedCopy.allFiles.exists(!_.isCached) shouldBe false
validateTargetDirectory(cachedCopy)
}
it should "be able to filter files" in {
val s = AtomistTemplatesSource / "atomistTemplates"
val files = s.allFiles
files.exists(_.name contains ".vm") shouldBe true
val filtered = s.filter(_ => true, !_.name.contains(".vm"))
filtered.allFiles.exists(_.name contains ".vm") shouldBe false
withClue("should leave nothing after filter") {
filtered.allFiles shouldBe empty
}
}
it should "be able to filter directories" in {
val s = AtomistTemplatesSource
s.allFiles.exists(_.name contains "Application") shouldBe true
val filtered = s.filter(!_.name.contains("spring"), _ => true)
filtered.allFiles.exists(_.name contains "Java") shouldBe false
}
it should "be able to find existing directory" in {
val s = AtomistTemplatesSource
s.directories.nonEmpty shouldBe true
s.findDirectory("atomistTemplates") shouldBe defined
}
it should "not be able to find bogus directory" in {
val s = AtomistTemplatesSource
s.directories.nonEmpty shouldBe true
s.findDirectory("xsdfsdfsdfsdf") shouldBe empty
}
it should "reject bogus file rootPath" in {
val f: File = new File("/this/is/not/a/real.rootPath")
val fsid = FileSystemArtifactSourceIdentifier(f)
an[ArtifactSourceException] should be thrownBy new FileSystemArtifactSource(fsid)
}
it should "handle filtering source with no .gitignore" in {
val zid = ignoreFiles1ZipId
val zipSource = ZipFileArtifactSourceReader.fromZipSource(zid)
val tmpDir = Files.createTempDirectory(null).toFile
tmpDir.deleteOnExit()
val fid = FileSystemArtifactSourceIdentifier(tmpDir)
fWriter.write(zipSource, fid, SimpleSourceUpdateInfo(getClass.getName))
val as = FileSystemArtifactSource(fid)
as.findDirectory(".atomist/node_modules") shouldBe defined
}
it should "handle filtering source with negated 'node_modules' in .gitignore" in {
val zid = ignoreFiles2ZipId
val zipSource = ZipFileArtifactSourceReader.fromZipSource(zid)
val tmpDir = Files.createTempDirectory(null).toFile
tmpDir.deleteOnExit()
val fid = FileSystemArtifactSourceIdentifier(tmpDir)
fWriter.write(zipSource, fid, SimpleSourceUpdateInfo(getClass.getName))
val as = FileSystemArtifactSource(fid, GitignoreFileFilter(tmpDir.getPath))
as.findDirectory(".atomist/node_modules") shouldBe defined
}
it should "handle filtering source with 'node_modules' in .atomist/ignore" in {
val zid = ignoreFiles3ZipId
val zipSource = ZipFileArtifactSourceReader.fromZipSource(zid)
val tmpDir = Files.createTempDirectory(null).toFile
tmpDir.deleteOnExit()
val fid = FileSystemArtifactSourceIdentifier(tmpDir)
val f = fWriter.write(zipSource, fid, SimpleSourceUpdateInfo(getClass.getName))
val path = Paths.get(f.getAbsolutePath, "dot-atomist-ignored-node_modules").toString
val as = FileSystemArtifactSource(fid,
GitignoreFileFilter(path),
AtomistIgnoreFileFilter(path))
as.findDirectory(".atomist/node_modules") shouldBe empty
as.findDirectory("target") shouldBe empty
}
it should "handle filtering .git and target from artifact-source" in {
val rootPath = System.getProperty("user.dir")
val fid = FileSystemArtifactSourceIdentifier(Paths.get(rootPath).toFile)
// val start = System.currentTimeMillis()
val as = FileSystemArtifactSource(fid,
GitignoreFileFilter(rootPath),
GitDirFilter(rootPath))
as.findDirectory("src") shouldBe defined
as.findDirectory(".git") shouldBe empty
as.findDirectory("target") shouldBe empty
// println(s"elapsed time = ${System.currentTimeMillis() - start} ms")
}
it should "handle filtering of root-level directories prefixed with /" in {
val rootPath = System.getProperty("user.dir")
val fid = FileSystemArtifactSourceIdentifier(Paths.get(rootPath).toFile)
// val start = System.currentTimeMillis()
val as = FileSystemArtifactSource(fid,
GitignoreFileFilter(rootPath),
AtomistIgnoreFileFilter(rootPath),
GitDirFilter(rootPath))
as.findDirectory("src") shouldBe empty
as.findDirectory(".git") shouldBe empty
as.findDirectory("target") shouldBe empty
// println(s"elapsed time = ${System.currentTimeMillis() - start} ms")
}
it should "delete files by name and path" in {
val name = ".atomist/build/cli-build.yml"
val classpathSource = toArtifactSource("foo")
val filesSize = classpathSource.allFiles.size
val dirsSize = classpathSource.allDirectories.size
val artifactsSize = classpathSource.artifacts.size
val allArtifactsSize = classpathSource.allArtifacts.size
classpathSource.findFile(name) shouldBe defined
val newSource = classpathSource delete name
newSource.findFile(name) shouldBe empty
classpathSource.cachedDeltas.size shouldBe 0
newSource.cachedDeltas.size shouldBe 1
newSource.deltaFrom(classpathSource).deltas.size shouldBe 1
newSource.allFiles.size shouldBe filesSize - 1
newSource.allDirectories.size shouldBe dirsSize
newSource.artifacts.size shouldBe artifactsSize
newSource.allArtifacts.size shouldBe allArtifactsSize - 1
newSource.collisions.size shouldBe 0
}
private def validateTargetDirectory(s: ArtifactSource): Unit =
s.allFiles.exists(_.name contains ".vm")
}
object FileSystemArtifactSourceTest {
val AtomistTemplatesSource: ArtifactSource = toArtifactSource("spring-boot")
val PosixSupported: Boolean = FileSystems.getDefault.getFileStores.asScala
.exists(_.supportsFileAttributeView(classOf[PosixFileAttributeView]))
def ignoreFiles1ZipId = ZipFileInput(classPathResourceToFile("ignore-files/no-dot-git.zip"))
def ignoreFiles2ZipId = ZipFileInput(classPathResourceToFile("ignore-files/dot-git-negated-node_modules.zip"))
def ignoreFiles3ZipId = ZipFileInput(classPathResourceToFile("ignore-files/dot-atomist-ignored-node_modules.zip"))
}
| atomist/artifact-source | src/test/scala/com/atomist/source/file/FileSystemArtifactSourceTest.scala | Scala | gpl-3.0 | 10,192 |
package util
import org.joda.time._
import org.joda.time.format._
import anorm._
// A custom class to support Anorm parsing of Joda DateTime.
object AnormExtension {
val dateFormatGeneration: DateTimeFormatter = DateTimeFormat.forPattern("yyyyMMddHHmmssSS");
implicit def rowToDateTime: Column[DateTime] = Column.nonNull { (value, meta) =>
val MetaDataItem(qualified, nullable, clazz) = meta
value match {
case ts: java.sql.Timestamp => Right(new DateTime(ts.getTime))
case d: java.sql.Date => Right(new DateTime(d.getTime))
case str: java.lang.String => Right(dateFormatGeneration.parseDateTime(str))
case _ => Left(TypeDoesNotMatch("Cannot convert " + value + ":" + value.asInstanceOf[AnyRef].getClass) )
}
}
implicit val dateTimeToStatement = new ToStatement[DateTime] {
def set(s: java.sql.PreparedStatement, index: Int, aValue: DateTime): Unit = {
s.setTimestamp(index, new java.sql.Timestamp(aValue.withMillisOfSecond(0).getMillis()) )
}
}
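// Added usage sketch (not part of the original file): with the implicit
// Column[DateTime] above in scope, Anorm row parsers can read timestamp
// columns directly. The table and column names below are hypothetical.
val createdAtParser: RowParser[DateTime] = get[DateTime]("item.created_at")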
}
| Pooshlmer/scala-play-crud-example | app/util/AnormExtension.scala | Scala | apache-2.0 | 1,044 |
/*
* This file is part of Kiama.
*
* Copyright (C) 2008-2015 Anthony M Sloane, Macquarie University.
*
* Kiama is free software: you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the
* Free Software Foundation, either version 3 of the License, or (at your
* option) any later version.
*
* Kiama is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
* more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Kiama. (See files COPYING and COPYING.LESSER.) If not, see
* <http://www.gnu.org/licenses/>.
*/
/*
* This file is derived from a JastAdd implementation of PicoJava, created
* in the Department of Computer Science at Lund University. See the
* following web site for details:
*
* http://jastadd.cs.lth.se/examples/PicoJava/index.shtml
*/
package org.kiama
package example.picojava
import org.kiama.attribution.Attribution
trait TypeAnalyser {
self : Attribution with NameResolution with NullObjects with PredefinedTypes =>
import PicoJavaTree._
/**
* Is this declaration unknown?
*
* syn boolean Decl.isUnknown() = false;
* eq UnknownDecl.isUnknown() = true;
*/
val isUnknown : Decl => Boolean =
attr {
case UnknownDecl (_) => true
case _ => false
}
/**
* Return the type of a construct or unknownDecl if none.
*
* syn lazy TypeDecl Decl.type();
* syn lazy TypeDecl Exp.type();
* eq TypeDecl.type() = this;
* eq VarDecl.type() = getType().decl().type();
* eq IdnUse.type() = decl().type();
* eq Dot.type() = getIdnUse().type();
* eq BooleanLiteral.type() = booleanType();
*/
val tipe : PicoJavaNode => TypeDecl =
attr {
case t : TypeDecl => t
case v : VarDecl => tipe (decl (v.Type))
case i : IdnUse => tipe (decl (i))
case d : Dot => tipe (d.IdnUse)
case b : BooleanLiteral => booleanType (b)
case t => unknownDecl (t)
}
/**
* Is this declaration for a type that is a subtype of the
* provided type. The unknown type is a subtype of all types.
*
* syn lazy boolean TypeDecl.isSubtypeOf(TypeDecl typeDecl);
* eq TypeDecl.isSubtypeOf(TypeDecl typeDecl) = typeDecl.isSuperTypeOf(this);
* eq ClassDecl.isSubtypeOf(TypeDecl typeDecl) = typeDecl.isSuperTypeOfClassDecl(this);
* eq UnknownDecl.isSubtypeOf(TypeDecl typeDecl) = true;
*/
val isSubtypeOf : TypeDecl => TypeDecl => Boolean =
paramAttr {
typedecl => {
case UnknownDecl (_) => true
case c : ClassDecl => isSuperTypeOfClassDecl (c) (typedecl)
case t : TypeDecl => isSuperTypeOf (t) (typedecl)
}
}
/**
* Is this declaration for a type that is a supertype of the
* provided type? The unknown type is a supertype of all types.
*
* syn lazy boolean TypeDecl.isSuperTypeOf(TypeDecl typeDecl) = this == typeDecl;
* eq UnknownDecl.isSuperTypeOf(TypeDecl typeDecl) = true;
*/
val isSuperTypeOf : TypeDecl => TypeDecl => Boolean =
paramAttr {
typedecl => {
case UnknownDecl (_) => true
case t => t eq typedecl
}
}
/**
* Is this declaration for a type that is a supertype of the
* provided class type? The unknown type is a supertype of all
* class types.
*
* syn lazy boolean TypeDecl.isSuperTypeOfClassDecl(ClassDecl typeDecl) =
* this == typeDecl || typeDecl.superClass() != null && typeDecl.superClass().isSubtypeOf(this);
* eq UnknownDecl.isSuperTypeOfClassDecl(ClassDecl typeDecl) = true;
*/
val isSuperTypeOfClassDecl : ClassDecl => TypeDecl => Boolean =
paramAttr {
typedecl => {
case UnknownDecl (_) => true
case t : TypeDecl =>
(t eq typedecl) || (superClass (typedecl) != null) && (isSubtypeOf (superClass (typedecl)) (t))
}
}
/**
* Return the superclass of a class (or null if none).
*
* syn lazy ClassDecl ClassDecl.superClass();
* eq ClassDecl.superClass() {
* if (hasSuperclass() && getSuperclass().decl() instanceof ClassDecl && !hasCycleOnSuperclassChain())
* return (ClassDecl) getSuperclass().decl();
* else
* return null;
* }
*/
val superClass : ClassDecl => ClassDecl =
attr (
c =>
c.Superclass match {
case Some (i) =>
decl (i) match {
case sc : ClassDecl if !hasCycleOnSuperclassChain (c) => sc
case _ => null
}
case None => null
}
)
/**
* True if there is a cycle somewhere on the superclass chain, false otherwise.
*
* syn lazy boolean ClassDecl.hasCycleOnSuperclassChain() circular [true];
* eq ClassDecl.hasCycleOnSuperclassChain() {
* if (hasSuperclass() && getSuperclass().decl() instanceof ClassDecl) //First, check if there is a superclass
* return ((ClassDecl) getSuperclass().decl()).hasCycleOnSuperclassChain();
* else
* return false;
*/
val hasCycleOnSuperclassChain : ClassDecl => Boolean =
circular (true) (
c =>
c.Superclass match {
case Some (i) =>
decl (i) match {
case sc : ClassDecl => hasCycleOnSuperclassChain (sc)
case _ => false
}
case None => false
}
)
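// Added illustration (not in the original): for a program like
// "class A extends B {} class B extends A {}" the circular attribute reaches its
// fixed point with both class declarations reporting true, while an acyclic
// superclass chain converges to false.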
/**
* Is this expression a value or not?
*
* syn boolean Exp.isValue();
* eq Exp.isValue() = true;
* eq Dot.isValue() = getIdnUse().isValue();
* eq TypeUse.isValue() = false;
* Note! If we did not have the rewrites below, the above equation would have to instead be written as:
* eq IdnUse.isValue() = !(decl() instanceof TypeDecl)
*
* FIXME: currently using the "without rewrites" version
*/
val isValue : Exp => Boolean =
attr {
case i : IdnUse => ! decl (i).isInstanceOf[TypeDecl] // replace this one
// with this one, when the rewrites are in:
// case t : TypeUse => false
case d : Dot => isValue (d.IdnUse)
case _ => true
}
/*
* FIXME: need to do these at some point
* Rewrite rules for replacing Use-nodes based on their declaration kind
*
* rewrite Use {
* when(decl() instanceof VarDecl)
* to VariableUse new VariableUse(getName());
* }
*
* rewrite Use {
* when(decl() instanceof TypeDecl)
* to TypeUse new TypeUse(getName());
* }
*/
}
| adeze/kiama | library/src/org/kiama/example/picojava/TypeAnalyser.scala | Scala | gpl-3.0 | 7,388 |
/*
* Accio is a platform to launch computer science experiments.
* Copyright (C) 2016-2018 Vincent Primault <[email protected]>
*
* Accio is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Accio is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Accio. If not, see <http://www.gnu.org/licenses/>.
*/
package fr.cnrs.liris.lumos.server
import com.twitter.finatra.thrift.routing.ThriftWarmup
import com.twitter.inject.utils.Handler
import com.twitter.util.logging.Logging
import com.twitter.util.{Return, Throw}
import fr.cnrs.liris.infra.thriftserver.AuthFilter
import fr.cnrs.liris.lumos.server.LumosService.ListJobs
import javax.inject.Inject
private[server] final class ServerWarmupHandler @Inject()(warmup: ThriftWarmup)
extends Handler with Logging {
override def handle(): Unit = {
try {
AuthFilter.MasterClientId.asCurrent {
warmup.send(ListJobs, ListJobs.Args(ListJobsRequest()), times = 3) {
case Return(_) => // Warmup request was successful.
case Throw(e) => logger.warn("Warmup request failed", e)
}
}
} catch {
case e: Throwable =>
// Here we don't want a warmup failure to prevent server start-up --
// this is important if your service will call downstream services
// during warmup that could be temporarily down or unavailable.
// We don't want that unavailability to cause our server to fail
// warm-up and prevent the server from starting. So we simply log
// the error message here.
logger.error(e.getMessage, e)
}
logger.info("Warm-up done.")
}
}
| privamov/accio | accio/java/fr/cnrs/liris/lumos/server/ServerWarmupHandler.scala | Scala | gpl-3.0 | 2,068 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs.storage.orc.utils
import java.nio.charset.StandardCharsets
import java.util.UUID
import com.vividsolutions.jts.geom._
import org.apache.hadoop.hive.ql.exec.vector._
import org.locationtech.geomesa.features.serialization.ObjectType
import org.locationtech.geomesa.features.serialization.ObjectType.ObjectType
import org.locationtech.geomesa.fs.storage.orc.OrcFileSystemStorage
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
/**
* Writes a simple feature to a given Orc row
*/
trait OrcAttributeWriter {
def apply(sf: SimpleFeature, row: Int): Unit
}
object OrcAttributeWriter {
/**
* Create a writer for simple feature to the given vector batch
*
* @param sft simple feature type
* @param batch vector batch
* @param fid write feature id or not
* @return
*/
def apply(sft: SimpleFeatureType, batch: VectorizedRowBatch, fid: Boolean = true): OrcAttributeWriter = {
require(batch.cols.length == OrcFileSystemStorage.fieldCount(sft, fid),
s"ORC schema does not match SimpleFeatureType: ${batch.cols.map(_.getClass.getName).mkString("\n\t", "\n\t", "")}")
val builder = Seq.newBuilder[OrcAttributeWriter]
builder.sizeHint(sft.getAttributeCount + (if (fid) { 1 } else { 0 }))
var i = 0
var col = 0
while (i < sft.getAttributeCount) {
val bindings = ObjectType.selectType(sft.getDescriptor(i))
val writer = bindings.head match {
case ObjectType.GEOMETRY => col += 1; createGeometryWriter(bindings(1), batch.cols(col - 1), batch.cols(col), i)
case ObjectType.DATE => new DateWriter(batch.cols(col).asInstanceOf[TimestampColumnVector], i)
case ObjectType.STRING => new StringWriter(batch.cols(col).asInstanceOf[BytesColumnVector], i)
case ObjectType.INT => new IntWriter(batch.cols(col).asInstanceOf[LongColumnVector], i)
case ObjectType.LONG => new LongWriter(batch.cols(col).asInstanceOf[LongColumnVector], i)
case ObjectType.FLOAT => new FloatWriter(batch.cols(col).asInstanceOf[DoubleColumnVector], i)
case ObjectType.DOUBLE => new DoubleWriter(batch.cols(col).asInstanceOf[DoubleColumnVector], i)
case ObjectType.BOOLEAN => new BooleanWriter(batch.cols(col).asInstanceOf[LongColumnVector], i)
case ObjectType.BYTES => new BytesWriter(batch.cols(col).asInstanceOf[BytesColumnVector], i)
case ObjectType.JSON => new StringWriter(batch.cols(col).asInstanceOf[BytesColumnVector], i)
case ObjectType.UUID => new UuidWriter(batch.cols(col).asInstanceOf[BytesColumnVector], i)
case ObjectType.LIST => new ListWriter(batch.cols(col).asInstanceOf[ListColumnVector], i, bindings(1))
case ObjectType.MAP => new MapWriter(batch.cols(col).asInstanceOf[MapColumnVector], i, bindings(1), bindings(2))
case _ => throw new IllegalArgumentException(s"Unexpected object type ${bindings.head}")
}
builder += writer
i += 1
col += 1
}
if (fid) {
builder += new FidWriter(batch.cols(col).asInstanceOf[BytesColumnVector])
}
new SequenceWriter(builder.result)
}
// noinspection LanguageFeature
private def createGeometryWriter(binding: ObjectType, x: ColumnVector, y: ColumnVector, i: Int): OrcAttributeWriter = {
implicit def toDoubleColumnVector(vec: ColumnVector): DoubleColumnVector = vec.asInstanceOf[DoubleColumnVector]
implicit def toListColumnVector(vec: ColumnVector): ListColumnVector = vec.asInstanceOf[ListColumnVector]
binding match {
case ObjectType.POINT => new PointWriter(x, y, i)
case ObjectType.LINESTRING => new LineStringWriter(x, y, i)
case ObjectType.MULTIPOINT => new MultiPointWriter(x, y, i)
case ObjectType.POLYGON => new PolygonWriter(x, y, i)
case ObjectType.MULTILINESTRING => new MultiLineStringWriter(x, y, i)
case ObjectType.MULTIPOLYGON => new MultiPolygonWriter(x, y, i)
case _ => throw new IllegalArgumentException(s"Unexpected object type $binding")
}
}
// invokes a sequence of writers in a single call
class SequenceWriter(writers: Seq[OrcAttributeWriter]) extends OrcAttributeWriter {
override def apply(sf: SimpleFeature, row: Int): Unit = writers.foreach(_.apply(sf, row))
}
// writes a feature ID to a vector
class FidWriter(val vector: BytesColumnVector) extends OrcAttributeWriter with SetVectorString {
override def apply(sf: SimpleFeature, row: Int): Unit = setValue(sf.getID, row)
}
// writes a string attribute to a vector
class StringWriter(val vector: BytesColumnVector, val attribute: Int)
extends VectorWriterAdapter[String, BytesColumnVector] with SetVectorString
// writes a date attribute to a vector
class DateWriter(val vector: TimestampColumnVector, val attribute: Int)
extends VectorWriterAdapter[java.util.Date, TimestampColumnVector] with SetVectorDate
// writes an int attribute to a vector
class IntWriter(val vector: LongColumnVector, val attribute: Int)
extends VectorWriterAdapter[java.lang.Integer, LongColumnVector] with SetVectorInt
// writes a long attribute to a vector
class LongWriter(val vector: LongColumnVector, val attribute: Int)
extends VectorWriterAdapter[java.lang.Long, LongColumnVector] with SetVectorLong
// writes a float attribute to a vector
class FloatWriter(val vector: DoubleColumnVector, val attribute: Int)
extends VectorWriterAdapter[java.lang.Float, DoubleColumnVector] with SetVectorFloat
// writes a double attribute to a vector
class DoubleWriter(val vector: DoubleColumnVector, val attribute: Int)
extends VectorWriterAdapter[java.lang.Double, DoubleColumnVector] with SetVectorDouble
// writes a boolean attribute to a vector
class BooleanWriter(val vector: LongColumnVector, val attribute: Int)
extends VectorWriterAdapter[java.lang.Boolean, LongColumnVector] with SetVectorBoolean
// writes a byte array attribute to a vector
class BytesWriter(val vector: BytesColumnVector, val attribute: Int)
extends VectorWriterAdapter[Array[Byte], BytesColumnVector] with SetVectorBytes
// writes a UUID attribute to a vector
class UuidWriter(val vector: BytesColumnVector, val attribute: Int)
extends OrcAttributeWriter with SetVectorString {
override def apply(sf: SimpleFeature, row: Int): Unit =
setValue(Option(sf.getAttribute(attribute).asInstanceOf[UUID]).map(_.toString).orNull, row)
}
/**
* Writes a point attribute to a vector
*
* @param x x coordinates
* @param y y coordinates
* @param attribute simple feature attribute index
*/
class PointWriter(x: DoubleColumnVector, y: DoubleColumnVector, attribute: Int) extends OrcAttributeWriter {
override def apply(sf: SimpleFeature, row: Int): Unit = {
val value = sf.getAttribute(attribute).asInstanceOf[Point]
if (value != null) {
x.vector(row) = value.getX
y.vector(row) = value.getY
} else {
x.noNulls = false
y.noNulls = false
x.isNull(row) = true
y.isNull(row) = true
}
}
}
/**
* Writes a linestring attribute to a vector. A linestring is modeled as a list of points.
*
* @see PointWriter
*
* @param xx outer list vector for x coordinates, containing a double vector for points
* @param yy outer list vector for y coordinates, containing a double vector for points
* @param attribute simple feature attribute index
*/
class LineStringWriter(xx: ListColumnVector, yy: ListColumnVector, attribute: Int) extends OrcAttributeWriter {
private val x = xx.child.asInstanceOf[DoubleColumnVector]
private val y = yy.child.asInstanceOf[DoubleColumnVector]
override def apply(sf: SimpleFeature, row: Int): Unit = {
val value = sf.getAttribute(attribute).asInstanceOf[LineString]
if (value != null) {
val length = value.getNumPoints
Seq(xx, yy).foreach { vector =>
vector.child.ensureSize(vector.childCount + length, true)
vector.offsets(row) = vector.childCount
vector.lengths(row) = length
}
var i = 0
while (i < length) {
val pt = value.getCoordinateN(i)
x.vector(xx.childCount + i) = pt.x
y.vector(yy.childCount + i) = pt.y
i += 1
}
xx.childCount += length
yy.childCount += length
} else {
xx.noNulls = false
yy.noNulls = false
xx.isNull(row) = true
yy.isNull(row) = true
}
}
}
/**
* Writes a multi-point attribute to a vector. A multi-point is modeled as a list of points.
*
* @see PointWriter
*
* @param xx outer list vector for x coordinates, containing a double vector for points
* @param yy outer list vector for y coordinates, containing a double vector for points
* @param attribute simple feature attribute index
*/
class MultiPointWriter(xx: ListColumnVector, yy: ListColumnVector, attribute: Int) extends OrcAttributeWriter {
private val x = xx.child.asInstanceOf[DoubleColumnVector]
private val y = yy.child.asInstanceOf[DoubleColumnVector]
override def apply(sf: SimpleFeature, row: Int): Unit = {
val value = sf.getAttribute(attribute).asInstanceOf[MultiPoint]
if (value != null) {
val length = value.getNumPoints
Seq(xx, yy).foreach { vector =>
vector.child.ensureSize(vector.childCount + length, true)
vector.offsets(row) = vector.childCount
vector.lengths(row) = length
}
var i = 0
while (i < length) {
val pt = value.getGeometryN(i).asInstanceOf[Point]
x.vector(xx.childCount + i) = pt.getX
y.vector(yy.childCount + i) = pt.getY
i += 1
}
xx.childCount += length
yy.childCount += length
} else {
xx.noNulls = false
yy.noNulls = false
xx.isNull(row) = true
yy.isNull(row) = true
}
}
}
/**
* Writes a polygon attribute to a vector. A polygon is modeled as a list of lines, with the first
* value being the shell, and any subsequent values being interior holes.
*
* @see LineStringWriter
*
* @param xxx outer list vector for x coordinates, containing a list vector for individual lines
* @param yyy outer list vector for y coordinates, containing a list vector for individual lines
* @param attribute simple feature attribute index
*/
class PolygonWriter(xxx: ListColumnVector, yyy: ListColumnVector, attribute: Int) extends OrcAttributeWriter {
// list of points for each line
private val xx = xxx.child.asInstanceOf[ListColumnVector]
private val yy = yyy.child.asInstanceOf[ListColumnVector]
// points
private val x = xx.child.asInstanceOf[DoubleColumnVector]
private val y = yy.child.asInstanceOf[DoubleColumnVector]
override def apply(sf: SimpleFeature, row: Int): Unit = {
val polygon = sf.getAttribute(attribute).asInstanceOf[Polygon]
if (polygon != null) {
val lineCount = polygon.getNumInteriorRing + 1
Seq(xxx, yyy).foreach { vector =>
vector.child.ensureSize(vector.childCount + lineCount, true)
vector.offsets(row) = vector.childCount
vector.lengths(row) = lineCount
}
var j = 0
while (j < lineCount) {
val line = if (j == 0) { polygon.getExteriorRing } else { polygon.getInteriorRingN(j - 1) }
val length = line.getNumPoints
Seq(xx, yy).foreach { vector =>
vector.child.ensureSize(vector.childCount + length, true)
vector.offsets(xxx.childCount + j) = vector.childCount
vector.lengths(xxx.childCount + j) = length
}
var i = 0
while (i < length) {
val pt = line.getCoordinateN(i)
x.vector(xx.childCount + i) = pt.x
y.vector(yy.childCount + i) = pt.y
i += 1
}
xx.childCount += length
yy.childCount += length
j += 1
}
xxx.childCount += lineCount
yyy.childCount += lineCount
} else {
xxx.noNulls = false
yyy.noNulls = false
xxx.isNull(row) = true
yyy.isNull(row) = true
}
}
}
/**
* Writes a multi-linestring attribute to a vector. A multi-linestring is modeled as a list of lines.
*
* @see LineStringWriter
*
* @param xxx outer list vector for x coordinates, containing a list vector for individual lines
* @param yyy outer list vector for y coordinates, containing a list vector for individual lines
* @param attribute simple feature attribute index
*/
class MultiLineStringWriter(xxx: ListColumnVector, yyy: ListColumnVector, attribute: Int) extends OrcAttributeWriter {
// list of points for each line
private val xx = xxx.child.asInstanceOf[ListColumnVector]
private val yy = yyy.child.asInstanceOf[ListColumnVector]
// points
private val x = xx.child.asInstanceOf[DoubleColumnVector]
private val y = yy.child.asInstanceOf[DoubleColumnVector]
override def apply(sf: SimpleFeature, row: Int): Unit = {
val value = sf.getAttribute(attribute).asInstanceOf[MultiLineString]
if (value != null) {
val lineCount = value.getNumGeometries
Seq(xxx, yyy).foreach { vector =>
vector.child.ensureSize(vector.childCount + lineCount, true)
vector.offsets(row) = vector.childCount
vector.lengths(row) = lineCount
}
var j = 0
while (j < lineCount) {
val line = value.getGeometryN(j).asInstanceOf[LineString]
val length = line.getNumPoints
Seq(xx, yy).foreach { vector =>
vector.child.ensureSize(vector.childCount + length, true)
vector.offsets(xxx.childCount + j) = vector.childCount
vector.lengths(xxx.childCount + j) = length
}
var i = 0
while (i < length) {
val pt = line.getCoordinateN(i)
x.vector(xx.childCount + i) = pt.x
y.vector(yy.childCount + i) = pt.y
i += 1
}
xx.childCount += length
yy.childCount += length
j += 1
}
xxx.childCount += lineCount
yyy.childCount += lineCount
} else {
xxx.noNulls = false
yyy.noNulls = false
xxx.isNull(row) = true
yyy.isNull(row) = true
}
}
}
/**
* Writes a multi-polygon attribute to a vector. A multi-polygon is modeled as a list of polygons.
*
* @see PolygonWriter
*
* @param xxxx outer list vector for x coordinates, containing a list vector for individual polygons
* @param yyyy outer list vector for y coordinates, containing a list vector for individual polygons
* @param attribute simple feature attribute index
*/
class MultiPolygonWriter(xxxx: ListColumnVector, yyyy: ListColumnVector, attribute: Int) extends OrcAttributeWriter {
// list of lines for each polygon
private val xxx = xxxx.child.asInstanceOf[ListColumnVector]
private val yyy = yyyy.child.asInstanceOf[ListColumnVector]
// list of points for each line
private val xx = xxx.child.asInstanceOf[ListColumnVector]
private val yy = yyy.child.asInstanceOf[ListColumnVector]
// points
private val x = xx.child.asInstanceOf[DoubleColumnVector]
private val y = yy.child.asInstanceOf[DoubleColumnVector]
override def apply(sf: SimpleFeature, row: Int): Unit = {
val multiPolygon = sf.getAttribute(attribute).asInstanceOf[MultiPolygon]
if (multiPolygon != null) {
val polygonCount = multiPolygon.getNumGeometries
Seq(xxxx, yyyy).foreach { vector =>
vector.child.ensureSize(vector.childCount + polygonCount, true)
vector.offsets(row) = vector.childCount
vector.lengths(row) = polygonCount
}
var k = 0
while (k < polygonCount) {
val polygon = multiPolygon.getGeometryN(k).asInstanceOf[Polygon]
val lineCount = polygon.getNumInteriorRing + 1
Seq(xxx, yyy).foreach { vector =>
vector.child.ensureSize(vector.childCount + lineCount, true)
vector.offsets(xxxx.childCount + k) = vector.childCount
vector.lengths(xxxx.childCount + k) = lineCount
}
var j = 0
while (j < lineCount) {
val line = if (j == 0) { polygon.getExteriorRing } else { polygon.getInteriorRingN(j - 1) }
val length = line.getNumPoints
Seq(xx, yy).foreach { vector =>
vector.child.ensureSize(vector.childCount + length, true)
vector.offsets(xxx.childCount + j) = vector.childCount
vector.lengths(xxx.childCount + j) = length
}
var i = 0
while (i < length) {
val pt = line.getCoordinateN(i)
x.vector(xx.childCount + i) = pt.x
y.vector(yy.childCount + i) = pt.y
i += 1
}
xx.childCount += length
yy.childCount += length
j += 1
}
xxx.childCount += lineCount
yyy.childCount += lineCount
k += 1
}
xxxx.childCount += polygonCount
yyyy.childCount += polygonCount
} else {
xxxx.noNulls = false
yyyy.noNulls = false
xxxx.isNull(row) = true
yyyy.isNull(row) = true
}
}
}
/**
* Writes a java.util.List attribute to a vector
*
* @param vector vector
* @param attribute simple feature attribute index
* @param binding list value type
*/
class ListWriter(vector: ListColumnVector, attribute: Int, binding: ObjectType) extends OrcAttributeWriter {
private val writer = getInnerWriter(binding, vector.child)
override def apply(sf: SimpleFeature, row: Int): Unit = {
val value = sf.getAttribute(attribute).asInstanceOf[java.util.List[AnyRef]]
if (value != null) {
val length = value.size
vector.child.ensureSize(vector.childCount + length, true)
vector.offsets(row) = vector.childCount
vector.lengths(row) = length
var i = 0
while (i < length) {
writer.setValue(value.get(i), vector.childCount + i)
i += 1
}
vector.childCount += length
} else {
vector.noNulls = false
vector.isNull(row) = true
}
}
}
/**
* Writes a java.util.Map attribute to a vector
*
* @param vector vector
* @param attribute simple feature attribute index
* @param keyBinding map key type
* @param valueBinding map value type
*/
class MapWriter(vector: MapColumnVector, attribute: Int, keyBinding: ObjectType, valueBinding: ObjectType)
extends OrcAttributeWriter {
private val keyWriter = getInnerWriter(keyBinding, vector.keys)
private val valueWriter = getInnerWriter(valueBinding, vector.values)
override def apply(sf: SimpleFeature, row: Int): Unit = {
import scala.collection.JavaConversions._
val value = sf.getAttribute(attribute).asInstanceOf[java.util.Map[AnyRef, AnyRef]]
if (value != null) {
val length = value.size
vector.keys.ensureSize(vector.childCount + length, true)
vector.values.ensureSize(vector.childCount + length, true)
vector.offsets(row) = vector.childCount
vector.lengths(row) = length
var i = 0
value.foreach { case (k, v) =>
keyWriter.setValue(k, vector.childCount + i)
valueWriter.setValue(v, vector.childCount + i)
i += 1
}
vector.childCount += length
} else {
vector.noNulls = false
vector.isNull(row) = true
}
}
}
// writes a typed value to a vector
trait SetVectorValue[T <: AnyRef, U <: ColumnVector] {
protected val vector: U
def setValue(value: T, row: Int): Unit
}
// gets a value from a simple feature and writes it to a vector
trait VectorWriterAdapter[T <: AnyRef, U <: ColumnVector] extends OrcAttributeWriter with SetVectorValue[T, U] {
def attribute: Int
override def apply(sf: SimpleFeature, row: Int): Unit = setValue(sf.getAttribute(attribute).asInstanceOf[T], row)
}
// writes a string to a vector
trait SetVectorString extends SetVectorValue[String, BytesColumnVector] {
override def setValue(value: String, row: Int): Unit = {
if (value != null) {
val bytes = value.getBytes(StandardCharsets.UTF_8)
vector.setRef(row, bytes, 0, bytes.length)
} else {
vector.noNulls = false
vector.isNull(row) = true
}
}
}
// writes a date to a vector
trait SetVectorDate extends SetVectorValue[java.util.Date, TimestampColumnVector] {
override def setValue(value: java.util.Date, row: Int): Unit = {
if (value != null) {
vector.time(row) = value.getTime
} else {
vector.noNulls = false
vector.isNull(row) = true
}
}
}
// writes an int to a vector
trait SetVectorInt extends SetVectorValue[java.lang.Integer, LongColumnVector] {
override def setValue(value: java.lang.Integer, row: Int): Unit = {
if (value != null) {
vector.vector(row) = value.longValue
} else {
vector.noNulls = false
vector.isNull(row) = true
}
}
}
// writes a long to a vector
trait SetVectorLong extends SetVectorValue[java.lang.Long, LongColumnVector] {
override def setValue(value: java.lang.Long, row: Int): Unit = {
if (value != null) {
vector.vector(row) = value.longValue
} else {
vector.noNulls = false
vector.isNull(row) = true
}
}
}
// writes a float to a vector
trait SetVectorFloat extends SetVectorValue[java.lang.Float, DoubleColumnVector] {
override def setValue(value: java.lang.Float, row: Int): Unit = {
if (value != null) {
vector.vector(row) = value.doubleValue
} else {
vector.noNulls = false
vector.isNull(row) = true
}
}
}
trait SetVectorDouble extends SetVectorValue[java.lang.Double, DoubleColumnVector] {
override def setValue(value: java.lang.Double, row: Int): Unit = {
if (value != null) {
vector.vector(row) = value.doubleValue
} else {
vector.noNulls = false
vector.isNull(row) = true
}
}
}
// writes a boolean to a vector
trait SetVectorBoolean extends SetVectorValue[java.lang.Boolean, LongColumnVector] {
override def setValue(value: java.lang.Boolean, row: Int): Unit = {
if (value != null) {
vector.vector(row) = if (value) { 1L } else { 0L }
} else {
vector.noNulls = false
vector.isNull(row) = true
}
}
}
// writes a byte array to a vector
trait SetVectorBytes extends SetVectorValue[Array[Byte], BytesColumnVector] {
override def setValue(value: Array[Byte], row: Int): Unit = {
if (value != null) {
vector.setRef(row, value, 0, value.length)
} else {
vector.noNulls = false
vector.isNull(row) = true
}
}
}
// writes a UUID to a vector
trait SetVectorUuid extends SetVectorValue[UUID, BytesColumnVector] {
override def setValue(value: UUID, row: Int): Unit = {
if (value != null) {
val bytes = value.toString.getBytes(StandardCharsets.UTF_8)
vector.setRef(row, bytes, 0, bytes.length)
} else {
vector.noNulls = false
vector.isNull(row) = true
}
}
}
/**
* Gets a writer for setting a value directly into a vector
*
* @param binding binding
* @param vec vector
* @return
*/
private def getInnerWriter(binding: ObjectType, vec: ColumnVector): SetVectorValue[AnyRef, ColumnVector] = {
val writer = binding match {
case ObjectType.DATE => new SetVectorDate { override val vector: TimestampColumnVector = vec.asInstanceOf[TimestampColumnVector] }
case ObjectType.STRING => new SetVectorString { override val vector: BytesColumnVector = vec.asInstanceOf[BytesColumnVector] }
case ObjectType.INT => new SetVectorInt { override val vector: LongColumnVector = vec.asInstanceOf[LongColumnVector] }
case ObjectType.LONG => new SetVectorLong { override val vector: LongColumnVector = vec.asInstanceOf[LongColumnVector] }
case ObjectType.FLOAT => new SetVectorFloat { override val vector: DoubleColumnVector = vec.asInstanceOf[DoubleColumnVector] }
case ObjectType.DOUBLE => new SetVectorDouble { override val vector: DoubleColumnVector = vec.asInstanceOf[DoubleColumnVector] }
case ObjectType.BOOLEAN => new SetVectorBoolean { override val vector: LongColumnVector = vec.asInstanceOf[LongColumnVector] }
case ObjectType.BYTES => new SetVectorBytes { override val vector: BytesColumnVector = vec.asInstanceOf[BytesColumnVector] }
case ObjectType.JSON => new SetVectorString { override val vector: BytesColumnVector = vec.asInstanceOf[BytesColumnVector] }
case ObjectType.UUID => new SetVectorUuid { override val vector: BytesColumnVector = vec.asInstanceOf[BytesColumnVector] }
case _ => throw new IllegalArgumentException(s"Unexpected object type $binding")
}
writer.asInstanceOf[SetVectorValue[AnyRef, ColumnVector]]
}
}
| ddseapy/geomesa | geomesa-fs/geomesa-fs-storage/geomesa-fs-storage-orc/src/main/scala/org/locationtech/geomesa/fs/storage/orc/utils/OrcAttributeWriter.scala | Scala | apache-2.0 | 26,019 |
package fpinscala.gettingstarted
// A comment!
/* Another comment */
/** A documentation comment */
object MyModule {
def abs(n: Int): Int =
if (n < 0) -n
else n
private def formatAbs(x: Int) = {
val msg = "The absolute value of %d is %d"
msg.format(x, abs(x))
}
def main(args: Array[String]): Unit =
println(formatAbs(-42))
// A definition of factorial, using a local, tail recursive function
def factorial(n: Int): Int = {
@annotation.tailrec
def go(n: Int, acc: Int): Int =
if (n <= 0) acc
else go(n-1, n*acc)
go(n, 1)
}
// Another implementation of `factorial`, this time with a `while` loop
def factorial2(n: Int): Int = {
var acc = 1
var i = n
while (i > 0) { acc *= i; i -= 1 }
acc
}
// Exercise 1: Write a function to compute the nth fibonacci number
// 0 and 1 are the first two numbers in the sequence,
// so we start the accumulators with those.
// At every iteration, we add the two numbers to get the next one.
def fib(n: Int): Int = {
@annotation.tailrec
def loop(n: Int, prev: Int, cur: Int): Int =
if (n == 0) prev
else loop(n - 1, cur, prev + cur)
loop(n, 0, 1)
}
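// Worked example (added): with this indexing fib(0) == 0, fib(1) == 1 and fib(5) == 5.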
// This definition and `formatAbs` are very similar..
private def formatFactorial(n: Int) = {
val msg = "The absolute value of %d is %d."
msg.format(n, factorial(n))
}
// We can generalize `formatAbs` and `formatFactorial` to
// accept a _function_ as a parameter
def formatResult(name: String, n: Int, f: Int => Int) = {
val msg = "The %s of %d is %d."
msg.format(name, n, f(n))
}
}
object FormatAbsAndFactorial {
import MyModule._
// Now we can use our general `formatResult` function
// with both `abs` and `factorial`
def main(args: Array[String]): Unit = {
println(formatResult("absolute value", -42, abs))
println(formatResult("factorial", 7, factorial))
}
}
// Functions get passed around so often in FP that it's
// convenient to have syntax for constructing a function
// *without* having to give it a name
object AnonymousFunctions {
import MyModule._
// Some examples of anonymous functions:
def main(args: Array[String]): Unit = {
println(formatResult("absolute value", -42, abs))
println(formatResult("factorial", 7, factorial))
println(formatResult("increment", 7, (x: Int) => x + 1))
println(formatResult("increment2", 7, (x) => x + 1))
println(formatResult("increment3", 7, x => x + 1))
println(formatResult("increment4", 7, _ + 1))
println(formatResult("increment5", 7, x => { val r = x + 1; r }))
}
}
object MonomorphicBinarySearch {
// First, a findFirst, specialized to `String`.
// Ideally, we could generalize this to work for any `Array` type.
def findFirst(ss: Array[String], key: String): Int = {
@annotation.tailrec
def loop(n: Int): Int =
// If `n` is past the end of the array, return `-1`
// indicating the key doesn't exist in the array.
if (n >= ss.length) -1
// `ss(n)` extracts the n'th element of the array `ss`.
// If the element at `n` is equal to the key, return `n`
// indicating that the element appears in the array at that index.
else if (ss(n) == key) n
else loop(n + 1) // Otherwise increment `n` and keep looking.
// Start the loop at the first element of the array.
loop(0)
}
}
object PolymorphicFunctions {
// Here's a polymorphic version of `findFirst`, parameterized on
// a function for testing whether an `A` is the element we want to find.
// Instead of hard-coding `String`, we take a type `A` as a parameter.
// And instead of hard-coding an equality check for a given key,
// we take a function with which to test each element of the array.
def findFirst[A](as: Array[A], p: A => Boolean): Int = {
@annotation.tailrec
def loop(n: Int): Int =
if (n >= as.length) -1
// If the function `p` matches the current element,
// we've found a match and we return its index in the array.
else if (p(as(n))) n
else loop(n + 1)
loop(0)
}
// Exercise 2: Implement a polymorphic function to check whether
// an `Array[A]` is sorted
def isSorted[A](as: Array[A], gt: (A,A) => Boolean): Boolean = {
@annotation.tailrec
def go(i: Int, prev: A): Boolean =
if (i == as.length) true
else if (gt(as(i), prev)) go(i + 1, as(i))
else false
if (as.length == 0) true
else go(1, as(0))
}
// Polymorphic functions are often so constrained by their type
// that they only have one implementation! Here's an example:
def partial1[A,B,C](a: A, f: (A,B) => C): B => C =
(b: B) => f(a, b)
// Exercise 3: Implement `curry`.
// Note that `=>` associates to the right, so we could
// write the return type as `A => B => C`
def curry[A,B,C](f: (A, B) => C): A => (B => C) =
a => b => f(a, b)
// NB: The `Function2` trait has a `curried` method already
// Exercise 4: Implement `uncurry`
def uncurry[A,B,C](f: A => B => C): (A, B) => C =
(a, b) => f(a)(b)
/*
NB: There is a method on the `Function` object in the standard library,
`Function.uncurried` that you can use for uncurrying.
Note that we can go back and forth between the two forms. We can curry
and uncurry and the two forms are in some sense "the same". In FP jargon,
we say that they are _isomorphic_ ("iso" = same; "morphe" = shape, form),
a term we inherit from category theory.
*/
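// Added illustration (not in the original): because curry and uncurry are
// inverses, round-tripping a function through both preserves its behaviour.
def curryUncurryRoundTrip: Boolean = {
  val add: (Int, Int) => Int = _ + _
  val roundTripped: (Int, Int) => Int = uncurry(curry(add))
  roundTripped(1, 2) == add(1, 2) // evaluates to true
}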
// Exercise 5: Implement `compose`
def compose[A,B,C](f: B => C, g: A => B): A => C =
a => f(g(a))
}
| fpinscala-muc/fpinscala-LithiumTD | answers/src/main/scala/fpinscala/gettingstarted/GettingStarted.scala | Scala | mit | 5,605 |
/*
* Copyright 2018 Akamai Technologies, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.akamai.edgegrid.signer.gatling
import com.akamai.edgegrid.signer.ClientCredential
import com.akamai.edgegrid.signer.ahc.AsyncHttpClientEdgeGridSignatureCalculator
import io.gatling.core.Predef.{Simulation, _}
import io.gatling.http.Predef.http
abstract class OpenApiSimulation(credential: ClientCredential) extends Simulation {
val httpConf = http
.signatureCalculator(new AsyncHttpClientEdgeGridSignatureCalculator(credential))
.baseURL("https://" + credential.getHost)
}
| akamai-open/edgegrid-auth-java | edgegrid-signer-gatling/src/main/scala/com/akamai/edgegrid/signer/gatling/OpenApiSimulation.scala | Scala | apache-2.0 | 1,130 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.commons.util
import java.nio.charset.StandardCharsets
import io.gatling.BaseSpec
class FastByteArrayInputStreamSpec extends BaseSpec {
private val bytes = "test string".getBytes(StandardCharsets.UTF_8)
"FastByteArrayInputStream" should "signal eof when all bytes are read" in {
val byteStream = new FastByteArrayInputStream(bytes)
byteStream.read(bytes, 0, bytes.length)
byteStream.read(bytes, 0, 1) shouldBe -1
}
it should "not allow to read more than available bytes" in {
val byteStream = new FastByteArrayInputStream(bytes)
byteStream.read(bytes, 0, bytes.length + 1) shouldBe bytes.length
}
}
| gatling/gatling | gatling-commons/src/test/scala/io/gatling/commons/util/FastByteArrayInputStreamSpec.scala | Scala | apache-2.0 | 1,273 |
println(List(1, 2) zip List(1, 2, 3))
println("あまりが消えました")
| mactkg/learn-scala | getting_started/day1/myanswer/p6.scala | Scala | mit | 77 |
package gpiocfg
import com.typesafe.config.Config
import gpiocfg.GpioCfg.Modes.{analog, digital, pwm}
import gpiocfg.GpioCfg._
import net.ceedubs.ficus.Ficus._
import scala.collection.JavaConversions._
// todo use json4s
object GpioCfgModels {
/**
* models a Pin from the config
*/
case class PinCfg(num: Int,
mode: Mode,
dir: Direction,
state: Option[AnyRef],
pull: Option[Pull])
  /**
   * Enriches a Typesafe Config with a `pins` accessor that parses the
   * top-level `pins` list into [[PinCfg]] values.
   */
implicit class RichPins(conf: Config) {
def pins: Seq[PinCfg] = conf.getConfigList("pins").map { cfg =>
PinCfg(cfg.getInt("number"),
modes(cfg),
direction(cfg),
initialState(cfg),
pulls(cfg))
}
}
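  // Illustrative (assumed) HOCON shape consumed by `pins`; the exact mode /
  // direction / pull / set string literals come from the corresponding `uid`
  // values in GpioCfg, so the values below are placeholders only:
  //   pins = [
  //     { number = 4,  mode = "digital", direction = "output", set = "high" }
  //     { number = 17, mode = "digital", direction = "input",  pull = "up" }
  //   ]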
private[gpiocfg] def layout(str: String): Option[Layout] = str match {
case pi4j.uid => Option(pi4j)
case bcom.uid => Option(bcom)
case _ => None
}
private[gpiocfg] def modes(cfg: Config) = cfg.getString("mode") match {
case digital.uid => digital
case analog.uid => analog
case pwm.uid => pwm
}
private[gpiocfg] def direction(cfg: Config): Direction = cfg.getString("direction") match {
case input.uid => input
case output.uid => output
}
private[gpiocfg] def pulls(cfg: Config): Option[Pull] = cfg.getAs[String]("pull") match {
case Some(off.uid) => Option(off)
case Some(up.uid) => Option(up)
case Some(down.uid) => Option(down)
case _ => None
}
private[gpiocfg] def initialState(cfg: Config): Option[AnyRef] = digitalState(cfg).orElse {
if (cfg.hasPath("set")) Option(Double.box(cfg.getDouble("set")))
else None
}
private[gpiocfg] def digitalState(cfg: Config): Option[DigitalState] = cfg.getAs[String]("set") match {
case Some(high.uid) => Option(high)
case Some(low.uid) => Option(low)
case _ => None
}
}
| jw3/gpiocfg | src/main/scala/gpiocfg/GpioCfgModels.scala | Scala | apache-2.0 | 1,843 |
package com.originate.scalypher.where
import com.originate.scalypher.types.IdentifiableMap
import com.originate.scalypher.types.Identifiable
case class AndOrCondition(operator: BooleanOperator, condition: Condition) {
def toQuery(identifiableMap: IdentifiableMap): String =
Seq(operator.toQuery, condition.toQuery(identifiableMap)) mkString " "
}
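/**
 * Illustrative rendering (assuming the And/Or operators render as "AND"/"OR"
 * and c1, c2 are arbitrary conditions):
 *   Where(c1).and(c2).toQuery(map)  // "<c1 query> AND <c2 query>"
 *   Where(c1).or(c2).toQuery(map)   // "<c1 query> OR <c2 query>"
 */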
case class Where(startCondition: Condition, conditions: Seq[AndOrCondition] = Seq.empty) {
val identifiables: Set[Identifiable] = {
val cs = (conditions map (_.condition)) :+ startCondition
cs.flatMap(_.identifiables).toSet
}
def and(condition: Condition): Where =
copy(conditions = conditions :+ AndOrCondition(And, condition))
def or(condition: Condition): Where =
copy(conditions = conditions :+ AndOrCondition(Or, condition))
def toQuery(identifiableMap: IdentifiableMap): String = {
val firstCondition = startCondition.toQuery(identifiableMap)
val rest =
if (conditions.isEmpty) ""
else " " + (conditions map (_.toQuery(identifiableMap)) mkString " ")
s"$firstCondition$rest"
}
}
| Originate/scalypher | src/main/scala/where/Where.scala | Scala | mit | 1,099 |
package com.arcusys.valamis.lesson.scorm.service.sequencing
import com.arcusys.valamis.lesson.scorm.model.manifest._
import com.arcusys.valamis.lesson.scorm.model.sequencing.{ SequencingRequestType, SequencingResponseDelivery, SequencingResponseInvalid }
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class ChoiceSequencingRequestTest extends SequencingRequestServiceTestBase(SequencingRequestType.Choice) {
"Choice sequencing request" should "fail if target cannot be reached via available children (3.1.1.1)" in {
pending
}
it should "fail if target or one of its ancestors is hidden from choice via precondition rules (3.3.1)" in {
val hidingRuleSet = Seq(new PreConditionRule(RuleConditionSet(new ConditionRuleItem(ConditionType.Always)), PreConditionAction.HiddenFromChoice))
expectResultWithTarget(SequencingResponseInvalid,
(threeLevelTree(rightRightPreConditionRules = hidingRuleSet), _.children(1).children(1).item.activity.id),
(threeLevelTree(rightPreConditionRules = hidingRuleSet), _.children(1).children(1).item.activity.id),
(threeLevelTree(rootPreConditionRules = hidingRuleSet), _.children(1).children(1).item.activity.id)
)
}
it should "fail if sequencing control choice is disabled for parent of target (4.1.1)" in {
val parentPermissions = new SequencingPermissions(choiceForChildren = false, choiceForNonDescendants = true, flowForChildren = true, forwardOnlyForChildren = false)
expectResultWithTarget(SequencingResponseInvalid,
(threeLevelTree(rightPermissions = parentPermissions), _.children(1).children(1).item.activity.id),
(threeLevelTree(rootPermissions = parentPermissions), _.children(1).item.activity.id)
)
}
it should "succeed with target activity delivery if target activity is leaf and equals to current activity (7.1, 12.1, case #1)" in {
val parentPermissions = new SequencingPermissions(choiceForChildren = true, choiceForNonDescendants = false, flowForChildren = false, forwardOnlyForChildren = true)
val tree = threeLevelTree(currentLevel = Some(2), leftPermissions = parentPermissions)
expectResultWithTarget(SequencingResponseDelivery(tree.children(0).children(0)),
(tree, _.children(0).children(0).item.activity.id)
)
}
it should "fail if target activity is sibling ahead and current activity has stop forward traversal precondition rule (8.5.2.1, case #2)" in {
val stoppingRuleSet = Seq(new PreConditionRule(RuleConditionSet(new ConditionRuleItem(ConditionType.Always)), PreConditionAction.StopForwardTraversal))
val tree = twoLevelWideTree(currentLevel = Some(1), leftPreConditionRules = stoppingRuleSet)
expectResultWithTarget(SequencingResponseInvalid,
(tree, _.children(2).item.activity.id)
)
}
it should "fail if target activity is sibling ahead and one of activites between current and targer has stop forward traversal precondition rule (8.5.2.1, case #2)" in {
val stoppingRuleSet = Seq(new PreConditionRule(RuleConditionSet(new ConditionRuleItem(ConditionType.Always)), PreConditionAction.StopForwardTraversal))
val tree = twoLevelWideTree(currentLevel = Some(1), midPreConditionRules = stoppingRuleSet)
expectResultWithTarget(SequencingResponseInvalid,
(tree, _.children(2).item.activity.id)
)
}
it should "succeed with target activity delivery if target activity is sibling ahead and nothing stops forward traversal (case #2)" in {
val tree = twoLevelWideTree(currentLevel = Some(1))
expectResultWithTarget(SequencingResponseDelivery(tree.children(2)),
(tree, _.children(2).item.activity.id)
)
}
it should "fail if target activity is sibling behind and parent has forward only for children set (8.5.2.1, case #2)" in {
val parentPermissions = new SequencingPermissions(choiceForChildren = true, choiceForNonDescendants = true, flowForChildren = true, forwardOnlyForChildren = true)
val tree = twoLevelWideTree(rootPermissions = parentPermissions)
tree.currentActivity = Some(tree.children(2))
expectResultWithTarget(SequencingResponseInvalid,
(tree, _.children(0).item.activity.id)
)
}
it should "fail if target is below current and one of target's ancestors up to current has stop forward traversal precondition rule (9.3.2.1, case #3)" in {
val stoppingRuleSet = Seq(new PreConditionRule(RuleConditionSet(new ConditionRuleItem(ConditionType.Always)), PreConditionAction.StopForwardTraversal))
expectResultWithTarget(SequencingResponseInvalid,
(threeLevelTree(currentLevel = Some(0), rootPreConditionRules = stoppingRuleSet), _.children(1).children(1).item.activity.id),
(threeLevelTree(currentLevel = Some(0), rightPreConditionRules = stoppingRuleSet), _.children(1).children(1).item.activity.id),
(threeLevelTree(currentLevel = Some(0), rootPreConditionRules = stoppingRuleSet), _.children(1).item.activity.id),
(threeLevelTree(currentLevel = Some(1), leftPreConditionRules = stoppingRuleSet), _.children(0).children(1).item.activity.id)
)
}
it should "fail if target is below current and one of target's ancestors up to but not including current is not active and has prevent children activation set (9.3.3.1, case #3)" in {
expectResultWithTarget(SequencingResponseInvalid,
(threeLevelTree(currentLevel = Some(0), rightPreventChildrenActivation = true), _.children(1).children(1).item.activity.id)
)
}
it should "fail if current is not defined and one of target's ancestors has stop forward traversal precondition rule (9.3.2.1., case #3)" in {
val stoppingRuleSet = Seq(new PreConditionRule(RuleConditionSet(new ConditionRuleItem(ConditionType.Always)), PreConditionAction.StopForwardTraversal))
expectResultWithTarget(SequencingResponseInvalid,
(threeLevelTree(rootPreConditionRules = stoppingRuleSet), _.children(1).children(1).item.activity.id),
(threeLevelTree(rightPreConditionRules = stoppingRuleSet), _.children(1).children(1).item.activity.id),
(threeLevelTree(rootPreConditionRules = stoppingRuleSet), _.children(1).item.activity.id)
)
}
it should "fail if current is not defined and one of target's ancestors not including root is not active and has prevent children activation set (9.3.3.1, case #3)" in {
expectResultWithTarget(SequencingResponseInvalid,
(threeLevelTree(rightPreventChildrenActivation = true), _.children(1).children(1).item.activity.id)
)
}
it should "fail if target is above current and current or one of its ancestors not including target has sequencing control choice exit = false (10.3.1.1.1, case #4)" in {
val cannotExitPermissions = new SequencingPermissions(choiceForChildren = true, choiceForNonDescendants = false, flowForChildren = true, forwardOnlyForChildren = false)
expectResultWithTarget(SequencingResponseInvalid,
(threeLevelTree(currentLevel = Some(2), leftLeftPermissions = cannotExitPermissions), _.item.activity.id),
(threeLevelTree(currentLevel = Some(1), leftPermissions = cannotExitPermissions), _.item.activity.id),
(threeLevelTree(currentLevel = Some(2), leftPermissions = cannotExitPermissions), _.item.activity.id),
(threeLevelTree(currentLevel = Some(2), leftLeftPermissions = cannotExitPermissions), _.children(0).item.activity.id)
)
}
it should "deliver current if target is above current and target has sequencing control choice exit = false (10.3.1.1.1, case #4)" in {
val cannotExitPermissions = new SequencingPermissions(choiceForChildren = true, choiceForNonDescendants = false, flowForChildren = true, forwardOnlyForChildren = false)
val canExitAndFlowPermissions = new SequencingPermissions(choiceForChildren = true, choiceForNonDescendants = true, flowForChildren = true, forwardOnlyForChildren = false)
val tree = threeLevelTree(currentLevel = Some(2), rootPermissions = cannotExitPermissions, leftPermissions = canExitAndFlowPermissions)
expectResultWithTarget(SequencingResponseDelivery(tree.children(0).children(0)), (tree, _.item.activity.id))
}
it should "fail if target is in other branch with current and current or one of its ancestors not including common ancestor has sequencing control choice exit = false (11.4.1.1, case #5)" in {
val cannotExitPermissions = new SequencingPermissions(choiceForChildren = true, choiceForNonDescendants = false, flowForChildren = true, forwardOnlyForChildren = false)
expectResultWithTarget(SequencingResponseInvalid,
(threeLevelTree(currentLevel = Some(2), leftLeftPermissions = cannotExitPermissions), _.children(1).children(1).item.activity.id),
(threeLevelTree(currentLevel = Some(1), leftPermissions = cannotExitPermissions), _.children(1).children(1).item.activity.id),
(threeLevelTree(currentLevel = Some(2), leftPermissions = cannotExitPermissions), _.children(1).children(1).item.activity.id),
(threeLevelTree(currentLevel = Some(2), leftLeftPermissions = cannotExitPermissions), _.children(1).item.activity.id)
)
}
it should "fail if target is in other branch with current, forward in tree, and one of target's ancestors has stop forward traversal precondition rule (11.8.1.2.1, case #5)" in {
val stoppingRuleSet = Seq(new PreConditionRule(RuleConditionSet(new ConditionRuleItem(ConditionType.Always)), PreConditionAction.StopForwardTraversal))
expectResultWithTarget(SequencingResponseInvalid,
(threeLevelTree(currentLevel = Some(1), rootPreConditionRules = stoppingRuleSet), _.children(1).children(1).item.activity.id),
(threeLevelTree(currentLevel = Some(1), rightPreConditionRules = stoppingRuleSet), _.children(1).children(1).item.activity.id),
(threeLevelTree(currentLevel = Some(2), rootPreConditionRules = stoppingRuleSet), _.children(1).item.activity.id)
)
}
it should "fail if target is in other branch with current, forward in tree, and one of target's ancestors (excluding common) is not active and has prevent children activation set (11.8.1.3.1, case #5)" in {
expectResultWithTarget(SequencingResponseInvalid,
(threeLevelTree(currentLevel = Some(1), rightPreventChildrenActivation = true), _.children(1).children(1).item.activity.id),
(threeLevelTree(currentLevel = Some(2), rightPreventChildrenActivation = true), _.children(1).children(1).item.activity.id)
)
}
it should "fail if target is in other branch with current, backward in tree, and one of target's ancestors (excluding common) is not active and has prevent children activation set (11.8.1.3.1, case #5)" in {
val tree = threeLevelTree(leftPreventChildrenActivation = true)
tree.currentActivity = Some(tree.children(1).children(1))
expectResultWithTarget(SequencingResponseInvalid, (tree, _.children(0).children(0).item.activity.id))
}
it should "fail if target is in other branch with current, backward in tree, and target is not active and has prevent children activation set (11.8.1.3.1, case #5)" in {
val tree = threeLevelTree(leftLeftPreventChildrenActivation = true)
tree.currentActivity = Some(tree.children(1).children(1))
expectResultWithTarget(SequencingResponseInvalid, (tree, _.children(0).children(0).item.activity.id))
}
} | igor-borisov/valamis | valamis-scorm-lesson/src/test/scala/com/arcusys/valamis/lesson/scorm/service/sequencing/ChoiceSequencingRequestTest.scala | Scala | gpl-3.0 | 11,243 |
package gitlog
import java.io.{BufferedWriter, FileWriter, FileOutputStream, File}
import config.Configuration
import gitlog.ChangeType._
/**
* Created by chanjinpark on 15. 4. 7..
*/
class FileWiseChange(n: String) {
val name = n
  var changes = List[(Int, Int, ChangeType)]() // (order, amount, changeType): when each change happened and how large it was
def getChangesAmount = changes.foldLeft(0)((s, c) => s + c._2)
def getChangesOrdered = changes.sortBy(_._1)
var renamedfrom: FileWiseChange = null
def todata = (name, changes.size, getChangesAmount, getChangesOrdered)
def tocsv = name + ", " + changes.size + ", " + getChangesAmount + ", " + getChangesOrdered.mkString(", ")
  override def toString = name + ": " + changes.size + " times, " + getChangesAmount + " lines\n\t" + getChangesOrdered.mkString(", ")
}
class CommitWiseChange(ord: Int) {
val order = ord
var changes = List[(String, Int, ChangeType)]() // file, amount, changetype
def getChangesAmount = changes.foldLeft(0)((s, c) => s + c._2)
def getChangesOrdered = changes.sortBy(_._1)
def todata = (order, changes.size, getChangesAmount, getChangesOrdered)
def tocsv = order + ", " + changes.size + ", " + getChangesAmount + ", " + getChangesOrdered.mkString(", ")
override def toString = order + ", " + changes.size + " files, " + getChangesAmount + " lines, " + getChangesOrdered.mkString(", ")
}
class FileChangeData(p: String) {
val commits = GitCommitLog.getCommits(p)
val details = GitCommitDetails.getall(p, commits.values.toList.sortWith((c, c1) => c.isBefore(c1)))
def tocsvheader = "name, times, amount, changelogs"
val paths = Configuration.classpath(p)
def filterExtension(file: String) = {
paths.exists(p => file.startsWith(p)) && Configuration.extension.exists(ext => file.endsWith(ext))
}
def commitwiseChanges = {
var cchanges = Map[Int, CommitWiseChange]()
commits.values.foreach(c => {
val cc = new CommitWiseChange(c.order)
cchanges += (c.order -> cc)
val diffs = details(c.commitid)
diffs.foreach(d => {
if (filterExtension(d.from) || filterExtension(d.to)) {
d.changetype match {
case Added => cc.changes ::= (d.to, d.nadd, Added)
case Deleted => cc.changes ::= (d.from, 0, Deleted)
case Modified => cc.changes ::= (d.to, d.ndel + d.nadd, Modified)
case Renamed => {
cc.changes ::= (d.from, 0, Deleted)
cc.changes ::= (d.to, d.nadd + d.ndel, Renamed)
}
case _ => {
assert(false)
}
}
}
})
})
cchanges.filter(kv => kv._2.changes.size > 0)
}
def filewiseChanges = {
var fchanges = Map[String, FileWiseChange]()
def getFileChange(f: String) = {
if ( fchanges.contains(f) ) fchanges(f) else {
val newfc = new FileWiseChange(f)
fchanges += (f -> newfc)
newfc
}
}
commits.values.foreach(c => {
val diffs = details(c.commitid)
      val cochfiles = c.changedfs // List[(String, Char)]; the Char is one of the git status codes A, C, D, M, R, T, U, X
diffs.foreach(d => {
if (filterExtension(d.from) || filterExtension(d.to)) {
d.changetype match {
case Added => getFileChange(d.to).changes ::=(c.order, d.nadd, Added)
case Deleted => getFileChange(d.from).changes ::=(c.order, 0, Deleted)
case Modified => getFileChange(d.to).changes ::=(c.order, d.ndel + d.nadd, Modified)
case Renamed => {
val fcfrom = getFileChange(d.from)
fcfrom.changes ::=(c.order, 0, Deleted)
val fc = getFileChange(d.to)
fc.changes ::=(c.order, d.nadd + d.ndel, Renamed)
fc.renamedfrom = fcfrom
}
case _ => {
assert(false)
}
}
}
})
})
fchanges.filter(kv => kv._2.changes.size > 0)
}
def commitfileChanges = {
}
def generateFilewiseCSV = {
val writer = new BufferedWriter(new FileWriter(new File(Configuration.csvfile(p, "filewise"))))
    filewiseChanges.values.foreach(c => writer.write(c.tocsv + "\n"))
writer.close()
}
def generateCommitwiseCSV = {
val writer = new BufferedWriter(new FileWriter(new File(Configuration.csvfile(p, "commitwise"))))
    commitwiseChanges.values.foreach(c => writer.write(c.tocsv + "\n"))
writer.close()
}
def printChanges = {
val fcs = filewiseChanges.values.toList.sortBy(_.name)
fcs.foreach(f => println(f.toString))
println("---------------------")
val ccs = commitwiseChanges.values.toList.sortBy(_.order)
ccs.foreach(c => println(c.toString))
println("---------------------")
println("Total changes (file# x change times, amount) filewise - " + fcs.foldLeft((0, 0))((s, fc) => (s._1 + fc.changes.size,s._2 + fc.getChangesAmount)))
println("Total changes (file# x change times, amount) commitwise - " + ccs.foldLeft((0, 0))((s, cc) => (s._1 + cc.changes.size,s._2 + cc.getChangesAmount)))
}
def changesOverall = {
}
}
object FileChangeData {
def main(args: Array[String]) = {
val p = "junit"
val csv = new FileChangeData(p)
//csv.printChanges
csv.generateFilewiseCSV
csv.generateCommitwiseCSV
}
}
| chanjin/DesignEvolution | src/main/scala/gitlog/FileChangeData.scala | Scala | apache-2.0 | 5,272 |
// @SOURCE:D:/git/trask/glowroot/agent-parent/plugins/play-plugin/tmp-router-files/conf/routes
// @HASH:bf7129340517c7ffddb4d1f70c86ba09e64cd48e
// @DATE:Sat Apr 09 17:39:42 PDT 2016
import play.core._
import play.core.Router._
import play.core.j._
import play.api.mvc._
import Router.queryString
object Routes extends Router.Routes {
private var _prefix = "/"
def setPrefix(prefix: String) {
_prefix = prefix
List[(String,Routes)]().foreach {
case (p, router) => router.setPrefix(prefix + (if(prefix.endsWith("/")) "" else "/") + p)
}
}
def prefix = _prefix
lazy val defaultPrefix = { if(Routes.prefix.endsWith("/")) "" else "/" }
// @LINE:5
private[this] lazy val controllers_HomeController_index0 = Route("GET", PathPattern(List(StaticPart(Routes.prefix))))
// @LINE:6
private[this] lazy val controllers_AsyncController_message1 = Route("GET", PathPattern(List(StaticPart(Routes.prefix),StaticPart(Routes.defaultPrefix),StaticPart("message"))))
// @LINE:7
private[this] lazy val controllers_StreamController_stream2 = Route("GET", PathPattern(List(StaticPart(Routes.prefix),StaticPart(Routes.defaultPrefix),StaticPart("stream"))))
// @LINE:8
private[this] lazy val controllers_Assets_at3 = Route("GET", PathPattern(List(StaticPart(Routes.prefix),StaticPart(Routes.defaultPrefix),StaticPart("assets/"),DynamicPart("file", """.+"""))))
// @LINE:9
private[this] lazy val controllers_BadController_bad4 = Route("GET", PathPattern(List(StaticPart(Routes.prefix),StaticPart(Routes.defaultPrefix),StaticPart("bad"))))
def documentation = List(("""GET""", prefix,"""controllers.HomeController.index"""),("""GET""", prefix + (if(prefix.endsWith("/")) "" else "/") + """message""","""controllers.AsyncController.message"""),("""GET""", prefix + (if(prefix.endsWith("/")) "" else "/") + """stream""","""controllers.StreamController.stream"""),("""GET""", prefix + (if(prefix.endsWith("/")) "" else "/") + """assets/$file<.+>""","""controllers.Assets.at(path:String = "/public", file:String)"""),("""GET""", prefix + (if(prefix.endsWith("/")) "" else "/") + """bad""","""controllers.BadController.bad""")).foldLeft(List.empty[(String,String,String)]) { (s,e) => e match {
case r @ (_,_,_) => s :+ r.asInstanceOf[(String,String,String)]
case l => s ++ l.asInstanceOf[List[(String,String,String)]]
}}
def routes:PartialFunction[RequestHeader,Handler] = {
// @LINE:5
case controllers_HomeController_index0(params) => {
call {
invokeHandler(controllers.HomeController.index, HandlerDef(this, "controllers.HomeController", "index", Nil,"GET", """""", Routes.prefix + """"""))
}
}
// @LINE:6
case controllers_AsyncController_message1(params) => {
call {
invokeHandler(controllers.AsyncController.message, HandlerDef(this, "controllers.AsyncController", "message", Nil,"GET", """""", Routes.prefix + """message"""))
}
}
// @LINE:7
case controllers_StreamController_stream2(params) => {
call {
invokeHandler(controllers.StreamController.stream, HandlerDef(this, "controllers.StreamController", "stream", Nil,"GET", """""", Routes.prefix + """stream"""))
}
}
// @LINE:8
case controllers_Assets_at3(params) => {
call(Param[String]("path", Right("/public")), params.fromPath[String]("file", None)) { (path, file) =>
invokeHandler(controllers.Assets.at(path, file), HandlerDef(this, "controllers.Assets", "at", Seq(classOf[String], classOf[String]),"GET", """""", Routes.prefix + """assets/$file<.+>"""))
}
}
// @LINE:9
case controllers_BadController_bad4(params) => {
call {
invokeHandler(controllers.BadController.bad, HandlerDef(this, "controllers.BadController", "bad", Nil,"GET", """""", Routes.prefix + """bad"""))
}
}
}
}
| trask/glowroot | agent/plugins/play-plugin/src/test/app-2.1.0-scala/scala/routes_routing.scala | Scala | apache-2.0 | 3,845 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui
import java.net.{BindException, ServerSocket}
import java.net.{URI, URL}
import java.util.Locale
import javax.servlet._
import javax.servlet.http.{HttpServlet, HttpServletRequest, HttpServletResponse}
import scala.io.Source
import org.eclipse.jetty.servlet.{ServletContextHandler, ServletHolder}
import org.mockito.Mockito.{mock, when}
import org.scalatest.concurrent.Eventually._
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.LocalSparkContext._
import org.apache.spark.internal.config.UI
import org.apache.spark.util.Utils
class UISuite extends SparkFunSuite {
/**
* Create a test SparkContext with the SparkUI enabled.
* It is safe to `get` the SparkUI directly from the SparkContext returned here.
*/
private def newSparkContext(): SparkContext = {
val conf = new SparkConf()
.setMaster("local")
.setAppName("test")
.set(UI.UI_ENABLED, true)
val sc = new SparkContext(conf)
assert(sc.ui.isDefined)
sc
}
private def sslDisabledConf(): (SparkConf, SecurityManager, SSLOptions) = {
val conf = new SparkConf
val securityMgr = new SecurityManager(conf)
(conf, securityMgr, securityMgr.getSSLOptions("ui"))
}
private def sslEnabledConf(sslPort: Option[Int] = None):
(SparkConf, SecurityManager, SSLOptions) = {
val keyStoreFilePath = getTestResourcePath("spark.keystore")
val conf = new SparkConf()
.set("spark.ssl.ui.enabled", "true")
.set("spark.ssl.ui.keyStore", keyStoreFilePath)
.set("spark.ssl.ui.keyStorePassword", "123456")
.set("spark.ssl.ui.keyPassword", "123456")
sslPort.foreach { p =>
conf.set("spark.ssl.ui.port", p.toString)
}
val securityMgr = new SecurityManager(conf)
(conf, securityMgr, securityMgr.getSSLOptions("ui"))
}
ignore("basic ui visibility") {
withSpark(newSparkContext()) { sc =>
// test if the ui is visible, and all the expected tabs are visible
eventually(timeout(10.seconds), interval(50.milliseconds)) {
val html = Utils.tryWithResource(Source.fromURL(sc.ui.get.webUrl))(_.mkString)
assert(!html.contains("random data that should not be present"))
assert(html.toLowerCase(Locale.ROOT).contains("stages"))
assert(html.toLowerCase(Locale.ROOT).contains("storage"))
assert(html.toLowerCase(Locale.ROOT).contains("environment"))
assert(html.toLowerCase(Locale.ROOT).contains("executors"))
}
}
}
ignore("visibility at localhost:4040") {
withSpark(newSparkContext()) { sc =>
// test if visible from http://localhost:4040
eventually(timeout(10.seconds), interval(50.milliseconds)) {
val html = Utils.tryWithResource(Source.fromURL("http://localhost:4040"))(_.mkString)
assert(html.toLowerCase(Locale.ROOT).contains("stages"))
}
}
}
test("jetty selects different port under contention") {
var server: ServerSocket = null
var serverInfo1: ServerInfo = null
var serverInfo2: ServerInfo = null
val (conf, _, sslOptions) = sslDisabledConf()
try {
server = new ServerSocket(0)
val startPort = server.getLocalPort
serverInfo1 = JettyUtils.startJettyServer("0.0.0.0", startPort, sslOptions, conf)
serverInfo2 = JettyUtils.startJettyServer("0.0.0.0", startPort, sslOptions, conf)
// Allow some wiggle room in case ports on the machine are under contention
val boundPort1 = serverInfo1.boundPort
val boundPort2 = serverInfo2.boundPort
assert(boundPort1 != startPort)
assert(boundPort2 != startPort)
assert(boundPort1 != boundPort2)
} finally {
stopServer(serverInfo1)
stopServer(serverInfo2)
closeSocket(server)
}
}
test("jetty with https selects different port under contention") {
var server: ServerSocket = null
var serverInfo1: ServerInfo = null
var serverInfo2: ServerInfo = null
try {
server = new ServerSocket(0)
val startPort = server.getLocalPort
val (conf, _, sslOptions) = sslEnabledConf()
serverInfo1 = JettyUtils.startJettyServer("0.0.0.0", startPort, sslOptions, conf, "server1")
serverInfo2 = JettyUtils.startJettyServer("0.0.0.0", startPort, sslOptions, conf, "server2")
// Allow some wiggle room in case ports on the machine are under contention
val boundPort1 = serverInfo1.boundPort
val boundPort2 = serverInfo2.boundPort
assert(boundPort1 != startPort)
assert(boundPort2 != startPort)
assert(boundPort1 != boundPort2)
} finally {
stopServer(serverInfo1)
stopServer(serverInfo2)
closeSocket(server)
}
}
test("jetty binds to port 0 correctly") {
var socket: ServerSocket = null
var serverInfo: ServerInfo = null
val (conf, _, sslOptions) = sslDisabledConf()
try {
serverInfo = JettyUtils.startJettyServer("0.0.0.0", 0, sslOptions, conf)
val server = serverInfo.server
val boundPort = serverInfo.boundPort
assert(server.getState === "STARTED")
assert(boundPort != 0)
intercept[BindException] {
socket = new ServerSocket(boundPort)
}
} finally {
stopServer(serverInfo)
closeSocket(socket)
}
}
test("jetty with https binds to port 0 correctly") {
var socket: ServerSocket = null
var serverInfo: ServerInfo = null
try {
val (conf, _, sslOptions) = sslEnabledConf()
serverInfo = JettyUtils.startJettyServer("0.0.0.0", 0, sslOptions, conf)
val server = serverInfo.server
val boundPort = serverInfo.boundPort
assert(server.getState === "STARTED")
assert(boundPort != 0)
assert(serverInfo.securePort.isDefined)
intercept[BindException] {
socket = new ServerSocket(boundPort)
}
} finally {
stopServer(serverInfo)
closeSocket(socket)
}
}
test("verify webUrl contains the scheme") {
withSpark(newSparkContext()) { sc =>
val ui = sc.ui.get
val uiAddress = ui.webUrl
assert(uiAddress.startsWith("http://") || uiAddress.startsWith("https://"))
}
}
test("verify webUrl contains the port") {
withSpark(newSparkContext()) { sc =>
val ui = sc.ui.get
val splitUIAddress = ui.webUrl.split(':')
val boundPort = ui.boundPort
assert(splitUIAddress(2).toInt == boundPort)
}
}
test("verify proxy rewrittenURI") {
val prefix = "/worker-id"
val target = "http://localhost:8081"
val path = "/worker-id/json"
var rewrittenURI = JettyUtils.createProxyURI(prefix, target, path, null)
assert(rewrittenURI.toString() === "http://localhost:8081/json")
rewrittenURI = JettyUtils.createProxyURI(prefix, target, path, "test=done")
assert(rewrittenURI.toString() === "http://localhost:8081/json?test=done")
rewrittenURI = JettyUtils.createProxyURI(prefix, target, "/worker-id", null)
assert(rewrittenURI.toString() === "http://localhost:8081")
rewrittenURI = JettyUtils.createProxyURI(prefix, target, "/worker-id/test%2F", null)
assert(rewrittenURI.toString() === "http://localhost:8081/test%2F")
rewrittenURI = JettyUtils.createProxyURI(prefix, target, "/worker-id/%F0%9F%98%84", null)
assert(rewrittenURI.toString() === "http://localhost:8081/%F0%9F%98%84")
rewrittenURI = JettyUtils.createProxyURI(prefix, target, "/worker-noid/json", null)
assert(rewrittenURI === null)
}
test("SPARK-33611: Avoid encoding twice on the query parameter of proxy rewrittenURI") {
val prefix = "/worker-id"
val target = "http://localhost:8081"
val path = "/worker-id/json"
val rewrittenURI =
JettyUtils.createProxyURI(prefix, target, path, "order%5B0%5D%5Bcolumn%5D=0")
assert(rewrittenURI.toString === "http://localhost:8081/json?order%5B0%5D%5Bcolumn%5D=0")
}
test("verify rewriting location header for reverse proxy") {
val clientRequest = mock(classOf[HttpServletRequest])
var headerValue = "http://localhost:4040/jobs"
val targetUri = URI.create("http://localhost:4040")
when(clientRequest.getScheme()).thenReturn("http")
when(clientRequest.getHeader("host")).thenReturn("localhost:8080")
when(clientRequest.getPathInfo()).thenReturn("/proxy/worker-id/jobs")
var newHeader = JettyUtils.createProxyLocationHeader(headerValue, clientRequest, targetUri)
assert(newHeader.toString() === "http://localhost:8080/proxy/worker-id/jobs")
headerValue = "http://localhost:4041/jobs"
newHeader = JettyUtils.createProxyLocationHeader(headerValue, clientRequest, targetUri)
assert(newHeader === null)
}
test("add and remove handlers with custom user filter") {
val (conf, securityMgr, sslOptions) = sslDisabledConf()
conf.set("spark.ui.filters", classOf[TestFilter].getName())
conf.set(s"spark.${classOf[TestFilter].getName()}.param.responseCode",
HttpServletResponse.SC_NOT_ACCEPTABLE.toString)
val serverInfo = JettyUtils.startJettyServer("0.0.0.0", 0, sslOptions, conf)
try {
val path = "/test"
val url = new URL(s"http://localhost:${serverInfo.boundPort}$path/root")
assert(TestUtils.httpResponseCode(url) === HttpServletResponse.SC_NOT_FOUND)
val (servlet, ctx) = newContext(path)
serverInfo.addHandler(ctx, securityMgr)
assert(TestUtils.httpResponseCode(url) === HttpServletResponse.SC_NOT_ACCEPTABLE)
// Try a request with bad content in a parameter to make sure the security filter
// is being added to new handlers.
val badRequest = new URL(
s"http://localhost:${serverInfo.boundPort}$path/root?bypass&invalid<=foo")
assert(TestUtils.httpResponseCode(badRequest) === HttpServletResponse.SC_OK)
      assert(servlet.lastRequest != null)
      assert(servlet.lastRequest.getParameter("invalid<") === null)
serverInfo.removeHandler(ctx)
assert(TestUtils.httpResponseCode(url) === HttpServletResponse.SC_NOT_FOUND)
} finally {
stopServer(serverInfo)
}
}
test("SPARK-32467: Avoid encoding URL twice on https redirect") {
val (conf, securityMgr, sslOptions) = sslEnabledConf()
val serverInfo = JettyUtils.startJettyServer("0.0.0.0", 0, sslOptions, conf)
try {
val serverAddr = s"http://localhost:${serverInfo.boundPort}"
val (_, ctx) = newContext("/ctx1")
serverInfo.addHandler(ctx, securityMgr)
TestUtils.withHttpConnection(new URL(s"$serverAddr/ctx%281%29?a%5B0%5D=b")) { conn =>
assert(conn.getResponseCode() === HttpServletResponse.SC_FOUND)
val location = Option(conn.getHeaderFields().get("Location"))
.map(_.get(0)).orNull
val expectedLocation = s"https://localhost:${serverInfo.securePort.get}/ctx(1)?a[0]=b"
assert(location == expectedLocation)
}
} finally {
stopServer(serverInfo)
}
}
test("http -> https redirect applies to all URIs") {
val (conf, securityMgr, sslOptions) = sslEnabledConf()
val serverInfo = JettyUtils.startJettyServer("0.0.0.0", 0, sslOptions, conf)
try {
Seq(newContext("/"), newContext("/test1")).foreach { case (_, ctx) =>
serverInfo.addHandler(ctx, securityMgr)
}
assert(serverInfo.server.getState === "STARTED")
val (_, testContext) = newContext("/test2")
serverInfo.addHandler(testContext, securityMgr)
val httpPort = serverInfo.boundPort
val tests = Seq(
("http", serverInfo.boundPort, HttpServletResponse.SC_FOUND),
("https", serverInfo.securePort.get, HttpServletResponse.SC_OK))
tests.foreach { case (scheme, port, expected) =>
val urls = Seq(
s"$scheme://localhost:$port/root",
s"$scheme://localhost:$port/test1/root",
s"$scheme://localhost:$port/test2/root")
urls.foreach { url =>
val rc = TestUtils.httpResponseCode(new URL(url))
assert(rc === expected, s"Unexpected status $rc for $url")
}
}
} finally {
stopServer(serverInfo)
}
}
test("specify both http and https ports separately") {
var socket: ServerSocket = null
var serverInfo: ServerInfo = null
try {
socket = new ServerSocket(0)
// Make sure the SSL port lies way outside the "http + 400" range used as the default.
val baseSslPort = Utils.userPort(socket.getLocalPort(), 10000)
val (conf, _, sslOptions) = sslEnabledConf(sslPort = Some(baseSslPort))
serverInfo = JettyUtils.startJettyServer("0.0.0.0", socket.getLocalPort() + 1,
sslOptions, conf, serverName = "server1")
val notAllowed = Utils.userPort(serverInfo.boundPort, 400)
assert(serverInfo.securePort.isDefined)
      assert(serverInfo.securePort.get != notAllowed)
} finally {
stopServer(serverInfo)
closeSocket(socket)
}
}
test("redirect with proxy server support") {
val proxyRoot = "https://proxy.example.com:443/prefix"
val (conf, securityMgr, sslOptions) = sslDisabledConf()
conf.set(UI.PROXY_REDIRECT_URI, proxyRoot)
val serverInfo = JettyUtils.startJettyServer("0.0.0.0", 0, sslOptions, conf)
try {
val serverAddr = s"http://localhost:${serverInfo.boundPort}"
val (_, ctx) = newContext("/ctx1")
serverInfo.addHandler(ctx, securityMgr)
val redirect = JettyUtils.createRedirectHandler("/src", "/dst")
serverInfo.addHandler(redirect, securityMgr)
// Test Jetty's built-in redirect to add the trailing slash to the context path.
TestUtils.withHttpConnection(new URL(s"$serverAddr/ctx1")) { conn =>
assert(conn.getResponseCode() === HttpServletResponse.SC_FOUND)
val location = Option(conn.getHeaderFields().get("Location"))
.map(_.get(0)).orNull
assert(location === s"$proxyRoot/ctx1/")
}
// Test with a URL handled by the added redirect handler, and also including a path prefix.
val headers = Seq("X-Forwarded-Context" -> "/prefix")
TestUtils.withHttpConnection(
new URL(s"$serverAddr/src/"),
headers = headers) { conn =>
assert(conn.getResponseCode() === HttpServletResponse.SC_FOUND)
val location = Option(conn.getHeaderFields().get("Location"))
.map(_.get(0)).orNull
assert(location === s"$proxyRoot/prefix/dst")
}
// Not really used by Spark, but test with a relative redirect.
val relative = JettyUtils.createRedirectHandler("/rel", "root")
serverInfo.addHandler(relative, securityMgr)
TestUtils.withHttpConnection(new URL(s"$serverAddr/rel/")) { conn =>
assert(conn.getResponseCode() === HttpServletResponse.SC_FOUND)
val location = Option(conn.getHeaderFields().get("Location"))
.map(_.get(0)).orNull
assert(location === s"$proxyRoot/rel/root")
}
} finally {
stopServer(serverInfo)
}
}
/**
* Create a new context handler for the given path, with a single servlet that responds to
* requests in `$path/root`.
*/
private def newContext(path: String): (CapturingServlet, ServletContextHandler) = {
val servlet = new CapturingServlet()
val ctx = new ServletContextHandler()
ctx.setContextPath(path)
ctx.addServlet(new ServletHolder(servlet), "/root")
(servlet, ctx)
}
def stopServer(info: ServerInfo): Unit = {
if (info != null) info.stop()
}
def closeSocket(socket: ServerSocket): Unit = {
if (socket != null) socket.close
}
/** Test servlet that exposes the last request object for GET calls. */
private class CapturingServlet extends HttpServlet {
@volatile var lastRequest: HttpServletRequest = _
override def doGet(req: HttpServletRequest, res: HttpServletResponse): Unit = {
lastRequest = req
res.sendError(HttpServletResponse.SC_OK)
}
}
}
// Filter for testing; returns a configurable code for every request.
private[spark] class TestFilter extends Filter {
private var rc: Int = HttpServletResponse.SC_OK
override def destroy(): Unit = { }
override def init(config: FilterConfig): Unit = {
if (config.getInitParameter("responseCode") != null) {
rc = config.getInitParameter("responseCode").toInt
}
}
override def doFilter(req: ServletRequest, res: ServletResponse, chain: FilterChain): Unit = {
if (req.getParameter("bypass") == null) {
res.asInstanceOf[HttpServletResponse].sendError(rc, "Test.")
} else {
chain.doFilter(req, res)
}
}
}
| witgo/spark | core/src/test/scala/org/apache/spark/ui/UISuite.scala | Scala | apache-2.0 | 17,338 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Tue May 13 16:18:42 EDT 2014
* @see LICENSE (MIT style license file).
*
* Intended to make switching GUI's easier.
*/
package scalation.scala2d
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** `Frame` is a convenience class for `JFrame`.
*/
class Frame (title: String) extends javax.swing.JFrame (title)
{
setDefaultCloseOperation (javax.swing.JFrame.EXIT_ON_CLOSE);
} // Frame
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** `Panel` is a convenience class for `JPanel`.
*/
class Panel extends javax.swing.JPanel
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** `ScrollPane` is a convenience class for `JScrollPane`.
*/
class ScrollPane (table: Table) extends javax.swing.JScrollPane (table)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** `Table` is a convenience class for `JTable`.
*/
class Table (rows: Int, cols: Int) extends javax.swing.JTable (rows, cols)
| mvnural/scalation | src/main/scala/scalation/scala2d/Widgets.scala | Scala | mit | 1,169 |
package example
class ForComprehension/*<=example.ForComprehension#*/ {
for {
a/*<=local0*/ <- List/*=>scala.collection.immutable.List.*/(1)
b/*<=local1*/ <- List/*=>scala.collection.immutable.List.*/(1)
if b/*=>local1*/ >/*=>scala.Int#`>`(+3).*/ 1
c/*<=local3*/ = a/*=>local0*/ +/*=>scala.Int#`+`(+4).*/ b/*=>local1*/
} yield (a/*=>local0*/, b/*=>local1*/, c/*=>local3*/)
for {
a/*<=local4*/ <- List/*=>scala.collection.immutable.List.*/(1)
b/*<=local5*/ <- List/*=>scala.collection.immutable.List.*/(a/*=>local4*/)
if (
a/*=>local4*/,
b/*=>local5*/
) ==/*=>java.lang.Object#`==`().*/ (1, 2)
(
c/*<=local11*/,
d/*<=local12*/
) <- List/*=>scala.collection.immutable.List.*/((a/*=>local4*/, b/*=>local5*/))
if (
a/*=>local4*/,
b/*=>local5*/,
c/*=>local11*/,
d/*=>local12*/
) ==/*=>java.lang.Object#`==`().*/ (1, 2, 3, 4)
e/*<=local14*/ = (
a/*=>local4*/,
b/*=>local5*/,
c/*=>local11*/,
d/*=>local12*/
)
if e/*=>local14*/ ==/*=>java.lang.Object#`==`().*/ (1, 2, 3, 4)
f/*<=local15*/ <- List/*=>scala.collection.immutable.List.*/(e/*=>local14*/)
} yield {
(
a/*=>local4*/,
b/*=>local5*/,
c/*=>local11*/,
d/*=>local12*/,
e/*=>local14*/,
f/*=>local15*/
)
}
}
| scalameta/scalameta | tests/jvm/src/test/resources/example/ForComprehension.scala | Scala | bsd-3-clause | 1,345 |
package net.yefremov.sleipnirsample
import play.api.mvc._
/**
* Controller for the index page.
*/
object IndexController extends Controller {
def index = Action {
Ok("Welcome to the Sleipnir sample app!")
}
}
| dmitriy-yefremov/sleipnir-sample | app/net/yefremov/sleipnirsample/IndexController.scala | Scala | apache-2.0 | 223 |
package hanabi
import scala.scalajs.js.annotation.JSExportAll
// clues are used to record information given by hints
trait Clue {
val position: Int
def update(newPos: Int): Clue
def matches(c: Card): Boolean
}
@JSExportAll
case class ColorClue(color: Color, position: Int) extends Clue {
def matches(c: Card) = c.color == color
def update(newPos: Int) = copy(position = newPos)
}
@JSExportAll
case class LevelClue(level: Int, position: Int) extends Clue {
def matches(c: Card) = c.level == level
def update(newPos: Int) = copy(position = newPos)
}
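// Illustrative matching behaviour (assuming a Card(color, level) shape and
// colour values such as Red/Blue defined elsewhere in this package):
//   ColorClue(Red, position = 0).matches(Card(Red, 3))  // true
//   LevelClue(2, position = 1).matches(Card(Blue, 2))   // true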
| wl-seclin-hashcode/hanabi | src/main/scala/hanabi/Clue.scala | Scala | mit | 570 |
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.selftraining
/**
* Created by nkatz at 24/11/2018
*/
class DataBatch(val pos: Set[String], val neg: Set[String], val unknown: Set[String], unkown: Set[String]) {
}
| nkatzz/OLED | src/main/scala/oled/selftraining/DataBatch.scala | Scala | gpl-3.0 | 880 |
/*
* Copyright 2015 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.index
import java.util
import java.util.Comparator
import com.netflix.atlas.core.model.Query
import com.netflix.atlas.core.model.Tag
import com.netflix.atlas.core.model.TagKey
import com.netflix.atlas.core.model.TaggedItem
import com.netflix.atlas.core.util.Interner
import gnu.trove.map.hash.TIntIntHashMap
import gnu.trove.map.hash.TObjectIntHashMap
import gnu.trove.procedure.TIntIntProcedure
import gnu.trove.procedure.TObjectIntProcedure
import gnu.trove.set.hash.TIntHashSet
import org.slf4j.LoggerFactory
object LazyTagIndex {
private val logger = LoggerFactory.getLogger(getClass)
def empty[T <: TaggedItem: Manifest]: LazyTagIndex[T] = {
new LazyTagIndex(new Array[T](0))
}
}
class LazyTagIndex[T <: TaggedItem](items: Array[T], interner: Interner[String])
extends TagIndex[T] {
import com.netflix.atlas.core.index.LazyTagIndex._
def this(items: Array[T]) = this(items, Interner.forStrings)
type LazyValueMap = util.IdentityHashMap[String, LazySet]
type LazyKeyMap = util.IdentityHashMap[String, LazyValueMap]
type ValueMap = util.IdentityHashMap[String, TIntHashSet]
type KeyMap = util.IdentityHashMap[String, ValueMap]
// Comparator for ordering tagged items using the id
private val idComparator = new Comparator[T] {
def compare(t1: T, t2: T): Int = t1.id compareTo t2.id
}
// Precomputed set of all items
private val all = LazySet.all(items.length)
// Primary indexes to search for a tagged item:
// * itemIds: sorted array of item ids
// * itemIndex: key -> value -> set, the set contains indexes to the items array
// * keyIndex: key -> set, precomputed union of all sets for a given key
private val (itemIds, itemIndex, keyIndex) = buildItemIndex()
// Indexes to search for matching tags
// * tagsIndex: sorted array of all unique tags and overall counts
// * itemTags: itemTags(i) has an array of indexes into tags array for items(i)
private val (tagIndex, itemTags) = buildTagIndex()
private def buildItemIndex(): (Array[String], LazyKeyMap, LazyValueMap) = {
// Sort items array based on the id, allows for efficient paging of requests using the id
// as the offset
logger.debug(s"building index with ${items.length} items, starting sort")
util.Arrays.sort(items, idComparator)
val itemIds = new Array[String](items.length)
// Build the main index
logger.debug(s"building index with ${items.length} items, create main key map")
val kidx = new ValueMap
val idx = new KeyMap
var pos = 0
while (pos < items.length) {
itemIds(pos) = items(pos).idString
items(pos).foreach { (k, v) =>
val internedK = interner.intern(k)
var vidx = idx.get(internedK)
if (vidx == null) {
vidx = new ValueMap
idx.put(internedK, vidx)
}
// Add to value index
val internedV = interner.intern(v)
var matchSet = vidx.get(internedV)
if (matchSet == null) {
matchSet = new TIntHashSet
vidx.put(internedV, matchSet)
}
matchSet.add(pos)
// Add to key index
matchSet = kidx.get(internedK)
if (matchSet == null) {
matchSet = new TIntHashSet
kidx.put(internedK, matchSet)
}
matchSet.add(pos)
}
pos += 1
}
// Build final item index
logger.debug(s"building index with ${items.length} items, create lazy index")
val lazyIdx = new LazyKeyMap
val keys = idx.entrySet.iterator
while (keys.hasNext) {
val lazyVidx = new LazyValueMap
val keyEntry = keys.next()
val values = keyEntry.getValue.entrySet.iterator
while (values.hasNext) {
val valueEntry = values.next()
val lazySet = LazySet(valueEntry.getValue.toArray)
lazyVidx.put(valueEntry.getKey, lazySet)
}
lazyIdx.put(keyEntry.getKey, lazyVidx)
}
// Build final key index
logger.debug(s"building index with ${items.length} items, create key index")
val lazyKidx = new LazyValueMap
val keyIter = kidx.entrySet.iterator
while (keyIter.hasNext) {
val keyEntry = keyIter.next()
val lazySet = LazySet(keyEntry.getValue.toArray)
lazyKidx.put(keyEntry.getKey, lazySet)
}
(itemIds, lazyIdx, lazyKidx)
}
private def buildTagIndex(): (Array[Tag], Array[Array[Int]]) = {
// Count how many times each tag occurs
logger.debug(s"building tag index with ${items.length} items, compute tag counts")
val tagCounts = new TObjectIntHashMap[(String, String)]
var pos = 0
while (pos < items.length) {
items(pos).foreach { (k, v) =>
val t = k -> v
tagCounts.adjustOrPutValue(t, 1, 1)
}
pos += 1
}
// Create sorted array with tags and the overall counts
logger.debug(s"building tag index with ${items.length} items, sort and overall counts")
val tagsArrayBuilder = new TagsArrayBuilder(tagCounts.size)
tagCounts.forEachEntry(tagsArrayBuilder)
val tagsArray = tagsArrayBuilder.result
util.Arrays.sort(tagsArray.asInstanceOf[Array[AnyRef]])
// Create map of tag to position in tags array
logger.debug(s"building tag index with ${items.length} items, tag to position map")
val posMap = new TObjectIntHashMap[(String, String)]
pos = 0
while (pos < tagsArray.length) {
val t = tagsArray(pos)
posMap.put(t.key -> t.value, pos)
pos += 1
}
// Build array of the tags for a given item
logger.debug(s"building tag index with ${items.length} items, create item to tag ints map")
val itemTags = new Array[Array[Int]](items.length)
pos = 0
while (pos < items.length) {
val tags = items(pos).tags
val tagsArray = new Array[Int](tags.size)
var i = 0
items(pos).foreach { (k, v) =>
tagsArray(i) = posMap.get(k -> v)
i += 1
}
itemTags(pos) = tagsArray
pos += 1
}
(tagsArray, itemTags)
}
private final class TagsArrayBuilder(sz: Int) extends TObjectIntProcedure[(String, String)] {
val result = new Array[Tag](sz)
var next = 0
def execute(k: (String, String), v: Int): Boolean = {
result(next) = Tag(interner.intern(k._1), interner.intern(k._2), v)
next += 1
true
}
}
private final class TagIndexBuilder(sz: Int) extends TObjectIntProcedure[(String, String)] {
val result = new Array[(String, String)](sz)
def execute(k: (String, String), v: Int): Boolean = {
result(v - 1) = interner.intern(k._1) -> interner.intern(k._2)
true
}
}
private final class TagListBuilder extends TIntIntProcedure {
val list = List.newBuilder[Tag]
def execute(k: Int, v: Int): Boolean = {
val t = tagIndex(k)
list += Tag(t.key, t.value, v)
true
}
def result: List[Tag] = list.result
}
private final class FindTagsBuilder(size: Int) extends TIntIntProcedure {
val buffer = new Array[Tag](size)
var next = 0
def execute(k: Int, v: Int): Boolean = {
val t = tagIndex(k)
buffer(next) = Tag(t.key, t.value, v)
next += 1
true
}
def result: Array[Tag] = buffer
}
private[index] def findImpl(query: Query, offset: Int, andSet: Option[LazySet]): LazySet = {
import com.netflix.atlas.core.model.Query._
query match {
case And(q1, q2) => and(q1, q2, offset, andSet)
case Or(q1, q2) => or(q1, q2, offset, andSet)
case Not(q) => all.diff(findImpl(q, offset, None))
case Equal(k, v) => equal(k, v, offset)
case GreaterThan(k, v) => greaterThan(k, v, false)
case GreaterThanEqual(k, v) => greaterThan(k, v, true)
case LessThan(k, v) => lessThan(k, v, false)
case LessThanEqual(k, v) => lessThan(k, v, true)
case q: In => findImpl(q.toOrQuery, offset, andSet)
case q: PatternQuery => strPattern(q, offset, andSet)
case HasKey(k) => hasKey(k, offset)
case True => all
case False => LazySet.empty
}
}
private def and(q1: Query, q2: Query, offset: Int, andSet: Option[LazySet]): LazySet = {
val s1 = findImpl(q1, offset, andSet)
if (s1.isEmpty) LazySet.empty else {
val s = if (andSet.isDefined) Some(andSet.get.intersect(s1)) else Some(s1)
val s2 = findImpl(q2, offset, s)
if (s1.isInstanceOf[BitMaskSet]) {
s1.asInstanceOf[BitMaskSet].mutableIntersect(s2)
s1
} else if (s2.isInstanceOf[BitMaskSet]) {
s2.asInstanceOf[BitMaskSet].mutableIntersect(s1)
s2
} else {
s1.intersect(s2)
}
}
}
private def or(q1: Query, q2: Query, offset: Int, andSet: Option[LazySet]): LazySet = {
val s1 = findImpl(q1, offset, andSet)
val s2 = findImpl(q2, offset, andSet)
if (s1.isInstanceOf[BitMaskSet]) {
s1.asInstanceOf[BitMaskSet].mutableUnion(s2)
s1
} else if (s2.isInstanceOf[BitMaskSet]) {
s2.asInstanceOf[BitMaskSet].mutableUnion(s1)
s2
} else {
s1.union(s2)
}
}
private def equal(k: String, v: String, offset: Int): LazySet = {
val internedK = interner.intern(k)
val vidx = itemIndex.get(internedK)
if (vidx == null) LazySet.empty else {
val internedV = interner.intern(v)
val matchSet = vidx.get(internedV)
if (matchSet == null) LazySet.empty else matchSet.offset(offset).workingCopy
}
}
private def greaterThan(k: String, v: String, orEqual: Boolean): LazySet = {
val internedK = interner.intern(k)
val vidx = itemIndex.get(internedK)
if (vidx == null) LazySet.empty else {
val set = LazySet.emptyBitMaskSet
val tag = Tag(internedK, v, -1)
var i = tagOffset(tag)
// Skip if equal
if (!orEqual && i < tagIndex.length && tagIndex(i).key == internedK && tagIndex(i).value == v) {
i += 1
}
// Data is sorted, no need to perform a check for each entry if key matches
while (i < tagIndex.length && tagIndex(i).key == internedK) {
set.mutableUnion(vidx.get(tagIndex(i).value))
i += 1
}
set
}
}
private def lessThan(k: String, v: String, orEqual: Boolean): LazySet = {
val internedK = interner.intern(k)
val vidx = itemIndex.get(internedK)
if (vidx == null) LazySet.empty else {
val set = LazySet.emptyBitMaskSet
val tag = Tag(internedK, v, -1)
var i = tagOffset(tag)
// Skip if equal
if (!orEqual && i >= 0 && tagIndex(i).key == internedK && tagIndex(i).value == v) {
i -= 1
}
// Data is sorted, no need to perform a check for each entry if key matches
while (i >= 0 && tagIndex(i).key == internedK) {
set.mutableUnion(vidx.get(tagIndex(i).value))
i -= 1
}
set
}
}
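  // When the pattern has a literal prefix, only the sorted tag range starting
  // at that prefix is scanned; otherwise every value for the key is checked,
  // restricted to the already-matched item set when one is available.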
private def strPattern(q: Query.PatternQuery, offset: Int, andSet: Option[LazySet]): LazySet = {
val internedK = interner.intern(q.k)
val vidx = itemIndex.get(internedK)
if (vidx == null) LazySet.empty else {
val set = LazySet.emptyBitMaskSet
if (q.pattern.prefix.isDefined) {
val prefix = q.pattern.prefix.get
val tag = Tag(internedK, prefix, -1)
var i = tagOffset(tag)
while (i < tagIndex.length &&
tagIndex(i).key == internedK &&
tagIndex(i).value.startsWith(prefix)) {
if (q.check(tagIndex(i).value)) {
set.mutableUnion(vidx.get(tagIndex(i).value))
}
i += 1
}
} else {
if (andSet.isDefined) {
val s = andSet.get
val iter = s.iterator
while (iter.hasNext) {
val i = iter.next()
val item = items(i)
val value = item.tags.get(internedK)
if (value.isDefined && q.check(value.get)) {
set.mask.set(i)
}
}
} else {
val entries = vidx.entrySet.iterator
while (entries.hasNext) {
val entry = entries.next()
if (q.check(entry.getKey))
set.mutableUnion(entry.getValue.offset(offset))
}
}
}
set
}
}
private def hasKey(k: String, offset: Int): LazySet = {
val internedK = interner.intern(k)
val matchSet = keyIndex.get(internedK)
if (matchSet == null) LazySet.empty else matchSet.offset(offset).workingCopy
}
private def itemOffset(v: String): Int = {
if (v == null || v == "") 0 else {
val pos = util.Arrays.binarySearch(itemIds.asInstanceOf[Array[AnyRef]], v)
if (pos < 0) -pos - 1 else pos
}
}
private def tagOffset(v: Tag): Int = {
if (v == null || v.key == "") 0 else {
val pos = util.Arrays.binarySearch(tagIndex.asInstanceOf[Array[AnyRef]], v)
if (pos == -1) 0 else if (pos < -1) -pos - 1 else pos
}
}
def findTags(query: TagQuery): List[Tag] = {
import com.netflix.atlas.core.model.Query._
val q = query.query
val k = query.key
val offset = tagOffset(query.offsetTag)
if (q.isDefined || k.isDefined) {
// If key is restricted add a has query to search
val finalQ = if (!k.isDefined) q.get else {
if (q.isDefined) And(HasKey(k.get), q.get) else HasKey(k.get)
}
// Count how many tags match the query
val counts = new TIntIntHashMap
val itemSet = findImpl(finalQ, 0, None)
val iter = itemSet.iterator
while (iter.hasNext) {
val tags = itemTags(iter.next())
var i = 0
while (i < tags.length) {
val t = tags(i)
if (t >= offset && (!k.isDefined || tagIndex(t).key == k.get)) {
counts.adjustOrPutValue(t, 1, 1)
}
i += 1
}
}
// Create array with final set of matching tags
val builder = new FindTagsBuilder(counts.size)
counts.forEachEntry(builder)
val result = builder.result
util.Arrays.sort(result.asInstanceOf[Array[AnyRef]])
// Create list based on limit per page
val limit = math.min(query.limit, result.length)
val listBuilder = List.newBuilder[Tag]
var i = 0
while (i < limit) {
listBuilder += result(i)
i += 1
}
listBuilder.result
} else {
// If no query, use precomputed array of all tags
val limit = math.min(query.extendedLimit(offset), tagIndex.length)
val listBuilder = List.newBuilder[Tag]
var i = offset
while (i < limit) {
listBuilder += tagIndex(i)
i += 1
}
listBuilder.result
}
}
def findKeys(query: TagQuery): List[TagKey] = {
findValues(query).map { v => TagKey(v, -1) }
}
def findValues(query: TagQuery): List[String] = {
val k = query.key
if (k.isDefined) {
val offset = k.get + "," + query.offset
val tags = findTags(TagQuery(query.query, k, offset, query.limit))
tags.map(_.value)
} else {
import scala.collection.JavaConversions._
val tags = findTags(TagQuery(query.query))
val dedupedKeys = new java.util.HashSet[String]
tags.foreach { t =>
if (t.key > query.offset) dedupedKeys.add(t.key)
}
dedupedKeys.toList.sortWith(_ < _).take(query.limit)
}
}
def findItems(query: TagQuery): List[T] = {
val offset = itemOffset(query.offset)
val limit = query.limit
val list = List.newBuilder[T]
val intSet = query.query.fold(all.offset(offset))(q => findImpl(q, offset, None))
val iter = intSet.iterator
var count = 0
while (iter.hasNext && count < limit) {
list += items(iter.next())
count += 1
}
list.result
}
val size: Int = items.length
}
| jasimmk/atlas | atlas-core/src/main/scala/com/netflix/atlas/core/index/LazyTagIndex.scala | Scala | apache-2.0 | 16,369 |
package breeze.linalg
/**
* Marker trait for exceptions thrown from the [[breeze.linalg]] package.
*/
trait LinearAlgebraException extends RuntimeException
/**
* Exception thrown if a routine has not converged.
*/
class NotConvergedException(val reason: NotConvergedException.Reason, msg: String = "")
extends RuntimeException(msg) with LinearAlgebraException
object NotConvergedException {
trait Reason
object Iterations extends Reason
object Divergence extends Reason
object Breakdown extends Reason
}
class MatrixNotSymmetricException extends IllegalArgumentException("Matrix is not symmetric") with LinearAlgebraException
class MatrixNotSquareException extends IllegalArgumentException("Matrix is not square") with LinearAlgebraException
class MatrixEmptyException extends IllegalArgumentException("Matrix is empty") with LinearAlgebraException
/**
* Thrown when trying to solve using a singular matrix.
*
* @author dramage, dlwh
*/
class MatrixSingularException(msg : String="") extends RuntimeException(msg) with LinearAlgebraException
| wavelets/breeze | src/main/scala/breeze/linalg/LinearAlgebraException.scala | Scala | apache-2.0 | 1,069 |
package org.dohrm.toolkit.utils
import com.github.tototoshi.slick.{PostgresJodaSupport, GenericJodaSupport}
import org.dohrm.toolkit.actor.response.{InvalidRequestError, ExceptionError, Error}
import org.dohrm.toolkit.context.{ConfigContext, JdbcConfig, JdbcContext}
import org.postgresql.util.PSQLException
import slick.driver.JdbcProfile
import slick.jdbc.JdbcBackend
/**
* @author michaeldohr
* @since 29/05/16
*/
trait PostgresSupport extends JdbcContext {
self: ConfigContext =>
private val DbConfig = config.getConfig("postgres")
private lazy val lazyDb = JdbcBackend.Database.forURL(
url = s"jdbc:postgresql://${DbConfig.getString("url")}:${DbConfig.getString("port")}/${DbConfig.getString("database")}",
user = DbConfig.getString("user"),
password = DbConfig.getString("password"),
driver = DbConfig.getString("driver"),
keepAliveConnection = DbConfig.getBoolean("keepAliveConnection")
)
override implicit lazy val jdbcConfig: JdbcConfig = new JdbcConfig {
override val jodaSupport: GenericJodaSupport = PostgresJodaSupport
override def db: JdbcBackend.DatabaseDef = lazyDb
override val driver: JdbcProfile = slick.driver.PostgresDriver
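    // Map unique constraint violations (SQLState 23505) to a validation error; any other
    // PostgreSQL error is wrapped in a generic exception error.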
override def exceptionToErrorMapper: PartialFunction[Throwable, Error] = {
      case e: PSQLException if e.getServerErrorMessage.getSQLState == "23505" =>
InvalidRequestError(Seq(s"${e.getServerErrorMessage.getTable.toLowerCase}.unique_violation"))
case e: PSQLException =>
println(e.getServerErrorMessage)
ExceptionError(e)
}
}
}
| dohr-michael/storyline | src/main/scala/org/dohrm/toolkit/utils/PostgresSupport.scala | Scala | mit | 1,574 |
package at.magiun.core.service
import java.util.concurrent.atomic.AtomicInteger
import at.magiun.core.model.BlockType.{AddColumn, DataSetReader, DatabaseReader, DropColumn, FileReader, FileWriter, LinearRegression}
import at.magiun.core.model._
import org.apache.spark.sql.SparkSession
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
class ExecutionService(
spark: SparkSession,
blockService: BlockService,
val executionContext: ExecutionContext,
dataSetService: DataSetService
) {
private val idGenerator = new AtomicInteger(1)
def execute(execution: Execution): Future[ExecutionResult] = {
blockService.find(execution.blockId)
.map(finalBlock => {
val blocks = loadBlocks(finalBlock)
val output: StageOutput = execute(blocks, finalBlock)
output match {
case DatasetOutput(ds) => ds.cache()
}
val execId = getNextId
executionContext.registerExecution(execId, output)
ExecutionResult(execId)
})
}
  // HACK: this should be done asynchronously, but it turns out it is not that easy
private def loadBlocks(block: Block): Map[String, Block] = {
if (block.inputs.isEmpty) {
Map(block.id -> block)
} else {
val blockIds = block.inputs.map(_.blockId)
val blocks = blockIds.map(blockService.find).map(Await.result(_, 2.seconds))
val blockMap = blocks.map(loadBlocks).foldLeft(Map[String, Block]()) { (acc, m) => acc ++ m }
blockMap + ((block.id, block))
}
}
def execute(blocks: Map[String, Block], finalBlock: Block): StageOutput = {
val stage = buildStages(blocks, finalBlock)
stage.perform
}
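  // Recursively translates a block and its upstream input blocks into the corresponding chain of stages.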
private def buildStages(blocks: Map[String, Block], block: Block): Stage = {
block.`type` match {
case FileReader =>
new FileReaderStage(spark, block.params("fileName"))
case DataSetReader =>
new DataSetReaderStage(dataSetService, block.params("dataSetId"))
case DatabaseReader => ???
case FileWriter =>
val nextBlock = blocks(block.inputs.head.blockId)
val stage = buildStages(blocks, nextBlock)
new FileWriterStage(StageInput(stage), block.params("fileName"))
case DropColumn =>
val nextBlock = blocks(block.inputs.head.blockId)
val stage = buildStages(blocks, nextBlock)
new DropColumnStage(StageInput(stage), block.params("columnName"))
case AddColumn =>
val nextBlock = blocks(block.inputs.head.blockId)
val stage = buildStages(blocks, nextBlock)
new AddColumnStage(StageInput(stage), block.params("newColumnName"), block.params("expression"))
case LinearRegression => ???
}
}
private def getNextId: String = {
"mem-" + idGenerator.getAndIncrement()
}
}
| Mihai238/magiun | core/src/main/scala/at/magiun/core/service/ExecutionService.scala | Scala | apache-2.0 | 2,956 |
class C0
trait T { self: C0 =>
def test = {
object Local
class C2 {
class C1 {
Local
}
T.this.toString
new C1
}
new C2()
}
}
object Test extends C0 with T {
def main(args: Array[String]): Unit = {
test
}
}
| felixmulder/scala | test/files/run/t9920c.scala | Scala | bsd-3-clause | 268 |
object StaticQuery extends Query {
import Database.FilterExpr
def query(db: Seq[Product],
projections: Seq[Int]=Seq.empty,
filter: FilterExpr=null): Seq[Row] =
db.flatMap { case (name: String, birthYear: Int, dissertation: String) =>
if (birthYear < 1910 && name != null)
Some(DissertationRow(name=Some(name), dissertation=Some(dissertation)))
else
None
}
}
| devth/devth.github.com | _code/compiled-query/src/main/scala/StaticQuery.scala | Scala | mit | 424 |
package macroHList
import scala.language.experimental.macros
import scala.language.implicitConversions
import scala.language.existentials
import scala.language.higherKinds
import scala.reflect.macros.Context
import scala.reflect.ClassTag
import TypeOperators._
import Poly._
//object HList {
trait HList {
type Self <: HList
def length: Int
def isEmpty: Boolean
def nonEmpty = !isEmpty
def ::[E](e: E) = macro HList.prepend[Self, E]
def :+[E](e: E) = macro HList.append[Self, E]
def last: Any = macro HList.last[Self]
def reverse: Any = macro HList.reverse[Self]
def init: Any = macro HList.init[Self]
/** Concatenate two HLists together.
*/
def ++[L2 <: HList](l2: L2): Any = macro HList.++[Self, L2]
def updated[E](i: Int, e: E): Any = macro HList.updated[Self, E]
/** Tell whether the HList contains an element of type E or not.
*/
def containsType[E]: Boolean = macro HList.containsType[Self, E]
def contains[E](e: E): Boolean = macro HList.contains[Self, E]
def diff[L2 <: HList](l2: L2): Any = macro HList.diff[Self, L2]
/** Find first element of type E in this HList
*/
def findType[E]: Any = macro HList.findType[Self, E]
def find[E](f: E => Boolean): Any = macro HList.find[Self, E]
  /** Filter the HList so that only elements of type E remain.
   * If E is an existential type, it is used as a witness.
   * Combined with TypeOperators, this makes it possible to build complex filters,
   * such as conjunctions, disjunctions, etc.
   */
def filter[E]: Any = macro HList.filter[Self, E]
  /** The complement of filter.
*/
def filterNot[E]: Any = macro HList.filterNot[Self, E]
/** Transform this HList by applying hf
*/
//def map[HF <: HList](hf: HF) = macro HList.map[Self, HF]
def map[HF](hf: HF) = macro HList.map[Self, HF]
def flatMap[HF](hf: HF) = macro HList.flatMap[Self, HF]
/** Flatten an HList of HLists to an HList.
*/
def flatten = macro HList.flatten[Self]
  /** Get the i-th element of this HList. Only a compile-time-known index is allowed.
*/
def apply(i: Int): Any = macro HList.getIndex[Self]
/** Returns the index of the first element of type E in this HList.
*/
def indexOfType[E]: Int = macro HList.indexOfType[Self, E]
def indexOfType[E](from: Int): Int = macro HList.indexOfTypeFrom[Self, E]
def indexOf[E](e: E): Int = macro HList.indexOf[Self, E]
def indexOf[E](e: E, from: Int): Int = macro HList.indexOfFrom[Self, E]
/** Returns the index of the last element of type E in this HList.
*/
def lastIndexOf[E]: Int = macro HList.lastIndexOf[Self, E]
def lastIndexOf[E](end: Int): Int = macro HList.lastIndexOfEnd[Self, E]
  /** Take the first i elements of this HList. Only a compile-time-known number is allowed
* as argument.
*/
def take(i: Int): Any = macro HList.take[Self]
def takeRight(i: Int): Any = macro HList.takeRight[Self]
def drop(i: Int): Any = macro HList.drop[Self]
def dropRight(i: Int): Any = macro HList.dropRight[Self]
def takeWhile[E]: Any = macro HList.takeWhile[Self, E]
def dropWhile[E]: Any = macro HList.dropWhile[Self, E]
def span[E]: Any = macro HList.span[Self, E]
def splitAt(i: Int): Any = macro HList.splitAt[Self]
/** Unzip an HList of tuples to a tuple of HLists. Does not compile if the HList
   * contains anything other than tuples.
*/
def unzip: Any = macro HList.unzip[Self]
/** Zip two HLists to an HList of tuples.
*/
def zip[L2 <: HList](l2: L2): Any = macro HList.zip[Self, L2]
def zipAll[L2 <: HList, E1, E2](l2: L2, e1: E2, e2: E2): Any = macro HList.zipAll[Self, L2, E1, E2]
def zipWithIndex: Any = macro HList.zipWithIndex[Self]
/** Transform this HList to a standard List of the least upper bound type of the HList elements.
*/
def toList: Any = macro HList.toList[Self]
/** Transform this HList to a standard Array of the least upper bound type of the HList elements.
*/
def toArray: Any = macro HList.toArray[Self]
def startsWith[L2 <: HList](l2: L2): Boolean = macro HList.startsWith[Self, L2]
def endsWith[L2 <: HList](l2: L2): Boolean = macro HList.endsWith[Self, L2]
def count[HF](hf: HF): Int = macro HList.count[Self, HF]
def mkString(start: String, sep: String, end: String): String
def mkString(sep: String): String
def mkString: String
def toTuple: Any = macro HList.toTuple[Self]
def toClass: Any = macro HList.toClass[Self]
def reduceLeft[F](f: F): Any = macro HList.reduceLeft[Self, F]
def reduceRight[F](f: F): Any = macro HList.reduceRight[Self, F]
def foldLeft[T, F](t: T)(f: F): Any = macro HList.foldLeft[Self, T, F]
def foldRight[T, F](t: T)(f: F): Any = macro HList.foldRight[Self, T, F]
}
case class ::[H, T <: HList](head: H, tail: T) extends HList {
type Self = H :: T
type Head = H
def length = 1 + tail.length
def isEmpty = false
def mkString(start: String, sep: String, end: String) =
if(tail.isEmpty)
start + head + end
else
start + head + sep + tail.mkString(sep) + end
def mkString(sep: String) = mkString("", sep, "")
def mkString = mkString("", "", "")
override def toString = mkString("HList(", ", ", ")")
}
trait HNil extends HList {
type Self = HNil
def length = 0
def isEmpty = true
def mkString(start: String, sep: String, end: String) = start + end
def mkString(sep: String) = ""
def mkString = ""
override def toString = "HNil"
}
case object HNil extends HNil {
}
class ListOps[A](l: List[A]) {
//def toHList: Any = macro HList.toHList[List[A]]
}
//implicit def mkListOps[A](l: List[A]): ListOps[A] = new ListOps[A](l)
trait HListContext extends RichContext {
import c.universe._
abstract class ListExpr(tree: Tree, tpe: Type) extends AbsExpr(tree, tpe) {
def head: AbsExpr
def tail: ListExpr
def ::(e: AbsExpr): ListExpr
def :+(e: AbsExpr): ListExpr
def ++(l: ListExpr): ListExpr
def reverse: ListExpr
def last: AbsExpr
def init: ListExpr
def contains(t: Type): Expr[Boolean]
def contains(e: AbsExpr): Expr[Boolean]
def diff(l: ListExpr): ListExpr
def filter(t: Type): ListExpr
def filterNot(t: Type): ListExpr
def find(t: Type): AbsExpr
def find(f: Type => Boolean): AbsExpr
def find(f: AbsExpr): AbsExpr
def apply(i: Expr[Int]): AbsExpr
def length: Expr[Int]
def indexOf(t: Type): Expr[Int]
def indexOf(t: Type, from: Expr[Int], offset: Expr[Int] = reify(0)): Expr[Int]
def indexOf(e: AbsExpr): Expr[Int]
def indexOf(e: AbsExpr, from: Expr[Int]): Expr[Int]
def lastIndexOf(t: Type): Expr[Int]
def lastIndexOf(t: Type, end: Expr[Int], offset: Expr[Int] = reify(0)): Expr[Int]
def take(i: Expr[Int]): ListExpr
def takeRight(i: Expr[Int]): ListExpr
def drop(i: Expr[Int]): ListExpr
def dropRight(i: Expr[Int]): ListExpr
def takeWhile(t: Type): ListExpr
def dropWhile(t: Type): ListExpr
def span(t: Type): TupleExpr
def splitAt(i: Expr[Int]): TupleExpr
def unzip: TupleExpr
def updated(i: Expr[Int], e: AbsExpr): ListExpr
def zip(l: ListExpr): ListExpr
def zipAll(l: ListExpr, e1: AbsExpr, e2: AbsExpr): ListExpr
def zipWithIndex: ListExpr
def toList: AbsExpr
def toArray: AbsExpr
def tupled: AbsExpr
def unify: ListExpr
def startsWith(l: ListExpr): Expr[Boolean]
def endsWith(l: ListExpr): Expr[Boolean]
//def map(hf: ListExpr): ListExpr
def map(hf: AbsExpr): ListExpr
def flatten: ListExpr
def flatMap(hf: AbsExpr): ListExpr
def foldLeft(e: AbsExpr)(l: AbsExpr): AbsExpr
def foldRight(e: AbsExpr)(l: AbsExpr): AbsExpr
def reduceLeft(l: AbsExpr): AbsExpr
def reduceRight(l: AbsExpr): AbsExpr
def reduce(f: AbsExpr): AbsExpr = {
val res = treeBuild.mkMethodCall(f.tree, trees)
AbsExpr(c.Expr(res))
}
def count(hf: AbsExpr): Expr[Int]
def toTuple: AbsExpr
def toClass: AbsExpr
def trees: List[Tree]
def tpes: List[Type]
}
object ListExpr {
def apply[T: WeakTypeTag](expr: Expr[T]): ListExpr = {
val tpe = tpeFromExpr(expr)
if(tpe <:< typeOf[HNil])
HNilExpr
else if(tpe <:< typeOf[_ :: _])
HListExpr(expr.tree, tpe)
else
sys.error("Unknown HList type " + tpe)
}
def apply(tree: Tree, tpe: Type): ListExpr = {
if(tpe <:< typeOf[HNil])
HNilExpr
else if(tpe <:< typeOf[_ :: _])
HListExpr(tree, tpe)
else
sys.error("Unknown HList type " + tpe)
}
def apply(expr: AbsExpr): ListExpr = ListExpr(expr.tree, expr.tpe)
}
class HListExpr(tree: Tree, tpe: Type) extends ListExpr(tree, tpe) {
val (headTpe, tailTpe) = tpe match {
      case TypeRef(_, cons, List(h, t)) => (h, t)
}
def head: AbsExpr = {
def genHead[H: WeakTypeTag, T <: HList: WeakTypeTag]: AbsExpr =
AbsExpr(reify(c.Expr[H :: T](tree).splice.head))
genHead(c.WeakTypeTag(headTpe), c.WeakTypeTag(tailTpe))
}
def tail: ListExpr = {
def genTail[H: WeakTypeTag, T <: HList: WeakTypeTag]: ListExpr =
ListExpr(reify(c.Expr[H :: T](tree).splice.tail))
genTail(c.WeakTypeTag(headTpe), c.WeakTypeTag(tailTpe))
}
def ::(e: AbsExpr): ListExpr = {
def genCons[E: WeakTypeTag, L <: HList: WeakTypeTag]: ListExpr =
ListExpr(reify(new ::(c.Expr[E](e.tree).splice, c.Expr[L](tree).splice)))
genCons(c.WeakTypeTag(e.tpe), c.WeakTypeTag(tpe))
}
def reverse: ListExpr = tail.reverse :+ head
def last: AbsExpr = reverse.head
def init: ListExpr = reverse.tail.reverse
def :+(e: AbsExpr) = head :: (tail :+ e)
def ++(l: ListExpr) = init ++ (last :: l)
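    /** Look up implicit evidence that type `r` satisfies the witness type `t`: for a parameterized
     * or existential `t` the wildcard is replaced by `r` and an implicit value of that type is
     * searched for, otherwise an implicit `r <:< t` is searched for. Returns `EmptyTree` when no
     * evidence is found.
     */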
def typeLookup(t: Type, r: Type): Tree = {
def replaceWildcard(t: Type, r: Type): Type = t match {
case ExistentialType(List(t1, _*), TypeRef(pre, sym, l)) => TypeRef(pre, sym, l.map(tt =>
if(tt =:= t1.asType.toType)
r
else
replaceWildcard(tt, r)
))
case _ => t
}
val hiding =
if(t.typeConstructor.takesTypeArgs)
replaceWildcard(t, r)
else
appliedType(typeOf[_ <:< _], List(r, t))
c.echo(c.enclosingPosition, "Looking for " + hiding)
c.inferImplicitValue(hiding)
}
def contains(t: Type): Expr[Boolean] = {
val found = typeLookup(t, head.tpe)
if(found != EmptyTree)
reify(true)
else
tail.contains(t)
}
def contains(e: AbsExpr): Expr[Boolean] = {
reify(c.Expr[Any](head.tree).splice == c.Expr[Any](e.tree).splice ||
c.Expr[Boolean](tail.contains(e).tree).splice)
}
// if we have diff, we have distinct
def diff(l: ListExpr): ListExpr = ???
/*{
if()
tail.diff(l)
else
head :: tail.diff(l)
}*/
// For internal purposes
def find(f: Type => Boolean): AbsExpr = {
if(f(head.tpe))
head
else
tail.find(f)
}
def find(t: Type): AbsExpr =
find((t: Type) => {
lazy val res = typeLookup(t, head.tpe) == EmptyTree
res
})
def find(f: AbsExpr): AbsExpr = {
val tpe = f.tpe match {
case TypeRef(_, _, List(e, b)) => e
}
def genFind[E: WeakTypeTag] =
AbsExpr(reify(c.Expr[List[E]](filter(tpe).toList.tree).splice.find(c.Expr[E => Boolean](f.tree).splice)))
genFind(c.WeakTypeTag(tpe))
}
def filter(t: Type): ListExpr = {
val found = typeLookup(t, head.tpe)
if(found != EmptyTree)
head :: tail.filter(t)
else
tail.filter(t)
}
def filterNot(t: Type): ListExpr = {
val found = typeLookup(t, head.tpe)
if(found == EmptyTree)
head :: tail.filterNot(t)
else
tail.filterNot(t)
}
def apply(i: Expr[Int]): AbsExpr = {
if(c.eval(c.Expr[Int](c.resetAllAttrs(i.tree.duplicate))) == 0)
head
else
tail.apply(reify(i.splice - 1))
}
def indexOf(t: Type): Expr[Int] = indexOf(t, reify(0))
def indexOf(t: Type, from: Expr[Int], offset: Expr[Int] = reify(0)): Expr[Int] = {
if(c.eval(c.Expr[Int](c.resetAllAttrs(reify(offset.splice - from.splice).tree))) >= 0) {
val found = typeLookup(t, head.tpe)
if(found != EmptyTree)
return offset
}
tail.indexOf(t, from, reify(offset.splice + 1))
}
def indexOf(e: AbsExpr, from: Expr[Int]): Expr[Int] = {
def genIndexOf[L: WeakTypeTag, E: WeakTypeTag] =
reify(c.Expr[List[L]](toList.tree).splice.indexOf(c.Expr[E](e.tree).splice, from.splice))
//c.echo(c.enclosingPosition, "e.tpe " + e.tpe)
genIndexOf(c.WeakTypeTag(lub(tpes)), c.WeakTypeTag(e.tpe))
}
def indexOf(e: AbsExpr): Expr[Int] = indexOf(e, reify(0))
def lastIndexOf(t: Type, end: Expr[Int], offset: Expr[Int] = reify(0)): Expr[Int] = {
val i = reverse.indexOf(t, reify(length.splice - 1 - end.splice), offset)
reify{
if(i.splice >= 0)
length.splice - 1 - i.splice
else
i.splice
}
}
def lastIndexOf(t: Type): Expr[Int] = lastIndexOf(t, reify(length.splice - 1))
def length: Expr[Int] = reify(1 + tail.length.splice)
def take(i: Expr[Int]): ListExpr = {
if(c.eval(c.Expr[Int](c.resetAllAttrs(i.tree.duplicate))) <= 0)
HNilExpr
else
head :: tail.take(reify(i.splice - 1))
}
def takeRight(i: Expr[Int]): ListExpr = reverse.take(i).reverse
def drop(i: Expr[Int]): ListExpr = takeRight(reify(length.splice - i.splice))
def dropRight(i: Expr[Int]): ListExpr = take(reify(length.splice - i.splice))
def takeWhile(t: Type): ListExpr = {
val found = typeLookup(t, head.tpe)
if(found != EmptyTree)
head :: tail.takeWhile(t)
else
HNilExpr
}
def dropWhile(t: Type): ListExpr = {
val found = typeLookup(t, head.tpe)
if(found != EmptyTree)
tail.dropWhile(t)
else
this
}
def span(t: Type): TupleExpr = TupleExpr(takeWhile(t), dropWhile(t))
def splitAt(i: Expr[Int]): TupleExpr = TupleExpr(take(i), drop(i))
def unzip: TupleExpr = {
val headTup = TupleExpr(head)
val tailTup = tail.unzip
TupleExpr(headTup(1) :: ListExpr(tailTup(1)),
headTup(2) :: ListExpr(tailTup(2)))
}
def updated(i: Expr[Int], e: AbsExpr): ListExpr = {
if(c.eval(c.Expr[Int](c.resetAllAttrs(i.tree.duplicate))) == 0)
e :: tail
else
head :: tail.updated(reify(i.splice - 1), e)
}
def zip(l: ListExpr): ListExpr = {
if(l == HNilExpr)
HNilExpr
else
TupleExpr(head, l.head) :: tail.zip(l.tail)
}
def zipAll(l: ListExpr, e1: AbsExpr, e2: AbsExpr): ListExpr = {
if(l == HNilExpr)
TupleExpr(head, e1) :: tail.zipAll(HNilExpr, e1, e2)
else
TupleExpr(head, l.head) :: tail.zipAll(l.tail, e1, e2)
}
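    /** Indexes of the elements in descending order (length-1 :: ... :: 0 :: HNil); reversed by zipWithIndex. */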
protected def reverseIndexes: ListExpr = tail match {
case HNilExpr => AbsExpr(reify(0)) :: HNilExpr
case hltail @ HListExpr(_, _) => reify(length.splice - 1) :: hltail.reverseIndexes
}
def zipWithIndex: ListExpr = zip(reverseIndexes.reverse)
def toList: AbsExpr = {
def genList[A: WeakTypeTag]: AbsExpr =
AbsExpr(reify(c.Expr[A](head.tree).splice :: c.Expr[List[A]](tail.toList.tree).splice))
genList(c.WeakTypeTag(lub(tpes)))
}
def toArray: AbsExpr = {
val found = c.inferImplicitValue(appliedType(typeOf[ClassTag[_]].typeConstructor, List(lub(tpes))))
if(found == EmptyTree)
sys.error("No ClassTag found for " + lub(tpes))
def genArray[A: WeakTypeTag]: AbsExpr =
AbsExpr(reify(c.Expr[List[A]](toList.tree).splice.toArray[A](c.Expr[ClassTag[A]](found).splice)))
genArray(c.WeakTypeTag(lub(tpes)))
}
def tupled: AbsExpr = toTuple
def unify: ListExpr = ??? // reify(toList.splice).toHList
def startsWith(l: ListExpr): Expr[Boolean] = {
if(l == HNilExpr)
reify(true)
else
reify(c.Expr[Any](head.tree).splice == c.Expr[Any](l.head.tree).splice &&
c.Expr[Boolean](tail.startsWith(l.tail).tree).splice)
}
def endsWith(l: ListExpr): Expr[Boolean] = reverse.startsWith(l.reverse)
def map(hf: AbsExpr): ListExpr = PolyExpr(hf).apply(List(head)) :: tail.map(hf)
def count(hf: AbsExpr): Expr[Int] =
reify(
{
if(c.Expr[Boolean](PolyExpr(hf).apply(List(head)).tree).splice)
1
else
0
} + tail.count(hf).splice
)
//reify(c.Expr[List[Boolean]](map(hf).toList.tree).splice.map(b => if(b) 1 else 0).reduceLeft(_ + _))
def flatten: ListExpr = ListExpr(head.tree, head.tpe) ++ tail.flatten
def flatMap(hf: AbsExpr): ListExpr = map(hf).flatten
def reduceLeft(hf: AbsExpr): AbsExpr =
if(tail == HNilExpr)
head
else
(PolyExpr(hf).apply(List(head, tail.head)) :: tail.tail).reduceLeft(hf)
def reduceRight(hf: AbsExpr): AbsExpr = reverse.reduceLeft(PolyExpr(hf).reverse)
def foldLeft(e: AbsExpr)(f: AbsExpr): AbsExpr = (e :: this).reduceLeft(f)
def foldRight(e: AbsExpr)(f: AbsExpr): AbsExpr = (this :+ e).reduceRight(f)
def trees: List[Tree] = head.tree :: tail.trees
def tpes: List[Type] = head.tpe :: tail.tpes
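    /** Convert this HList to the TupleN of matching arity by calling its apply method with the element trees. */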
def toTuple: AbsExpr = {
val length = c.eval(c.Expr[Int](this.length.tree))
// Get tuple symbol we're interested in
val tupSym = rootMirror.staticModule("scala.Tuple" + length)
val tupTree = treeBuild.mkMethodCall(tupSym, newTermName("apply"), tpes, trees)
AbsExpr(c.Expr(tupTree))
}
/** Recursive class building based on companion object apply method
* which is supposed to be stored at the HList head, while the
* arguments constitute the tail.
*/
def toClass: AbsExpr = {
val applySym = head.tpe.member(newTermName("apply")).asMethod
def genArgTrees(l: ListExpr): List[Tree] = {
if(l == HNilExpr)
Nil
else {
if(!(l.head.tpe <:< typeOf[HList]))
l.head.tree :: genArgTrees(l.tail)
else
ListExpr(l.head).toClass.tree :: genArgTrees(l.tail)
}
}
val argTrees = genArgTrees(tail)
c.info(NoPosition, "Generated arg trees:\\n" + argTrees.mkString("\\n"),
System.getProperty("force", "false").toBoolean)
AbsExpr(treeBuild.mkMethodCall(applySym, argTrees), applySym.returnType)
}
}
object HListExpr {
def apply[T](expr: Expr[T]): HListExpr = new HListExpr(expr.tree, tpeFromExpr(expr))
def apply(tree: Tree, tpe: Type): HListExpr = new HListExpr(tree, tpe)
def unapply(hl: HListExpr): Option[(Tree, Type)] = Some((hl.tree, hl.tpe))
}
implicit def exprToHList[H, T <: HList](expr: Expr[H :: T]): HListExpr =
HListExpr(expr.tree, tpeFromExpr(expr))
/*
*/
case object HNilExpr extends ListExpr(reify(HNil).tree, typeOf[HNil]) {
def ::(e: AbsExpr): ListExpr = {
def genCons[E: WeakTypeTag]: ListExpr =
HListExpr(reify(new ::(c.Expr[E](e.tree).splice, HNil)))
genCons(c.WeakTypeTag(e.tpe))
}
def :+(e: AbsExpr): ListExpr = ::(e)
def ++(l: ListExpr): ListExpr = l
def tail: ListExpr = sys.error("Tail of HNil does not exist")
def head: AbsExpr = sys.error("Head of HNil does not exist")
def reverse: ListExpr = HNilExpr
def last: AbsExpr = sys.error("Last of HNil does not exist")
def init: ListExpr = sys.error("Init of HNil does not exist")
def contains(t: Type): Expr[Boolean] = reify(false)
def contains(e: AbsExpr): Expr[Boolean] = reify(false)
def diff(l: ListExpr): ListExpr = HNilExpr
def find(t: Type): AbsExpr = sys.error("Element of type " + t + " not found")
def find(f: Type => Boolean): AbsExpr = sys.error("Element not found")
def find(f: AbsExpr): AbsExpr = reify(None)
def filter(t: Type): ListExpr = HNilExpr
def filterNot(t: Type): ListExpr = HNilExpr
def apply(i: Expr[Int]): AbsExpr = sys.error("HNil has no element")
def indexOf(t: Type): Expr[Int] = reify(-1)
def indexOf(t: Type, from: Expr[Int], offset: Expr[Int] = reify(0)): Expr[Int] = reify(-1)
def indexOf(e: AbsExpr): Expr[Int] = reify(-1)
def indexOf(e: AbsExpr, from: Expr[Int]) = reify(-1)
def lastIndexOf(t: Type): Expr[Int] = reify(-1)
def lastIndexOf(t: Type, from: Expr[Int], offset: Expr[Int] = reify(0)): Expr[Int] = reify(-1)
def length: Expr[Int] = reify(0)
def take(i: Expr[Int]): ListExpr = HNilExpr
def takeRight(i: Expr[Int]): ListExpr = HNilExpr
def drop(i: Expr[Int]): ListExpr = HNilExpr
def dropRight(i: Expr[Int]): ListExpr = HNilExpr
def takeWhile(t: Type): ListExpr = HNilExpr
def dropWhile(t: Type): ListExpr = HNilExpr
def unzip: TupleExpr = TupleExpr(HNilExpr, HNilExpr)
def updated(i: Expr[Int], e: AbsExpr): ListExpr = sys.error("HNil can not be updated")
def span(t: Type): TupleExpr = TupleExpr(HNilExpr, HNilExpr)
def splitAt(i: Expr[Int]): TupleExpr = TupleExpr(HNilExpr, HNilExpr)
def zip(l: ListExpr): ListExpr = HNilExpr
def zipAll(l: ListExpr, e1: AbsExpr, e2: AbsExpr): ListExpr =
if(l == HNilExpr)
HNilExpr
else
TupleExpr(e2, l.head) :: zipAll(l.tail, e1, e2)
def zipWithIndex: ListExpr = HNilExpr
def toList: AbsExpr = AbsExpr(reify(Nil))
def toArray: AbsExpr = sys.error("HNil can not convert to Array")
def tupled: AbsExpr = sys.error("HNil can not be tupled")
def unify: ListExpr = HNilExpr
def startsWith(l: ListExpr): Expr[Boolean] =
if(l == HNilExpr) reify(true) else reify(false)
def endsWith(l: ListExpr): Expr[Boolean] =
if(l == HNilExpr) reify(true) else reify(false)
//def map(hf: ListExpr): ListExpr = HNilExpr
def map(hf: AbsExpr): ListExpr = HNilExpr
def flatten: ListExpr = HNilExpr
def flatMap(hf: AbsExpr): ListExpr = HNilExpr
def foldLeft(e: AbsExpr)(f: AbsExpr): AbsExpr = e
def foldRight(e: AbsExpr)(f: AbsExpr): AbsExpr = e
def reduceLeft(f: AbsExpr): AbsExpr = sys.error("HNil can not be reduced")
def reduceRight(f: AbsExpr): AbsExpr = sys.error("HNil can not be reduced")
def count(hf: AbsExpr): Expr[Int] = reify(0)
def toTuple: AbsExpr = sys.error("HNil can not be converted to a tuple")
def toClass: AbsExpr = sys.error("HNil can not be converted to a class instance")
def trees: List[Tree] = Nil
def tpes: List[Type] = Nil
override def toString = "HNilExpr"
}
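  /** Build an HList expression from a Traversable whose size can be evaluated at compile time,
   * prepending its elements recursively until the collection is empty.
   */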
def fromTraversable(list: AbsExpr): ListExpr = {
def genSize[L <: Traversable[_]: WeakTypeTag] =
reify(c.Expr[L](list.tree).splice.size)
val size = genSize(c.WeakTypeTag(list.tpe))
if(c.eval(c.Expr[Int](c.resetAllAttrs(size.tree.duplicate))) > 0) {
def genList[L <: Traversable[_]: WeakTypeTag] =
AbsExpr(reify(c.Expr[L](list.tree).splice.head)) ::
fromTraversable(AbsExpr(reify(c.Expr[L](list.tree).splice.tail)))
//reify(c.Expr[Int](size.tree).splice - 1))
genList(c.WeakTypeTag(list.tpe))
}
else
HNilExpr
}
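  /** Convert a tuple expression to an HList by folding its component expressions right-to-left onto HNil. */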
def fromTuple(tup: AbsExpr): ListExpr =
TupleExpr.fromTuple(tup).exprs.foldRight(HNilExpr: ListExpr)(_ :: _)
/** Generate an HList with class constructor at its head and constructor arguments as tail.
* TODO: Recursive on args, as long as it can, i.e. a unique apply and unapply function is found
* in companion object.
*
* FIXME: Crashes the compiler if function with wildcard is passed as argument
*/
def fromClass(clazz: AbsExpr, unapply0: AbsExpr): ListExpr = {
// "reducing" functions to values
def normUnapply[F: WeakTypeTag] =
AbsExpr(reify{val unapply = c.Expr[F](unapply0.tree).splice; unapply})
val unapply = normUnapply(c.WeakTypeTag(unapply0.tpe))
c.info(NoPosition, "Normalized unapply to: " + unapply, System.getProperty("force", "false").toBoolean)
// Unapplying the clazz
def genArgs[C: WeakTypeTag, T: WeakTypeTag] =
AbsExpr(reify(
c.Expr[C => Option[T]](unapply.tree).splice.apply(c.Expr[C](clazz.tree).splice).get
))
val tupTpe = unapply.tpe match {
case TypeRef(_, _, fun) => fun.last match {
case TypeRef(_, _, List(t)) => t
}
}
val args = genArgs(c.WeakTypeTag(clazz.tpe), c.WeakTypeTag(tupTpe))
c.info(NoPosition, "Found class unapply args: " + args, System.getProperty("force", "false").toBoolean)
val argsList = fromTuple(args)
c.info(NoPosition, "Converted tuple to ListExpr: " + argsList,
System.getProperty("force", "false").toBoolean)
val clazzExpr = AbsExpr(c.Expr(treeBuild.mkAttributedIdent(clazz.tpe.typeSymbol.companionSymbol)))
clazzExpr :: argsList
}
/** FIXME: case class with a single field
*/
def fromClass(clazz: AbsExpr): Option[ListExpr] = {
// getting companion object symbol
val companionSym = clazz.tpe.typeSymbol.companionSymbol
if(companionSym == NoSymbol) {
c.info(NoPosition, "Haven't found a companion object for: " + clazz.tpe,
System.getProperty("force", "false").toBoolean)
return None
}
// getting companion object type
val companionTpe = SingleType(NoPrefix, clazz.tpe.typeSymbol.companionSymbol)
    // getting the unapply method of the companion object
val unapplyMethod = companionTpe.member(newTermName("unapply"))
if(unapplyMethod == NoSymbol) {
c.info(NoPosition, "Haven't found an unapply method for: " + clazz.tpe,
System.getProperty("force", "false").toBoolean)
return None
}
c.info(NoPosition, "Found unapply method with type signature " + unapplyMethod.asMethod.typeSignature,
System.getProperty("force", "false").toBoolean)
val argsOption = c.Expr(treeBuild.mkMethodCall(unapplyMethod, List(clazz.tree)))
val clazzTypeParams = clazz.tpe match {
case TypeRef(_, _, params) => params
case _ => Nil
}
c.info(NoPosition, "Found class parameters: " + clazzTypeParams,
System.getProperty("force", "false").toBoolean)
val argsList = unapplyMethod.asMethod.returnType match {
case TypeRef(_, _, List(returnTpe)) => { // This is an option
def genArgs[T: WeakTypeTag] =
AbsExpr(reify(c.Expr[Option[T]](argsOption.tree).splice.get))
val args = genArgs(c.WeakTypeTag(appliedType(returnTpe, clazzTypeParams)))
fromTuple(args)
}
case t if t =:= typeOf[Boolean] => HNilExpr
}
//c.info(NoPosition, "Generated args from unapply: " + args,
// System.getProperty("force", "false").toBoolean)
//val argsList = fromTuple(args)
// Attempt to further hlistify argsList
def genArgsClass(l: ListExpr): ListExpr = {
if(l == HNilExpr)
l
else {
fromClass(l.head).getOrElse(l.head) :: genArgsClass(l.tail)
}
}
val clazzExpr = AbsExpr(c.Expr(treeBuild.mkAttributedIdent(clazz.tpe.typeSymbol.companionSymbol)))
Some(clazzExpr :: genArgsClass(argsList))
}
}
object HList {
  /** Enriched macro context with useful HList reification functions
*/
def hListContext(c0: Context) =
new {
val c: c0.type = c0
} with HListContext
/** Now mapping functions working on AbsExpr to macro implementations
   * which work on plain Expr
*/
def prepend[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context)(e: c.Expr[E]) = {
val hl = hListContext(c)
(hl.AbsExpr(e) :: hl.ListExpr(c.Expr[L](c.prefix.tree))).toExpr
}
def append[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context)(e: c.Expr[E]) = {
val hl = hListContext(c)
(hl.ListExpr(c.Expr[L](c.prefix.tree)) :+ hl.AbsExpr(e)).toExpr
}
def reverse[L <: HList: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).reverse.toExpr
def last[L <: HList: c.WeakTypeTag](c: Context): c.Expr[Any] =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).last.toExpr
def init[L <: HList: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).init.toExpr
def ++[L <: HList: c.WeakTypeTag, L2 <: HList: c.WeakTypeTag](c: Context)(l2: c.Expr[L2]) = {
val hl = hListContext(c)
(hl.ListExpr(c.Expr[L](c.prefix.tree)).++(hl.ListExpr(l2))).toExpr
}
def updated[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context)(i: c.Expr[Int], e: c.Expr[E]) = {
val hl = hListContext(c)
(hl.ListExpr(c.Expr[L](c.prefix.tree)).updated(i, hl.AbsExpr(e))).toExpr
}
def containsType[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).contains(c.weakTypeOf[E])
def contains[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context)(e: c.Expr[E]) = {
val hl = hListContext(c)
hl.ListExpr(c.Expr[L](c.prefix.tree)).contains(hl.AbsExpr(e))
}
def diff[L <: HList: c.WeakTypeTag, L2 <: HList: c.WeakTypeTag](c: Context)(l2: c.Expr[L2]) = {
val hl = hListContext(c)
(hl.ListExpr(c.Expr[L](c.prefix.tree)).diff(hl.ListExpr(l2))).toExpr
}
def findType[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).find(c.weakTypeOf[E]).toExpr
def find[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context)(f: c.Expr[E => Boolean]) = {
val hl = hListContext(c)
hl.ListExpr(c.Expr[L](c.prefix.tree)).find(hl.AbsExpr(f)).toExpr
}
def filter[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).filter(c.weakTypeOf[E]).toExpr
def filterNot[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).filterNot(c.weakTypeOf[E]).toExpr
def map[L <: HList: c.WeakTypeTag, HF: c.WeakTypeTag](c: Context)(hf: c.Expr[HF]) = {
val hl = hListContext(c)
(hl.ListExpr(c.Expr[L](c.prefix.tree)).map(hl.AbsExpr(hf))).toExpr
}
def flatMap[L <: HList: c.WeakTypeTag, HF: c.WeakTypeTag](c: Context)(hf: c.Expr[HF]) = {
val hl = hListContext(c)
(hl.ListExpr(c.Expr[L](c.prefix.tree)).flatMap(hl.AbsExpr(hf))).toExpr
}
def flatten[L <: HList: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).flatten.toExpr
def getIndex[L <: HList: c.WeakTypeTag](c: Context)(i: c.Expr[Int]) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).apply(i).toExpr
def indexOfType[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).indexOf(c.weakTypeOf[E])
def indexOfTypeFrom[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context)(from: c.Expr[Int]) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).indexOf(c.weakTypeOf[E], from)
def indexOf[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context)(e: c.Expr[E]) = {
val hl = hListContext(c)
hl.ListExpr(c.Expr[L](c.prefix.tree)).indexOf(hl.AbsExpr(e))
}
def indexOfFrom[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context)(e: c.Expr[E], from: c.Expr[Int]) = {
val hl = hListContext(c)
hl.ListExpr(c.Expr[L](c.prefix.tree)).indexOf(hl.AbsExpr(e), from)
}
def lastIndexOf[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).lastIndexOf(c.weakTypeOf[E])
def lastIndexOfEnd[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context)(end: c.Expr[Int]) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).lastIndexOf(c.weakTypeOf[E], end)
def take[L <: HList: c.WeakTypeTag](c: Context)(i: c.Expr[Int]) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).take(i).toExpr
def takeRight[L <: HList: c.WeakTypeTag](c: Context)(i: c.Expr[Int]) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).takeRight(i).toExpr
def drop[L <: HList: c.WeakTypeTag](c: Context)(i: c.Expr[Int]) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).drop(i).toExpr
def dropRight[L <: HList: c.WeakTypeTag](c: Context)(i: c.Expr[Int]) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).dropRight(i).toExpr
def takeWhile[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).takeWhile(c.weakTypeOf[E]).toExpr
def dropWhile[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).dropWhile(c.weakTypeOf[E]).toExpr
def span[L <: HList: c.WeakTypeTag, E: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).span(c.weakTypeOf[E]).toExpr
def splitAt[L <: HList: c.WeakTypeTag](c: Context)(i: c.Expr[Int]) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).splitAt(i).toExpr
def unzip[L <: HList: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).unzip.toExpr
def zip[L <: HList: c.WeakTypeTag, L2 <: HList: c.WeakTypeTag](c: Context)(l2: c.Expr[L2]) = {
val hl = hListContext(c)
(hl.ListExpr(c.Expr[L](c.prefix.tree)).zip(hl.ListExpr(l2))).toExpr
}
def zipAll[L <: HList: c.WeakTypeTag, L2 <: HList: c.WeakTypeTag,
E1: c.WeakTypeTag, E2: c.WeakTypeTag](c: Context)(
l2: c.Expr[L2], e1: c.Expr[E1], e2: c.Expr[E2]) = {
val hl = hListContext(c)
(hl.ListExpr(c.Expr[L](c.prefix.tree)).zipAll(hl.ListExpr(l2), hl.AbsExpr(e1), hl.AbsExpr(e2))).toExpr
}
def zipWithIndex[L <: HList: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).zipWithIndex.toExpr
def toList[L <: HList: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).toList.toExpr
def toArray[L <: HList: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).toArray.toExpr
def toTuple[L <: HList: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).toTuple.toExpr
def toClass[L <: HList: c.WeakTypeTag](c: Context) =
hListContext(c).ListExpr(c.Expr[L](c.prefix.tree)).toClass.toExpr
def startsWith[L <: HList: c.WeakTypeTag, L2 <: HList: c.WeakTypeTag](c: Context)(l2: c.Expr[L2]) = {
val hl = hListContext(c)
(hl.ListExpr(c.Expr[L](c.prefix.tree)).startsWith(hl.ListExpr(l2)))
}
def endsWith[L <: HList: c.WeakTypeTag, L2 <: HList: c.WeakTypeTag](c: Context)(l2: c.Expr[L2]) = {
val hl = hListContext(c)
(hl.ListExpr(c.Expr[L](c.prefix.tree)).endsWith(hl.ListExpr(l2)))
}
def count[L <: HList: c.WeakTypeTag, HF: c.WeakTypeTag](c: Context)(hf: c.Expr[HF]) = {
val hl = hListContext(c)
(hl.ListExpr(c.Expr[L](c.prefix.tree)).count(hl.AbsExpr(hf)))
}
def reduceLeft[L <: HList: c.WeakTypeTag, F: c.WeakTypeTag](c: Context)(f: c.Expr[F]) = {
val hl = hListContext(c)
(hl.ListExpr(c.Expr[L](c.prefix.tree)).reduceLeft(hl.AbsExpr(f))).toExpr
}
def reduceRight[L <: HList: c.WeakTypeTag, F: c.WeakTypeTag](c: Context)(f: c.Expr[F]) = {
val hl = hListContext(c)
(hl.ListExpr(c.Expr[L](c.prefix.tree)).reduceRight(hl.AbsExpr(f))).toExpr
}
def foldLeft[L <: HList: c.WeakTypeTag, T: c.WeakTypeTag, F: c.WeakTypeTag](c: Context)(
t: c.Expr[T])(f: c.Expr[F]) = {
val hl = hListContext(c)
(hl.ListExpr(c.Expr[L](c.prefix.tree)).foldLeft(hl.AbsExpr(t))(hl.AbsExpr(f))).toExpr
}
def foldRight[L <: HList: c.WeakTypeTag, T: c.WeakTypeTag, F: c.WeakTypeTag](c: Context)(
t: c.Expr[T])(f: c.Expr[F]) = {
val hl = hListContext(c)
(hl.ListExpr(c.Expr[L](c.prefix.tree)).foldRight(hl.AbsExpr(t))(hl.AbsExpr(f))).toExpr
}
/** Converts a tuple of any arity to an HList.
* TODO: once SI-5923 is fixed, an implicit conversion function can be defined on Tuples ;))
   * Update: I've found a workaround by using an existential type
*/
def fromTuple[T](tup: T) = macro fromTupleImpl[T]
def fromTupleImpl[T: c.WeakTypeTag](c: Context)(tup: c.Expr[T]) = {
val hl = hListContext(c)
hl.fromTuple(hl.AbsExpr(tup)).toExpr
}
def fromClass[C, U](clazz: C, unapply: U) = macro fromClassImpl[C, U]
def fromClassImpl[C: c.WeakTypeTag, U: c.WeakTypeTag](c: Context)(clazz: c.Expr[C],
unapply: c.Expr[U]) = {
val hl = hListContext(c)
hl.fromClass(hl.AbsExpr(clazz), hl.AbsExpr(unapply)).toExpr
}
def fromClass[C](clazz: C) = macro fromClassDirectImpl[C]
def fromClassDirectImpl[C: c.WeakTypeTag](c: Context)(clazz: c.Expr[C]) = {
val hl = hListContext(c)
hl.fromClass(hl.AbsExpr(clazz)).get.toExpr
}
def fromTraversable[T <: Traversable[_]](list: T) = macro fromTraversableImpl[T]
def fromTraversableImpl[T <: Traversable[_]: c.WeakTypeTag](c: Context)(list: c.Expr[T]) = {
val hl = hListContext(c)
hl.fromTraversable(hl.AbsExpr(list)).toExpr
}
}
//}
| leonardschneider/macroHList | src/main/scala/HList.scala | Scala | apache-2.0 | 41,129 |
package akka.persistence.pg.util
import java.sql.Savepoint
import java.util.concurrent.TimeUnit
import akka.actor.ActorSystem
import akka.testkit.TestProbe
import akka.util.Timeout
import com.typesafe.config.Config
import org.scalatest._
import org.scalatest.funsuite.FixtureAnyFunSuiteLike
import slick.jdbc.JdbcBackend
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.util.Try
/**
 * Base class for testing a persistent actor.
 * DB sessions are rolled back after each test, maintaining a clean DB state.
 * This also means the actor system needs to be recreated for each test.
*/
trait PersistentActorTest extends FixtureAnyFunSuiteLike with BeforeAndAfterEach {
def config: Config
implicit val defaultTimeout = Timeout(10, TimeUnit.SECONDS)
implicit var system: ActorSystem = _
var testProbe: TestProbe = _
override protected def beforeEach(): Unit = {
system = ActorSystem("PersistentActorTest", config)
testProbe = TestProbe()
}
type FixtureParam = JdbcBackend.DatabaseDef
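  // Runs each test with the transactional db as fixture and shuts the actor system down afterwards.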
override protected def withFixture(test: OneArgTest): Outcome = {
val possibleOutcome = Try {
PgPluginTestUtil.withTransactionRollback { db =>
withFixture(test.toNoArgTest(db))
}
}
    // Akka shutdown must be done in this way instead of using afterEach
system.terminate()
Await.result(system.whenTerminated, Duration.Inf)
possibleOutcome.get
}
def savepoint()(implicit db: JdbcBackend.DatabaseDef): Savepoint = db.createSession().conn.setSavepoint()
def rollback(savepoint: Savepoint)(implicit db: JdbcBackend.DatabaseDef) = db.createSession().conn.rollback(savepoint)
}
| WegenenVerkeer/akka-persistence-postgresql | modules/akka-persistence-pg/src/test/scala/akka/persistence/pg/util/PersistentActorTest.scala | Scala | mit | 1,685 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.submit
import java.io.{File, StringWriter}
import java.nio.charset.MalformedInputException
import java.util.Properties
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.io.{Codec, Source}
import io.fabric8.kubernetes.api.model.{ConfigMap, ConfigMapBuilder, KeyToPath}
import org.apache.spark.SparkConf
import org.apache.spark.deploy.k8s.{Config, Constants, KubernetesUtils}
import org.apache.spark.deploy.k8s.Constants.ENV_SPARK_CONF_DIR
import org.apache.spark.internal.Logging
private[spark] object KubernetesClientUtils extends Logging {
// Config map name can be 63 chars at max.
def configMapName(prefix: String): String = s"${prefix.take(54)}-conf-map"
val configMapNameExecutor: String = configMapName(s"spark-exec-${KubernetesUtils.uniqueID()}")
val configMapNameDriver: String = configMapName(s"spark-drv-${KubernetesUtils.uniqueID()}")
private def buildStringFromPropertiesMap(configMapName: String,
propertiesMap: Map[String, String]): String = {
val properties = new Properties()
propertiesMap.foreach { case (k, v) =>
properties.setProperty(k, v)
}
val propertiesWriter = new StringWriter()
properties.store(propertiesWriter,
s"Java properties built from Kubernetes config map with name: $configMapName")
propertiesWriter.toString
}
/**
   * Build a map of file name -> file content for all the selected files in SPARK_CONF_DIR.
*/
def buildSparkConfDirFilesMap(
configMapName: String,
sparkConf: SparkConf,
resolvedPropertiesMap: Map[String, String]): Map[String, String] = synchronized {
val loadedConfFilesMap = KubernetesClientUtils.loadSparkConfDirFiles(sparkConf)
// Add resolved spark conf to the loaded configuration files map.
if (resolvedPropertiesMap.nonEmpty) {
val resolvedProperties: String = KubernetesClientUtils
.buildStringFromPropertiesMap(configMapName, resolvedPropertiesMap)
loadedConfFilesMap ++ Map(Constants.SPARK_CONF_FILE_NAME -> resolvedProperties)
} else {
loadedConfFilesMap
}
}
def buildKeyToPathObjects(confFilesMap: Map[String, String]): Seq[KeyToPath] = {
confFilesMap.map {
case (fileName: String, _: String) =>
val filePermissionMode = 420 // 420 is decimal for octal literal 0644.
new KeyToPath(fileName, filePermissionMode, fileName)
}.toList.sortBy(x => x.getKey) // List is sorted to make mocking based tests work
}
/**
* Build a Config Map that will hold the content for environment variable SPARK_CONF_DIR
* on remote pods.
*/
def buildConfigMap(configMapName: String, confFileMap: Map[String, String],
withLabels: Map[String, String] = Map()): ConfigMap = {
new ConfigMapBuilder()
.withNewMetadata()
.withName(configMapName)
.withLabels(withLabels.asJava)
.endMetadata()
.withImmutable(true)
.addToData(confFileMap.asJava)
.build()
}
private def orderFilesBySize(confFiles: Seq[File]): Seq[File] = {
val fileToFileSizePairs = confFiles.map(f => (f, f.getName.length + f.length()))
// sort first by name and then by length, so that during tests we have consistent results.
fileToFileSizePairs.sortBy(f => f._1).sortBy(f => f._2).map(_._1)
}
// exposed for testing
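  /**
   * Load the contents of the eligible files under SPARK_CONF_DIR, skipping any file that would
   * push the total size beyond the configured ConfigMap size limit.
   */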
private[submit] def loadSparkConfDirFiles(conf: SparkConf): Map[String, String] = {
val confDir = Option(conf.getenv(ENV_SPARK_CONF_DIR)).orElse(
conf.getOption("spark.home").map(dir => s"$dir/conf"))
val maxSize = conf.get(Config.CONFIG_MAP_MAXSIZE)
if (confDir.isDefined) {
val confFiles: Seq[File] = listConfFiles(confDir.get, maxSize)
val orderedConfFiles = orderFilesBySize(confFiles)
var truncatedMapSize: Long = 0
val truncatedMap = mutable.HashMap[String, String]()
val skippedFiles = mutable.HashSet[String]()
var source: Source = Source.fromString("") // init with empty source.
for (file <- orderedConfFiles) {
try {
source = Source.fromFile(file)(Codec.UTF8)
val (fileName, fileContent) = file.getName -> source.mkString
if ((truncatedMapSize + fileName.length + fileContent.length) < maxSize) {
truncatedMap.put(fileName, fileContent)
truncatedMapSize = truncatedMapSize + (fileName.length + fileContent.length)
} else {
skippedFiles.add(fileName)
}
} catch {
case e: MalformedInputException =>
logWarning(
s"Unable to read a non UTF-8 encoded file ${file.getAbsolutePath}. Skipping...", e)
None
} finally {
source.close()
}
}
if (truncatedMap.nonEmpty) {
logInfo(s"Spark configuration files loaded from $confDir :" +
s" ${truncatedMap.keys.mkString(",")}")
}
if (skippedFiles.nonEmpty) {
logWarning(s"Skipped conf file(s) ${skippedFiles.mkString(",")}, due to size constraint." +
s" Please see, config: `${Config.CONFIG_MAP_MAXSIZE.key}` for more details.")
}
truncatedMap.toMap
} else {
Map.empty[String, String]
}
}
private def listConfFiles(confDir: String, maxSize: Long): Seq[File] = {
    // At the moment ConfigMaps do not support storing binary content (i.e. skip jar, tar, gzip, zip),
    // and do not allow a size greater than 1.5 MiB (configurable).
// https://etcd.io/docs/v3.4.0/dev-guide/limit/
def testIfTooLargeOrBinary(f: File): Boolean = (f.length() + f.getName.length > maxSize) ||
f.getName.matches(".*\\\\.(gz|zip|jar|tar)")
// We exclude all the template files and user provided spark conf or properties,
// Spark properties are resolved in a different step.
def testIfSparkConfOrTemplates(f: File) = f.getName.matches(".*\\\\.template") ||
f.getName.matches("spark.*(conf|properties)")
val fileFilter = (f: File) => {
f.isFile && !testIfTooLargeOrBinary(f) && !testIfSparkConfOrTemplates(f)
}
val confFiles: Seq[File] = {
val dir = new File(confDir)
if (dir.isDirectory) {
dir.listFiles.filter(x => fileFilter(x)).toSeq
} else {
Nil
}
}
confFiles
}
}
| wangmiao1981/spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientUtils.scala | Scala | apache-2.0 | 7,079 |
package quizleague.domain
import java.util.Date
import java.time.LocalDate
import java.time.LocalTime
import java.time.Duration
import io.circe.generic._
import quizleague.util.json.codecs.ScalaTimeCodecs._
import quizleague.util.json.codecs.DomainCodecs._
@JsonCodec
sealed trait Competition extends Entity
{
val name:String
val text:Ref[Text]
val icon:Option[String]
override val retired = false
}
case class LeagueCompetition(
id:String,
name:String,
startTime:LocalTime,
duration:Duration,
text:Ref[Text],
textName:String = "league-comp",
icon:Option[String] = None
) extends Competition with MainLeagueCompetition
case class CupCompetition(
id:String,
name:String,
startTime:LocalTime,
duration:Duration,
text:Ref[Text],
textName:String,
icon:Option[String] = None
) extends Competition with KnockoutCompetition
case class SubsidiaryLeagueCompetition(
id:String,
name:String,
text:Ref[Text],
textName:String = "beer-comp",
icon:Option[String] = None
) extends Competition with SubsidiaryCompetition with CompetitionTables with FixturesCompetition
case class SingletonCompetition(
id:String,
name:String,
event:Option[Event],
textName:String,
text:Ref[Text],
icon:Option[String] = None
) extends Competition with BaseSingletonCompetition
object Competition
trait BaseSingletonCompetition{
val event:Option[Event]
val textName:String
}
trait ScheduledCompetition{
val startTime:LocalTime
val duration:Duration
}
trait FixturesCompetition{
}
trait TeamCompetition extends FixturesCompetition{
val textName:String
}
trait CompetitionTables{
}
trait BaseLeagueCompetition extends TeamCompetition with ScheduledCompetition with CompetitionTables{
val win = 2
val draw = 1
val loss = 0
}
trait MainLeagueCompetition extends BaseLeagueCompetition{
}
trait KnockoutCompetition extends TeamCompetition with ScheduledCompetition
trait SubsidiaryCompetition{
val textName:String
}
| gumdrop/quizleague-maintain | shared/src/main/scala/quizleague/domain/Competition.scala | Scala | mit | 2,017 |
/*
* Copyright (c) 2013, Martin Zuber
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* - Neither the name of the TU Berlin nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
import org.scalatest.FunSpec
import Fresh._
import ObjectLanguage._
/**
* Test spec for swapping atoms.
*/
trait SwappingSpec extends FunSpec {
val test = it
/**
* Description for the used `swap' implementation.
*/
def testedImplementation: String
/* Test fixtures: Some fresh names */
val x: Name[Ide] = fresh()
val y: Name[Ide] = fresh()
val z: Name[Ide] = fresh()
describe("Testing " + testedImplementation) {
test("swapping x and y in <<x>>y should result in <<y>>x") {
assert(swap(x, y, <<(x)>> y) === <<(y)>> x)
}
test("swapping x and z in <<x>>y should result in <<z>>y") {
assert(swap(x, z, <<(x)>> y) === <<(z)>> y)
}
test("swapping x and y in <<z>>z should result in <<z>>z") {
assert(swap(x, y, <<(z)>> z) === <<(z)>> z)
}
test("swapping x and y in <<x>>(<<y>>x) should result in <<y>>(<<x>>y)") {
assert(swap(x, y, <<(x)>>(<<(y)>> x)) === <<(y)>>(<<(x)>> y))
}
test("swapping y and z in Fun(<<(y)>> Var(x)) should result in Fun(<<(z)>> Var(x))") {
assert(swap(y, z, Fun(<<(y)>> Var(x))) === Fun(<<(z)>> Var(x)))
}
test("swapping [x,y] and [z,z] in App(Var(x), Var(y)) should result in App(Var(z), Var(z))") {
assert(Fresh.swap(List(x,y), List(z,z), App(Var(x), Var(y))) === App(Var(y), Var(z)))
}
}
}
| mzuber/fresh-scala | core/src/test/scala/SwappingSpec.scala | Scala | bsd-3-clause | 2,919 |
package models.service.oauth
import models.util.Constants
import play.api.Play.current
import play.api.libs.json.{JsValue, Json}
import play.api.libs.ws.{WS, WSRequest, WSResponse}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
trait PlayListRetrieval {
protected def playlistRequest(accessToken:String):Future[WSResponse]
protected def trackListLinks(js:JsValue):List[String]
protected def getNextPage(trackJs: JsValue):(Boolean,String)
protected def authenticateTrackRetrievalRequest(wsRequest: WSRequest, accessToken:String):WSRequest
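  // Fetch the user's playlists and retrieve the full track list of each playlist.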
private def retrievePlaylists(accessToken: String):Future[List[List[JsValue]]] = {
playlistRequest(accessToken) flatMap { resp =>
val js = Json.parse(resp.body)
val links = trackListLinks(js)
Future.sequence {
links.map { trackListLink =>
retrieveTracks(accessToken, trackListLink, Nil)
}
}
}
}
protected def extractTracksFromJs(trackJs: JsValue) = trackJs
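  // Follow the pagination links of a track list, accumulating the JSON of every page.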
private def retrieveTracks(accessToken: String, link:String, responses: List[JsValue]): Future[List[JsValue]] = {
authenticateTrackRetrievalRequest(WS.url(link), accessToken).get() flatMap { trackResponse =>
val trackJs = Json.parse(trackResponse.body)
val (tobeContinued, next) = getNextPage(trackJs)
val js = extractTracksFromJs(trackJs)
if(tobeContinued) retrieveTracks(accessToken, next, js :: responses)
else Future.successful(js :: responses)
}
}
def requestPlaylists(token:Option[String]):Future[Seq[JsValue]] = {
token match {
case Some(accessToken) =>
retrievePlaylists(accessToken).map(x => x.flatten)
case None => throw new Exception (Constants.accessTokenRetrievalError)
}
}
}
| haffla/stream-compare | app/models/service/oauth/PlayListRetrieval.scala | Scala | gpl-3.0 | 1,780 |
package controllers
import play.api._
import play.api.mvc._
import play.api.data._
import play.api.data.Forms._
import models._
object Application extends Controller {
def index = Action {
Redirect(routes.Quotes.list())
}
} | dtinblack/Play-TestDB | app/controllers/Application.scala | Scala | mit | 243 |
package org.jetbrains.sbt.settings
import com.intellij.openapi.externalSystem.service.settings.AbstractExternalSystemConfigurable
import com.intellij.openapi.project.Project
import org.jetbrains.sbt.project.SbtProjectSystem
import org.jetbrains.sbt.project.settings.Context.Configuration
import org.jetbrains.sbt.project.settings._
/**
* User: Dmitry Naydanov
* Date: 11/25/13
*/
class SbtExternalSystemConfigurable(project: Project)
extends AbstractExternalSystemConfigurable[SbtProjectSettings, SbtProjectSettingsListener, SbtSystemSettings](project, SbtProjectSystem.Id) {
override def createProjectSettingsControl(settings: SbtProjectSettings): SbtProjectSettingsControl = new SbtProjectSettingsControl(Configuration, settings)
override def createSystemSettingsControl(settings: SbtSystemSettings): SbtSystemSettingsControl = new SbtSystemSettingsControl(settings)
override def newProjectSettings(): SbtProjectSettings = new SbtProjectSettings()
override def getId: String = "sbt.project.settings.configurable"
}
| triplequote/intellij-scala | scala/scala-impl/src/org/jetbrains/sbt/settings/SbtExternalSystemConfigurable.scala | Scala | apache-2.0 | 1,037 |
package org.genericConfig.admin.models.component
import org.genericConfig.admin.controllers.websocket.WebClient
import org.genericConfig.admin.models.logic.RidToHash
import org.genericConfig.admin.models.persistence.orientdb.PropertyKeys
import org.genericConfig.admin.models.{CommonFunction, common}
import org.genericConfig.admin.shared.Actions
import org.genericConfig.admin.shared.component.{ComponentConfigPropertiesDTO, ComponentDTO, ComponentParamsDTO}
import org.specs2.mutable.Specification
import org.specs2.specification.BeforeAfterAll
import play.api.Logger
import play.api.libs.json.{JsResult, JsValue, Json}
/**
* Copyright (C) 2016 Gennadi Heimann [email protected]
*
* Created by Gennadi Heimann 12.06.2020
*/
class ConnectComponentToStep extends Specification
with BeforeAfterAll
with CommonFunction {
val wC: WebClient = WebClient.init
var rConnectComponentToStep: JsResult[ComponentDTO] = _
var stepId_S1 : Option[String] = _
var stepId_S2 : Option[String] = _
var componentId_C1 : Option[String] = _
var componentId_C2: Option[String] = _
def beforeAll(): Unit = {
before()
}
def afterAll(): Unit = {
val errorComponent_C1: Option[common.Error] = deleteVertex(RidToHash.getRId(componentId_C1.get).get, PropertyKeys.VERTEX_COMPONENT)
    require(errorComponent_C1 == None, "An error occurred while deleting component C1")
val errorComponent_C2: Option[common.Error] = deleteVertex(RidToHash.getRId(componentId_C2.get).get, PropertyKeys.VERTEX_COMPONENT)
    require(errorComponent_C2 == None, "An error occurred while deleting component C2")
val errorStep_S1: Option[common.Error] = deleteVertex(RidToHash.getRId(stepId_S1.get).get, PropertyKeys.VERTEX_STEP)
    require(errorStep_S1 == None, "An error occurred while deleting step S1: " + errorStep_S1.get.name)
val errorStep_S2: Option[common.Error] = deleteVertex(RidToHash.getRId(stepId_S2.get).get, PropertyKeys.VERTEX_STEP)
    require(errorStep_S2 == None, "An error occurred while deleting step S2")
}
"Der Benutzer veraendert die Komponente" >> {
"Es wird nur der Name geaendert" >> {
"action = ADD_COMPONENT" >> {
rConnectComponentToStep.get.action === Actions.CONNECT_COMPONENT_TO_STEP
}
"componentId < 32 && > 10" >> {
rConnectComponentToStep.get.result.get.configProperties.get.componentId.get.length must (be_<=(32) and be_>(10))
}
"stepId = None" >> {
rConnectComponentToStep.get.result.get.configProperties.get.stepId.get.length must (be_<=(32) and be_>(10))
}
"nameToShow = ComponentUpdated" >> {
rConnectComponentToStep.get.result.get.userProperties.get.nameToShow === None
}
"errors = None" >> {
rConnectComponentToStep.get.result.get.errors === None
}
}
}
def before(): Unit = {
val wC: WebClient = WebClient.init
val username = "connectComponentToStep"
val userId = createUser(username, wC)
val configId = createConfig(userId, s"http://contig/$username")
stepId_S1 = addStep(nameToShow = Some(s"S1_$username"), outId = Some(configId), min = 1, max = 1, wC = wC)
componentId_C1 = createComponent(wC, stepId_S1, Some(s"C1_$username"))
componentId_C2 = createComponent(wC, stepId_S1, Some(s"C2_$username"))
stepId_S2 = addStep(nameToShow = Some(s"S2_$username"), outId = componentId_C1, min = 1, max = 1, wC = wC)
val connectComponent: JsValue = Json.toJson(ComponentDTO(
action = Actions.CONNECT_COMPONENT_TO_STEP,
params = Some(ComponentParamsDTO(
configProperties = Some(ComponentConfigPropertiesDTO(
stepId = stepId_S2,
componentId = componentId_C2
))
))
))
Logger.info("CONNECT_COMPONENT_TO_STEP -> " + connectComponent)
val resultJsValue: JsValue = wC.handleMessage(connectComponent)
Logger.info("CONNECT_COMPONENT_TO_STEP <- " + resultJsValue)
rConnectComponentToStep = Json.fromJson[ComponentDTO](resultJsValue)
}
} | gennadij/admin | server/test/org/genericConfig/admin/models/component/ConnectComponentToStep.scala | Scala | apache-2.0 | 4,042 |
package sandbox.lift.hellodarwin;
import _root_.junit.framework._;
import Assert._;
object AppTest {
def suite: Test = {
val suite = new TestSuite(classOf[AppTest]);
suite
}
def main(args : Array[String]) {
_root_.junit.textui.TestRunner.run(suite);
}
}
/**
* Unit test for simple App.
*/
class AppTest extends TestCase("app") {
/**
     * Rigorous Tests :-)
*/
def testOK() = assertTrue(true);
//def testKO() = assertTrue(false);
}
| andreum/liftweb | sites/hellodarwin/src/test/scala/sandbox/lift/hellodarwin/AppTest.scala | Scala | apache-2.0 | 500 |
package com.faacets.qalg
package algebra
import scala.{specialized => sp}
import scala.annotation.tailrec
import spire.algebra._
import spire.syntax.cfor._
import spire.syntax.field._
import indup.algebra._
trait VecField[V, @sp(Double, Long) A] extends Any with VecRing[V, A] with InnerProductSpace[V, A] { self =>
override implicit def scalar: Field[A]
// override def dot(x: V, y: V): A
// override def divr(v: V, a: A): V
}
object VecField {
def apply[V, @sp(Double, Long) A](implicit V: VecField[V, A]): VecField[V, A] = V
// implicit def fromPack[V, @sp(Double, Long) A](implicit ev: PackVR[V, A]): VecRing[V, A] = ev.V
}
| denisrosset/qalg | core/src/main/scala/qalg/algebra/VecField.scala | Scala | mit | 645 |
package com.sksamuel.scrimage.filter
import org.scalatest.{ OneInstancePerTest, BeforeAndAfter, FunSuite }
import com.sksamuel.scrimage.Image
/** @author Stephen Samuel */
class BrightnessFilterTest extends FunSuite with BeforeAndAfter with OneInstancePerTest {
val original = Image(getClass.getResourceAsStream("/bird_small.png"))
test("filter output matches expected") {
val expected = Image(getClass.getResourceAsStream("/com/sksamuel/scrimage/filters/bird_small_brighten.png"))
assert(original.filter(BrightnessFilter(1.4)) === expected)
}
}
| carlosFattor/scrimage | scrimage-filters/src/test/scala/com/sksamuel/scrimage/filter/BrightnessFilterTest.scala | Scala | apache-2.0 | 564 |
package org.jetbrains.plugins.scala
package annotator
import com.intellij.lang.annotation.AnnotationHolder
import org.jetbrains.plugins.scala.annotator.AnnotatorUtils._
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScSimpleTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScPatternDefinition
/**
* Pavel.Fatin, 18.05.2010
*/
trait PatternDefinitionAnnotator {
def annotatePatternDefinition(definition: ScPatternDefinition, holder: AnnotationHolder, highlightErrors: Boolean) {
if (highlightErrors && definition.pList.allPatternsSimple) {
for (expr <- definition.expr; element <- definition.children.findByType(classOf[ScSimpleTypeElement]))
checkConformance(expr, element, holder)
}
}
} | triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/annotator/PatternDefinitionAnnotator.scala | Scala | apache-2.0 | 800 |
package ml.combust.mleap.executor.service
import akka.pattern.pipe
import akka.actor.{Actor, Props, ReceiveTimeout, Status}
import akka.stream.{DelayOverflowStrategy, Materializer, OverflowStrategy, QueueOfferResult}
import akka.stream.scaladsl.{Flow, Keep, Sink, Source, SourceQueueWithComplete}
import ml.combust.mleap.executor._
import ml.combust.mleap.executor.error.ExecutorException
import ml.combust.mleap.runtime.frame.{DefaultLeapFrame, Transformer}
import scala.concurrent.{Future, Promise}
import scala.concurrent.duration._
import scala.util.Try
object FrameStreamActor {
def props(transformer: Transformer,
request: CreateFrameStreamRequest,
config: ExecutorStreamConfig)
(implicit materializer: Materializer): Props = {
Props(new FrameStreamActor(transformer, request, config))
}
object Messages {
case object Initialize
case class TransformFrame(request: StreamTransformFrameRequest, promise: Promise[Try[DefaultLeapFrame]])
case object StreamClosed
}
}
class FrameStreamActor(transformer: Transformer,
request: CreateFrameStreamRequest,
config: ExecutorStreamConfig)
(implicit materializer: Materializer) extends Actor {
import FrameStreamActor.Messages
import context.dispatcher
context.setReceiveTimeout(1.minute)
val frameStream: FrameStream = FrameStream(request.modelName,
request.streamName,
request.streamConfig.getOrElse(StreamConfig()))
private var queue: Option[SourceQueueWithComplete[Messages.TransformFrame]] = None
private var queueF: Option[Future[SourceQueueWithComplete[Messages.TransformFrame]]] = None
override def postStop(): Unit = {
for (q <- queue) { q.complete() }
}
override def receive: Receive = {
case r: Messages.TransformFrame => transformFrame(r)
case Messages.Initialize => initialize()
case Messages.StreamClosed => context.stop(self)
case r: GetFrameStreamRequest => getFrameStream(r)
case r: CreateFrameFlowRequest => createFrameFlow(r)
case ReceiveTimeout => receiveTimeout()
case Status.Failure(err) => throw err
}
def initialize(): Unit = {
if (queue.isEmpty) {
queue = Some {
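        // Build a backpressured queue source, optionally apply the idleTimeout / throttle / delay
        // stages from the stream config (falling back to the executor defaults), then run frames
        // through a parallel transform stage whose results complete each request's promise.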
var source = Source.queue[Messages.TransformFrame](
frameStream.streamConfig.bufferSize.getOrElse(config.defaultBufferSize),
OverflowStrategy.backpressure)
source = frameStream.streamConfig.idleTimeout.orElse(config.defaultIdleTimeout).map {
timeout => source.idleTimeout(timeout)
}.getOrElse(source)
source = frameStream.streamConfig.throttle.orElse(config.defaultThrottle).map {
throttle => source.throttle(throttle.elements, throttle.duration, throttle.maxBurst, throttle.mode)
}.getOrElse(source)
source = frameStream.streamConfig.transformDelay.orElse(config.defaultTransformDelay).map {
delay =>
source.delay(delay, DelayOverflowStrategy.backpressure)
}.getOrElse(source)
val transform = Flow[Messages.TransformFrame].
mapAsyncUnordered(frameStream.streamConfig.parallelism.getOrElse(config.defaultParallelism).get) {
case Messages.TransformFrame(req, promise) =>
ExecuteTransform(transformer, req.frame, req.options).map {
frame => (frame, promise)
}
}.to(Sink.foreach {
case (frame, promise) => promise.success(frame)
})
source.toMat(transform)(Keep.left).run()
}
queue.get.watchCompletion().
map(_ => Messages.StreamClosed).
pipeTo(self)
queueF = Some(Future(queue.get))
context.setReceiveTimeout(Duration.Inf)
}
sender ! frameStream
}
def transformFrame(frame: Messages.TransformFrame): Unit = {
queueF = Some(queueF.get.flatMap {
q =>
q.offer(frame).map {
case QueueOfferResult.Enqueued => q
case QueueOfferResult.Failure(err) =>
frame.promise.failure(err)
q
case QueueOfferResult.Dropped =>
frame.promise.failure(new ExecutorException("item dropped"))
q
case QueueOfferResult.QueueClosed =>
frame.promise.failure(new ExecutorException("queue closed"))
q
}
})
}
def getFrameStream(request: GetFrameStreamRequest): Unit = {
sender ! frameStream
}
def createFrameFlow(request: CreateFrameFlowRequest): Unit = {
queue match {
case Some(q) => sender ! (self, q.watchCompletion())
case None => sender ! Status.Failure(new ExecutorException(s"frame stream not initialized ${frameStream.modelName}/frame/${frameStream.streamName}"))
}
}
def receiveTimeout(): Unit = {
if (queue.isEmpty) { context.stop(self) }
}
}
| combust/mleap | mleap-executor/src/main/scala/ml/combust/mleap/executor/service/FrameStreamActor.scala | Scala | apache-2.0 | 4,841 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.controller
import kafka.api.LeaderAndIsr
import kafka.server.KafkaConfig
import kafka.utils.TestUtils
import kafka.zk.KafkaZkClient.UpdateLeaderAndIsrResult
import kafka.zk.{KafkaZkClient, TopicPartitionStateZNode}
import kafka.zookeeper.GetDataResponse
import org.apache.kafka.common.TopicPartition
import org.apache.zookeeper.KeeperException.Code
import org.apache.zookeeper.data.Stat
import org.easymock.EasyMock
import org.junit.Assert._
import org.junit.{Before, Test}
import org.scalatest.junit.JUnitSuite
import scala.collection.mutable
class ReplicaStateMachineTest extends JUnitSuite {
private var controllerContext: ControllerContext = null
private var mockZkClient: KafkaZkClient = null
private var mockControllerBrokerRequestBatch: ControllerBrokerRequestBatch = null
private var mockTopicDeletionManager: TopicDeletionManager = null
private var replicaState: mutable.Map[PartitionAndReplica, ReplicaState] = null
private var replicaStateMachine: ReplicaStateMachine = null
private val brokerId = 5
private val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(brokerId, "zkConnect"))
private val controllerEpoch = 50
private val partition = new TopicPartition("t", 0)
private val partitions = Seq(partition)
private val replica = PartitionAndReplica(partition, brokerId)
private val replicas = Seq(replica)
@Before
def setUp(): Unit = {
controllerContext = new ControllerContext
controllerContext.epoch = controllerEpoch
mockZkClient = EasyMock.createMock(classOf[KafkaZkClient])
mockControllerBrokerRequestBatch = EasyMock.createMock(classOf[ControllerBrokerRequestBatch])
mockTopicDeletionManager = EasyMock.createMock(classOf[TopicDeletionManager])
replicaState = mutable.Map.empty[PartitionAndReplica, ReplicaState]
replicaStateMachine = new ReplicaStateMachine(config, new StateChangeLogger(brokerId, true, None), controllerContext, mockTopicDeletionManager, mockZkClient,
replicaState, mockControllerBrokerRequestBatch)
}
@Test
def testNonexistentReplicaToNewReplicaTransition(): Unit = {
replicaStateMachine.handleStateChanges(replicas, NewReplica)
assertEquals(NewReplica, replicaState(replica))
}
@Test
def testInvalidNonexistentReplicaToOnlineReplicaTransition(): Unit = {
replicaStateMachine.handleStateChanges(replicas, OnlineReplica)
assertEquals(NonExistentReplica, replicaState(replica))
}
@Test
def testInvalidNonexistentReplicaToOfflineReplicaTransition(): Unit = {
replicaStateMachine.handleStateChanges(replicas, OfflineReplica)
assertEquals(NonExistentReplica, replicaState(replica))
}
@Test
def testInvalidNonexistentReplicaToReplicaDeletionStartedTransition(): Unit = {
replicaStateMachine.handleStateChanges(replicas, ReplicaDeletionStarted)
assertEquals(NonExistentReplica, replicaState(replica))
}
@Test
def testInvalidNonexistentReplicaToReplicaDeletionIneligibleTransition(): Unit = {
replicaStateMachine.handleStateChanges(replicas, ReplicaDeletionIneligible)
assertEquals(NonExistentReplica, replicaState(replica))
}
@Test
def testInvalidNonexistentReplicaToReplicaDeletionSuccessfulTransition(): Unit = {
replicaStateMachine.handleStateChanges(replicas, ReplicaDeletionSuccessful)
assertEquals(NonExistentReplica, replicaState(replica))
}
@Test
def testInvalidNewReplicaToNonexistentReplicaTransition(): Unit = {
testInvalidTransition(NewReplica, NonExistentReplica)
}
@Test
def testNewReplicaToOnlineReplicaTransition(): Unit = {
replicaState.put(replica, NewReplica)
controllerContext.partitionReplicaAssignment.put(partition, Seq(brokerId))
replicaStateMachine.handleStateChanges(replicas, OnlineReplica)
assertEquals(OnlineReplica, replicaState(replica))
}
@Test
def testNewReplicaToOfflineReplicaTransition(): Unit = {
replicaState.put(replica, NewReplica)
EasyMock.expect(mockControllerBrokerRequestBatch.newBatch())
EasyMock.expect(mockControllerBrokerRequestBatch.addStopReplicaRequestForBrokers(EasyMock.eq(Seq(brokerId)),
EasyMock.eq(partition), EasyMock.eq(false), EasyMock.anyObject()))
EasyMock.expect(mockControllerBrokerRequestBatch.sendRequestsToBrokers(controllerEpoch))
EasyMock.replay(mockControllerBrokerRequestBatch)
replicaStateMachine.handleStateChanges(replicas, OfflineReplica)
EasyMock.verify(mockControllerBrokerRequestBatch)
assertEquals(NewReplica, replicaState(replica))
}
@Test
def testInvalidNewReplicaToReplicaDeletionStartedTransition(): Unit = {
testInvalidTransition(NewReplica, ReplicaDeletionStarted)
}
@Test
def testInvalidNewReplicaToReplicaDeletionIneligibleTransition(): Unit = {
testInvalidTransition(NewReplica, ReplicaDeletionIneligible)
}
@Test
def testInvalidNewReplicaToReplicaDeletionSuccessfulTransition(): Unit = {
testInvalidTransition(NewReplica, ReplicaDeletionSuccessful)
}
@Test
def testInvalidOnlineReplicaToNonexistentReplicaTransition(): Unit = {
testInvalidTransition(OnlineReplica, NonExistentReplica)
}
@Test
def testInvalidOnlineReplicaToNewReplicaTransition(): Unit = {
testInvalidTransition(OnlineReplica, NewReplica)
}
@Test
def testOnlineReplicaToOnlineReplicaTransition(): Unit = {
replicaState.put(replica, OnlineReplica)
controllerContext.partitionReplicaAssignment.put(partition, Seq(brokerId))
val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(LeaderAndIsr(brokerId, List(brokerId)), controllerEpoch)
controllerContext.partitionLeadershipInfo.put(partition, leaderIsrAndControllerEpoch)
EasyMock.expect(mockControllerBrokerRequestBatch.newBatch())
EasyMock.expect(mockControllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(Seq(brokerId),
partition, leaderIsrAndControllerEpoch, Seq(brokerId), isNew = false))
EasyMock.expect(mockControllerBrokerRequestBatch.sendRequestsToBrokers(controllerEpoch))
EasyMock.replay(mockZkClient, mockControllerBrokerRequestBatch)
replicaStateMachine.handleStateChanges(replicas, OnlineReplica)
EasyMock.verify(mockZkClient, mockControllerBrokerRequestBatch)
assertEquals(OnlineReplica, replicaState(replica))
}
@Test
def testOnlineReplicaToOfflineReplicaTransition(): Unit = {
val otherBrokerId = brokerId + 1
val replicaIds = List(brokerId, otherBrokerId)
replicaState.put(replica, OnlineReplica)
controllerContext.partitionReplicaAssignment.put(partition, replicaIds)
val leaderAndIsr = LeaderAndIsr(brokerId, replicaIds)
val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(leaderAndIsr, controllerEpoch)
controllerContext.partitionLeadershipInfo.put(partition, leaderIsrAndControllerEpoch)
val stat = new Stat(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
EasyMock.expect(mockControllerBrokerRequestBatch.newBatch())
EasyMock.expect(mockControllerBrokerRequestBatch.addStopReplicaRequestForBrokers(EasyMock.eq(Seq(brokerId)),
EasyMock.eq(partition), EasyMock.eq(false), EasyMock.anyObject()))
val adjustedLeaderAndIsr = leaderAndIsr.newLeaderAndIsr(LeaderAndIsr.NoLeader, List(otherBrokerId))
val updatedLeaderAndIsr = adjustedLeaderAndIsr.withZkVersion(adjustedLeaderAndIsr .zkVersion + 1)
val updatedLeaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(updatedLeaderAndIsr, controllerEpoch)
EasyMock.expect(mockZkClient.getTopicPartitionStatesRaw(partitions)).andReturn(
Seq(GetDataResponse(Code.OK, null, Some(partition),
TopicPartitionStateZNode.encode(leaderIsrAndControllerEpoch), stat)))
EasyMock.expect(mockZkClient.updateLeaderAndIsr(Map(partition -> adjustedLeaderAndIsr), controllerEpoch))
.andReturn(UpdateLeaderAndIsrResult(Map(partition -> updatedLeaderAndIsr), Seq.empty, Map.empty))
EasyMock.expect(mockTopicDeletionManager.isPartitionToBeDeleted(partition)).andReturn(false)
EasyMock.expect(mockControllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(Seq(otherBrokerId),
partition, updatedLeaderIsrAndControllerEpoch, replicaIds, isNew = false))
EasyMock.expect(mockControllerBrokerRequestBatch.sendRequestsToBrokers(controllerEpoch))
EasyMock.replay(mockZkClient, mockControllerBrokerRequestBatch, mockTopicDeletionManager)
replicaStateMachine.handleStateChanges(replicas, OfflineReplica)
EasyMock.verify(mockZkClient, mockControllerBrokerRequestBatch, mockTopicDeletionManager)
assertEquals(updatedLeaderIsrAndControllerEpoch, controllerContext.partitionLeadershipInfo(partition))
assertEquals(OfflineReplica, replicaState(replica))
}
@Test
def testInvalidOnlineReplicaToReplicaDeletionStartedTransition(): Unit = {
testInvalidTransition(OnlineReplica, ReplicaDeletionStarted)
}
@Test
def testInvalidOnlineReplicaToReplicaDeletionIneligibleTransition(): Unit = {
testInvalidTransition(OnlineReplica, ReplicaDeletionIneligible)
}
@Test
def testInvalidOnlineReplicaToReplicaDeletionSuccessfulTransition(): Unit = {
testInvalidTransition(OnlineReplica, ReplicaDeletionSuccessful)
}
@Test
def testInvalidOfflineReplicaToNonexistentReplicaTransition(): Unit = {
testInvalidTransition(OfflineReplica, NonExistentReplica)
}
@Test
def testInvalidOfflineReplicaToNewReplicaTransition(): Unit = {
testInvalidTransition(OfflineReplica, NewReplica)
}
@Test
def testOfflineReplicaToOnlineReplicaTransition(): Unit = {
replicaState.put(replica, OfflineReplica)
controllerContext.partitionReplicaAssignment.put(partition, Seq(brokerId))
val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(LeaderAndIsr(brokerId, List(brokerId)), controllerEpoch)
controllerContext.partitionLeadershipInfo.put(partition, leaderIsrAndControllerEpoch)
EasyMock.expect(mockControllerBrokerRequestBatch.newBatch())
EasyMock.expect(mockControllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(Seq(brokerId),
partition, leaderIsrAndControllerEpoch, Seq(brokerId), isNew = false))
EasyMock.expect(mockControllerBrokerRequestBatch.sendRequestsToBrokers(controllerEpoch))
EasyMock.replay(mockZkClient, mockControllerBrokerRequestBatch)
replicaStateMachine.handleStateChanges(replicas, OnlineReplica)
EasyMock.verify(mockZkClient, mockControllerBrokerRequestBatch)
assertEquals(OnlineReplica, replicaState(replica))
}
@Test
def testOfflineReplicaToReplicaDeletionStartedTransition(): Unit = {
val callbacks = new Callbacks()
replicaState.put(replica, OfflineReplica)
EasyMock.expect(mockControllerBrokerRequestBatch.newBatch())
EasyMock.expect(mockControllerBrokerRequestBatch.addStopReplicaRequestForBrokers(Seq(brokerId),
partition, true, callbacks.stopReplicaResponseCallback))
EasyMock.expect(mockControllerBrokerRequestBatch.sendRequestsToBrokers(controllerEpoch))
EasyMock.replay(mockZkClient, mockControllerBrokerRequestBatch)
replicaStateMachine.handleStateChanges(replicas, ReplicaDeletionStarted, callbacks)
EasyMock.verify(mockZkClient, mockControllerBrokerRequestBatch)
assertEquals(ReplicaDeletionStarted, replicaState(replica))
}
@Test
def testInvalidOfflineReplicaToReplicaDeletionIneligibleTransition(): Unit = {
testInvalidTransition(OfflineReplica, ReplicaDeletionIneligible)
}
@Test
def testInvalidOfflineReplicaToReplicaDeletionSuccessfulTransition(): Unit = {
testInvalidTransition(OfflineReplica, ReplicaDeletionSuccessful)
}
@Test
def testInvalidReplicaDeletionStartedToNonexistentReplicaTransition(): Unit = {
testInvalidTransition(ReplicaDeletionStarted, NonExistentReplica)
}
@Test
def testInvalidReplicaDeletionStartedToNewReplicaTransition(): Unit = {
testInvalidTransition(ReplicaDeletionStarted, NewReplica)
}
@Test
def testInvalidReplicaDeletionStartedToOnlineReplicaTransition(): Unit = {
testInvalidTransition(ReplicaDeletionStarted, OnlineReplica)
}
@Test
def testInvalidReplicaDeletionStartedToOfflineReplicaTransition(): Unit = {
testInvalidTransition(ReplicaDeletionStarted, OfflineReplica)
}
@Test
def testReplicaDeletionStartedToReplicaDeletionIneligibleTransition(): Unit = {
replicaState.put(replica, ReplicaDeletionStarted)
replicaStateMachine.handleStateChanges(replicas, ReplicaDeletionIneligible)
assertEquals(ReplicaDeletionIneligible, replicaState(replica))
}
@Test
def testReplicaDeletionStartedToReplicaDeletionSuccessfulTransition(): Unit = {
replicaState.put(replica, ReplicaDeletionStarted)
replicaStateMachine.handleStateChanges(replicas, ReplicaDeletionSuccessful)
assertEquals(ReplicaDeletionSuccessful, replicaState(replica))
}
@Test
def testReplicaDeletionSuccessfulToNonexistentReplicaTransition(): Unit = {
replicaState.put(replica, ReplicaDeletionSuccessful)
controllerContext.partitionReplicaAssignment.put(partition, Seq(brokerId))
replicaStateMachine.handleStateChanges(replicas, NonExistentReplica)
assertEquals(Seq.empty, controllerContext.partitionReplicaAssignment(partition))
assertEquals(None, replicaState.get(replica))
}
@Test
def testInvalidReplicaDeletionSuccessfulToNewReplicaTransition(): Unit = {
testInvalidTransition(ReplicaDeletionSuccessful, NewReplica)
}
@Test
def testInvalidReplicaDeletionSuccessfulToOnlineReplicaTransition(): Unit = {
testInvalidTransition(ReplicaDeletionSuccessful, OnlineReplica)
}
@Test
def testInvalidReplicaDeletionSuccessfulToOfflineReplicaTransition(): Unit = {
testInvalidTransition(ReplicaDeletionSuccessful, OfflineReplica)
}
@Test
def testInvalidReplicaDeletionSuccessfulToReplicaDeletionStartedTransition(): Unit = {
testInvalidTransition(ReplicaDeletionSuccessful, ReplicaDeletionStarted)
}
@Test
def testInvalidReplicaDeletionSuccessfulToReplicaDeletionIneligibleTransition(): Unit = {
testInvalidTransition(ReplicaDeletionSuccessful, ReplicaDeletionIneligible)
}
@Test
def testInvalidReplicaDeletionIneligibleToNonexistentReplicaTransition(): Unit = {
testInvalidTransition(ReplicaDeletionIneligible, NonExistentReplica)
}
@Test
def testInvalidReplicaDeletionIneligibleToNewReplicaTransition(): Unit = {
testInvalidTransition(ReplicaDeletionIneligible, NewReplica)
}
@Test
def testReplicaDeletionIneligibleToOnlineReplicaTransition(): Unit = {
replicaState.put(replica, ReplicaDeletionIneligible)
controllerContext.partitionReplicaAssignment.put(partition, Seq(brokerId))
val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(LeaderAndIsr(brokerId, List(brokerId)), controllerEpoch)
controllerContext.partitionLeadershipInfo.put(partition, leaderIsrAndControllerEpoch)
EasyMock.expect(mockControllerBrokerRequestBatch.newBatch())
EasyMock.expect(mockControllerBrokerRequestBatch.addLeaderAndIsrRequestForBrokers(Seq(brokerId),
partition, leaderIsrAndControllerEpoch, Seq(brokerId), isNew = false))
EasyMock.expect(mockControllerBrokerRequestBatch.sendRequestsToBrokers(controllerEpoch))
EasyMock.replay(mockZkClient, mockControllerBrokerRequestBatch)
replicaStateMachine.handleStateChanges(replicas, OnlineReplica)
EasyMock.verify(mockZkClient, mockControllerBrokerRequestBatch)
assertEquals(OnlineReplica, replicaState(replica))
}
@Test
def testInvalidReplicaDeletionIneligibleToReplicaDeletionStartedTransition(): Unit = {
testInvalidTransition(ReplicaDeletionIneligible, ReplicaDeletionStarted)
}
@Test
def testInvalidReplicaDeletionIneligibleToReplicaDeletionSuccessfulTransition(): Unit = {
testInvalidTransition(ReplicaDeletionIneligible, ReplicaDeletionSuccessful)
}
private def testInvalidTransition(fromState: ReplicaState, toState: ReplicaState): Unit = {
replicaState.put(replica, fromState)
replicaStateMachine.handleStateChanges(replicas, toState)
assertEquals(fromState, replicaState(replica))
}
}
| themarkypantz/kafka | core/src/test/scala/unit/kafka/controller/ReplicaStateMachineTest.scala | Scala | apache-2.0 | 16,780 |
package net.fehmicansaglam.tepkin.protocol.command
import net.fehmicansaglam.bson.BsonDocument
import net.fehmicansaglam.bson.BsonDsl._
import net.fehmicansaglam.bson.util.Converters.md5Hex
case class Authenticate(databaseName: String,
username: String,
password: String,
nonce: String) extends Command {
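
  // MongoDB-CR style key derivation: key = md5(nonce + username + md5("<username>:mongo:<password>")).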
override val command: BsonDocument = {
("authenticate" := 1) ~
("user" := username) ~
("nonce" := nonce) ~
("key" := md5Hex(nonce + username + md5Hex(s"$username:mongo:$password")))
}
}
| fehmicansaglam/tepkin | tepkin/src/main/scala/net/fehmicansaglam/tepkin/protocol/command/Authenticate.scala | Scala | apache-2.0 | 591 |
package mr.merc.ui.world
import mr.merc.diplomacy.Claim
import mr.merc.diplomacy.Claim.{StrongProvinceClaim, VassalizationClaim, WeakProvinceClaim}
import mr.merc.economics.SeasonOfYear
import mr.merc.local.Localization
import mr.merc.politics.State
import scalafx.scene.layout.FlowPane
import scalafx.scene.text.Text
class ClaimReceivedDomesticMessagePane(receiver: State, claim: Claim) extends FlowPane {
claim match {
case WeakProvinceClaim(state, province, until) =>
if (state == receiver) {
children.add(BigText(Localization("messages.claims.weHaveWeakClaim")))
} else {
children.add(new StateComponentColorName(state))
children.add(BigText(Localization("messages.claims.someoneHasWeakClaim")))
}
children.add(new Text {
text = EconomicLocalization.localizeProvince(province)
font = Components.boldFont(Components.largeFontSize)
})
children.add(new StateComponentColorName(province.owner))
children.add(BigText(Localization("messages.claims.until")))
children.add(BigText(SeasonOfYear.date(until).localizedString))
case StrongProvinceClaim(state, province) =>
if (state == receiver) {
children.add(BigText(Localization("messages.claims.weHaveStrongClaim")))
} else {
children.add(new StateComponentColorName(state))
children.add(BigText(Localization("messages.claims.someoneHasStrongClaim")))
}
children.add(new Text {
text = EconomicLocalization.localizeProvince(province)
font = Components.boldFont(Components.largeFontSize)
})
children.add(new StateComponentColorName(province.owner))
case VassalizationClaim(state, possibleVassal, claimTurnEnd) =>
children.add(new StateComponentColorName(state))
children.add(BigText(Localization("messages.claims.vassalizationClaim")))
children.add(new StateComponentColorName(possibleVassal))
children.add(BigText(Localization("messages.claims.until")))
children.add(BigText(SeasonOfYear.date(claimTurnEnd).localizedString))
}
}
| RenualdMarch/merc | src/main/scala/mr/merc/ui/world/ClaimReceivedDomesticMessagePane.scala | Scala | gpl-3.0 | 2,092 |
package cromwell.engine.db.slick
import java.sql.{Clob, Timestamp}
import java.util.{Date, UUID}
import javax.sql.rowset.serial.SerialClob
import _root_.slick.backend.DatabaseConfig
import _root_.slick.driver.JdbcProfile
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
import cromwell.engine.finalcall.FinalCall
import wdl4s._
import wdl4s.types.{WdlPrimitiveType, WdlType}
import wdl4s.values.WdlValue
import cromwell.engine.ExecutionIndex._
import cromwell.engine.ExecutionStatus._
import cromwell.engine._
import cromwell.engine.backend.jes.{JesBackend, JesJobKey}
import cromwell.engine.backend.local.LocalBackend
import cromwell.engine.backend.sge.SgeBackend
import cromwell.engine.backend.{Backend, WorkflowQueryResult}
import cromwell.engine.db._
import cromwell.engine.workflow._
import cromwell.engine.{SymbolHash, CallOutput, WorkflowOutputs}
import cromwell.webservice.{CallCachingParameters, WorkflowQueryParameters, WorkflowQueryResponse}
import lenthall.config.ScalaConfig._
import org.joda.time.DateTime
import org.slf4j.LoggerFactory
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.language.{implicitConversions, postfixOps}
object SlickDataAccess {
type IoValue = String
val IoInput = "INPUT"
val IoOutput = "OUTPUT"
lazy val rootConfig = ConfigFactory.load()
/*
VERY TEMPORARY!
Turns out, Slick has a way to load databases from configs: DatabaseConfig
http://slick.typesafe.com/doc/3.0.0/database.html?highlight=databaseconfig#databaseconfig
To switch over to this config format, we need to rename:
1. Property name "driver" renamed to "db.driver"
2. Property name "slick.driver" renamed to "driver"
3. Property value with the slick driver needs to append "$"
To make sure the code continues to run during this switch, the application.conf's have been updated ahead of time
with the temporary "databaseSlickDriverConfigSwitch" configuration.
*/
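  // Illustrative sketch only (the keys and values below are assumptions for a hypothetical H2 setup,
  // not this project's actual application.conf): applying renames 1-3 above would turn
  //
  //   database {
  //     driver = "org.h2.Driver"
  //     slick.driver = "slick.driver.H2Driver"
  //     db.url = "jdbc:h2:mem:test"
  //   }
  //
  // into the DatabaseConfig-compatible form
  //
  //   database {
  //     db.driver = "org.h2.Driver"
  //     driver = "slick.driver.H2Driver$"
  //     db.url = "jdbc:h2:mem:test"
  //   }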
private lazy val rootDatabaseConfig = rootConfig.getConfig(
if (rootConfig.hasPath("databaseSlickDriverConfigSwitch")) "databaseSlickDriverConfigSwitch" else "database")
private lazy val databaseConfigName = rootDatabaseConfig.getStringOption("config")
lazy val defaultDatabaseConfig = databaseConfigName.map(getDatabaseConfig).getOrElse(rootDatabaseConfig)
def getDatabaseConfig(path: String) = rootDatabaseConfig.getConfig(path)
implicit class DateToTimestamp(val date: Date) extends AnyVal {
def toTimestamp = new Timestamp(date.getTime)
}
implicit class ClobToRawString(val clob: Clob) extends AnyVal {
def toRawString: String = clob.getSubString(1, clob.length.toInt) // yes, it starts at 1
}
implicit class StringToClob(val str: String) extends AnyVal {
def toClob: Clob = new SerialClob(str.toCharArray)
}
implicit class ConfigWithUniqueSchema(val config: Config) extends AnyVal {
/**
* Returns either the "url" or "properties.url"
*/
def urlKey = if (config.hasPath("db.url")) "db.url" else "db.properties.url"
/**
* Returns the value of either the "url" or "properties.url"
*/
def urlValue = config.getString(urlKey)
/**
* Modifies config.getString("url") to return a unique schema, if the original url contains the text
* "${slick.uniqueSchema}".
*
* This allows each instance of a SlickDataAccess object to use a clean, and different, in memory database.
*
* @return Config with ${slick.uniqueSchema} in url replaced with a unique string.
*/
def withUniqueSchema: Config = {
if (urlValue.contains("${slick.uniqueSchema}")) {
// Config wasn't updating with a simple withValue/withFallback.
// So instead, do a bit of extra work to insert the generated schema name in the url.
val schema = UUID.randomUUID().toString
val newUrl = urlValue.replaceAll("""\\$\\{slick\\.uniqueSchema\\}""", schema)
val origin = urlKey + " with slick.uniqueSchema=" + schema
val urlConfigValue = ConfigValueFactory.fromAnyRef(newUrl, origin)
val urlConfig = ConfigFactory.empty(origin).withValue(urlKey, urlConfigValue)
urlConfig.withFallback(config)
} else {
config
}
}
}
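  // Illustrative example of withUniqueSchema (the url below is an assumption, not a real config value):
  // a url such as "jdbc:hsqldb:mem:${slick.uniqueSchema};shutdown=false" becomes
  // "jdbc:hsqldb:mem:<random UUID>;shutdown=false", so each SlickDataAccess gets its own in-memory schema.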
lazy val log = LoggerFactory.getLogger("slick")
}
/**
* Data Access implementation using Slick.
*
* NOTE: the uses of .head below will cause an exception to be thrown
* if the list is empty. In every use case as of the writing of this comment,
* those exceptions would have been wrapped in a failed Future and returned.
*/
class SlickDataAccess(databaseConfig: Config) extends DataAccess {
import SlickDataAccess._
def this() = this(SlickDataAccess.defaultDatabaseConfig)
private val configWithUniqueSchema = this.databaseConfig.withUniqueSchema
val slickConfig = DatabaseConfig.forConfig[JdbcProfile]("", configWithUniqueSchema)
val dataAccess = new DataAccessComponent(slickConfig.driver)
  // NOTE: Used for Slick flatMap. May switch to a custom ExecutionContext in the future
private implicit val executionContext = ExecutionContext.global
// Allows creation of a Database, plus implicits for running transactions
import dataAccess.driver.api._
  // NOTE: if you want to refactor, the database's inner-class type is this.dataAccess.driver.backend.DatabaseFactory
val database = slickConfig.db
// Possibly create the database
{
import SlickDataAccess._
log.info(s"Running with database ${configWithUniqueSchema.urlKey} = ${configWithUniqueSchema.urlValue}")
// NOTE: Slick 3.0.0 schema creation, Clobs, and MySQL don't mix: https://github.com/slick/slick/issues/637
//
// Not really an issue, since externally run liquibase is standard way of installing / upgrading MySQL.
//
// Also, creating the unique key on UUID stored as a VARCHAR requires setting the length to O.Length(36) or (100)
// for MySQL schema gen to avoid:
// com.mysql.jdbc.exceptions.jdbc4.MySQLSyntaxErrorException: BLOB/TEXT column 'WORKFLOW_EXECUTION_UUID'
// used in key specification without a key length
//
// Perhaps we'll use a more optimized data type for UUID's bytes in the future, as a FK, instead auto-inc cols
//
// The value `${slick.uniqueSchema}` may be used in the url, in combination with `slick.createSchema = true`, to
// generate unique schema instances that don't conflict.
//
// Otherwise, create one DataAccess and hold on to the reference.
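    // Illustrative only (the actual table definitions live in DataAccessComponent, not here): avoiding the
    // MySQL error above means giving the VARCHAR an explicit length in the Slick column definition, e.g.
    //   def workflowExecutionUuid = column[String]("WORKFLOW_EXECUTION_UUID", O.Length(100))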
if (this.databaseConfig.getBooleanOr("slick.createSchema")) {
Await.result(database.run(dataAccess.schema.create), Duration.Inf)
}
}
private def wdlValueToDbValue(v: WdlValue): String = v.wdlType match {
case p: WdlPrimitiveType => v.valueString
case o => v.toWdlString
}
private def dbEntryToWdlValue(dbValue: String, wdlType: WdlType): WdlValue = wdlType match {
// .get here is because we trust the value in the database is coercible to the given type
case p: WdlPrimitiveType => p.coerceRawValue(dbValue).get
case o => wdlType.fromWdlString(dbValue)
}
override def shutdown() = database.shutdown
// Run action with an outer transaction
private def runTransaction[R](action: DBIOAction[R, _ <: NoStream, _ <: Effect]): Future[R] = {
database.run(action.transactionally)
}
/**
   * Creates the workflow execution and auxiliary rows, the input symbols, and an execution row (plus a
   * backend-specific job row for `backend`) for each runnable key derived from `scopes`. Or perhaps defer this?
*/
override def createWorkflow(workflowDescriptor: WorkflowDescriptor,
workflowInputs: Traversable[SymbolStoreEntry],
scopes: Traversable[Scope],
backend: Backend): Future[Unit] = {
val scopeKeys: Traversable[ExecutionStoreKey] = scopes collect {
case call: Call => BackendCallKey(call, None)
case scatter: Scatter => ScatterKey(scatter, None)
case finalCall: FinalCall => FinalCallKey(finalCall)
}
val action = for {
workflowExecutionInsert <- dataAccess.workflowExecutionsAutoInc +=
new WorkflowExecution(
workflowDescriptor.id.toString,
workflowDescriptor.name,
WorkflowSubmitted.toString,
new Date().toTimestamp)
_ <- dataAccess.workflowExecutionAuxesAutoInc += new WorkflowExecutionAux(
workflowExecutionInsert.workflowExecutionId.get,
workflowDescriptor.sourceFiles.wdlSource.toClob,
workflowDescriptor.sourceFiles.inputsJson.toClob,
workflowDescriptor.sourceFiles.workflowOptionsJson.toClob
)
symbolInsert <- dataAccess.symbolsAutoInc ++= toInputSymbols(workflowExecutionInsert, workflowDescriptor.namespace.workflow, workflowInputs)
// NOTE: Don't use DBIO.seq for **transforming** sequences
// - DBIO.seq(mySeq: _*) runs *any* items in sequence, but converts Seq[ DBIOAction[_] ] to DBIOAction[ Unit ]
// - DBIO.sequence(mySeq) converts Seq[ DBIOAction[R] ] to DBIOAction[ Seq[R] ]
// - DBIO.fold(mySeq, init) converts Seq[ DBIOAction[R] ] to DBIOAction[R]
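      // Illustrative only: for actions: Seq[DBIO[Int]],
      //   DBIO.seq(actions: _*)        => DBIO[Unit]     (runs all, discards results)
      //   DBIO.sequence(actions)       => DBIO[Seq[Int]] (runs all, keeps results in order)
      //   DBIO.fold(actions, 0)(_ + _) => DBIO[Int]      (runs all, combines results)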
_ <- DBIO.sequence(toScopeActions(workflowExecutionInsert, backend, scopeKeys))
} yield ()
runTransaction(action)
}
  // Converts the SymbolStoreEntries to Symbols. Does not create the action to do the insert.
private def toInputSymbols(workflowExecution: WorkflowExecution,
rootWorkflowScope: Workflow,
symbolStoreEntries: Traversable[SymbolStoreEntry]): Seq[Symbol] = {
symbolStoreEntries.toSeq map { symbol =>
val reportableResult = rootWorkflowScope.outputs exists { _.fullyQualifiedName == symbol.key.fqn }
new Symbol(
workflowExecution.workflowExecutionId.get,
symbol.key.scope,
symbol.key.name,
symbol.key.index.fromIndex,
if (symbol.key.input) IoInput else IoOutput,
reportableResult,
symbol.wdlType.toWdlString,
symbol.wdlValue.map(v => wdlValueToDbValue(v).toClob),
symbol.symbolHash map { _.value }
)
}
}
  // Converts the Traversable[ExecutionStoreKey] to a Seq[DBIOAction[Unit]] that inserts the correct rows
private def toScopeActions(workflowExecution: WorkflowExecution, backend: Backend,
keys: Traversable[ExecutionStoreKey]): Seq[DBIO[Unit]] = {
keys.toSeq map toScopeAction(workflowExecution, backend)
}
override def insertCalls(workflowId: WorkflowId, keys: Traversable[ExecutionStoreKey], backend: Backend): Future[Unit] = {
val action = for {
workflowExecution <- dataAccess.workflowExecutionsByWorkflowExecutionUuid(workflowId.toString).result.head
_ <- DBIO.sequence(toScopeActions(workflowExecution, backend, keys))
} yield ()
runTransaction(action)
}
  // Converts a single ExecutionStoreKey to a composite DBIOAction that inserts the correct rows
private def toScopeAction(workflowExecution: WorkflowExecution, backend: Backend)
(key: ExecutionStoreKey): DBIO[Unit] = {
for {
// Insert an execution row
executionInsert <- dataAccess.executionsAutoInc +=
new Execution(
workflowExecutionId = workflowExecution.workflowExecutionId.get,
callFqn = key.scope.fullyQualifiedName,
index = key.index.fromIndex,
status = ExecutionStatus.NotStarted.toString,
rc = None,
startDt = None,
endDt = None)
// Depending on the backend, insert a job specific row
_ <- backend match {
case _: LocalBackend =>
dataAccess.localJobsAutoInc +=
new LocalJob(
executionInsert.executionId.get,
None,
None)
case j: JesBackend =>
// FIXME: Placeholder for now, discussed w/ Khalid
dataAccess.jesJobsAutoInc += new JesJob(executionInsert.executionId.get, None, None, None)
case s: SgeBackend =>
dataAccess.sgeJobsAutoInc += new SgeJob(executionInsert.executionId.get, None)
case null =>
throw new IllegalArgumentException("Backend is null")
case unknown =>
throw new IllegalArgumentException("Unknown backend: " + backend.getClass)
}
} yield ()
}
override def getWorkflowState(workflowId: WorkflowId): Future[Option[WorkflowState]] = {
val action = for {
maybeWorkflowExecution <- dataAccess.workflowExecutionsByWorkflowExecutionUuid(
workflowId.id.toString).result.headOption
workflowState = maybeWorkflowExecution map { w => WorkflowState.fromString(w.status) }
} yield workflowState
runTransaction(action)
}
override def getExecutionStatuses(workflowId: WorkflowId): Future[Map[ExecutionDatabaseKey, CallStatus]] = {
val action = for {
      // NOTE: For now, intentionally causes the query to error out instead of returning a Map.empty
workflowExecutionResult <- dataAccess.workflowExecutionsByWorkflowExecutionUuid(
workflowId.id.toString).result.head
// Alternatively, could use a dataAccess.executionCallFqnsAndStatusesByWorkflowExecutionUuid
executionKeyAndStatusResults <- dataAccess.executionCallFqnsAndStatusesByWorkflowExecutionId(
workflowExecutionResult.workflowExecutionId.get).result
executionStatuses = executionKeyAndStatusResults map {
case (fqn, indexInt, status, rc, executionHash, dockerHash) => (ExecutionDatabaseKey(fqn, indexInt.toIndex), CallStatus(status.toExecutionStatus, rc, executionHash map { ExecutionHash(_, dockerHash )}, None)) }
} yield executionStatuses.toMap
runTransaction(action)
}
override def getExecutionStatuses(workflowId: WorkflowId, fqn: FullyQualifiedName): Future[Map[ExecutionDatabaseKey, CallStatus]] = {
val action = for {
workflowExecutionResult <- dataAccess.workflowExecutionsByWorkflowExecutionUuid(
workflowId.toString).result.head
executionKeyAndStatusResults <- dataAccess.executionStatusByWorkflowExecutionIdAndCallFqn(
(workflowExecutionResult.workflowExecutionId.get, fqn)).result
executionStatuses = executionKeyAndStatusResults map { case (callFqn, indexInt, status, rc, hash, dockerHash) =>
(ExecutionDatabaseKey(callFqn, indexInt.toIndex), CallStatus(status.toExecutionStatus, rc,
hash map { ExecutionHash(_, dockerHash) }, None)) }
} yield executionStatuses.toMap
runTransaction(action)
}
override def getExecutionStatus(workflowId: WorkflowId, key: ExecutionDatabaseKey): Future[Option[CallStatus]] = {
val action = for {
workflowExecutionResult <- dataAccess.workflowExecutionsByWorkflowExecutionUuid(workflowId.toString).result.head
executionStatuses <- dataAccess.executionStatusesAndReturnCodesByWorkflowExecutionIdAndCallKey(
(workflowExecutionResult.workflowExecutionId.get, key.fqn, key.index.fromIndex)).result
maybeStatus = executionStatuses.headOption map { case (status, rc, hash, dockerHash) => CallStatus(status.toExecutionStatus, rc, hash map { ExecutionHash(_, dockerHash )}, None) }
} yield maybeStatus
runTransaction(action)
}
override def getWorkflow(workflowExecutionId: Int): Future[WorkflowDescriptor] = {
val action = for {
workflowExecutionResult <- dataAccess.workflowExecutionsByPrimaryKey(workflowExecutionId).result.head
workflowAux <- dataAccess.workflowExecutionAuxesByWorkflowExecutionUuid(workflowExecutionResult.workflowExecutionUuid).result.head
workflowDescriptor = WorkflowDescriptor(
WorkflowId(UUID.fromString(workflowExecutionResult.workflowExecutionUuid)),
WorkflowSourceFiles(workflowAux.wdlSource.toRawString, workflowAux.jsonInputs.toRawString, workflowAux.workflowOptions.toRawString)
)
} yield workflowDescriptor
runTransaction(action)
}
override def getWorkflow(workflowId: WorkflowId): Future[WorkflowDescriptor] = {
val action = for {
workflowExecutionResult <- dataAccess.workflowExecutionsByWorkflowExecutionUuid(workflowId.toString).result.head
workflowAux <- dataAccess.workflowExecutionAuxesByWorkflowExecutionUuid(workflowExecutionResult.workflowExecutionUuid).result.head
workflowDescriptor = WorkflowDescriptor(
workflowId,
WorkflowSourceFiles(workflowAux.wdlSource.toRawString, workflowAux.jsonInputs.toRawString, workflowAux.workflowOptions.toRawString)
)
} yield workflowDescriptor
runTransaction(action)
}
override def getWorkflowsByState(states: Traversable[WorkflowState]): Future[Traversable[WorkflowDescriptor]] = {
val action = for {
workflowExecutionResults <- dataAccess.workflowExecutionsByStatuses(states.map(_.toString)).result
workflowDescriptors <- DBIO.sequence(
workflowExecutionResults map { workflowExecutionResult =>
val workflowExecutionAuxResult = dataAccess.workflowExecutionAuxesByWorkflowExecutionUuid(
workflowExecutionResult.workflowExecutionUuid).result.head
workflowExecutionAuxResult map { workflowExecutionAux =>
WorkflowDescriptor(
WorkflowId.fromString(workflowExecutionResult.workflowExecutionUuid),
WorkflowSourceFiles(
workflowExecutionAux.wdlSource.toRawString,
workflowExecutionAux.jsonInputs.toRawString,
workflowExecutionAux.workflowOptions.toRawString
)
)
}
}
)
} yield workflowDescriptors
runTransaction(action)
}
override def updateWorkflowState(workflowId: WorkflowId, workflowState: WorkflowState): Future[Unit] = {
val query = dataAccess.workflowExecutionsByWorkflowExecutionUuid(workflowId.toString).extract
val endDate = if (workflowState.isTerminal) Option(new Date().toTimestamp) else None
val action = for {
count <- query.map(w => (w.status, w.endDt)).update(workflowState.toString, endDate)
_ = require(count == 1, s"Unexpected workflow execution update count $count")
} yield ()
runTransaction(action)
}
override def getExecutionBackendInfo(workflowId: WorkflowId, call: Call): Future[CallBackendInfo] = {
val action = for {
executionResult <- dataAccess.executionsByWorkflowExecutionUuidAndCallFqn(
(workflowId.toString, call.fullyQualifiedName)).result.head
localJobResultOption <- dataAccess.localJobsByExecutionId(executionResult.executionId.get).result.headOption
jesJobResultOption <- dataAccess.jesJobsByExecutionId(executionResult.executionId.get).result.headOption
sgeJobResultOption <- dataAccess.sgeJobsByExecutionId(executionResult.executionId.get).result.headOption
jobResultOption = localJobResultOption orElse jesJobResultOption orElse sgeJobResultOption
backendInfo = jobResultOption match {
case Some(localJobResult: LocalJob) =>
new LocalCallBackendInfo(localJobResult.pid)
case Some(jesJobResult: JesJob) =>
new JesCallBackendInfo(jesJobResult.jesRunId map JesId,
jesJobResult.jesStatus map JesStatus)
case Some(sgeJobResult: SgeJob) =>
new SgeCallBackendInfo(sgeJobResult.sgeJobNumber)
case _ =>
throw new IllegalArgumentException(
s"Unknown backend from db for (uuid, fqn): " +
s"($workflowId, ${call.fullyQualifiedName})")
}
} yield backendInfo
runTransaction(action)
}
override def updateExecutionBackendInfo(workflowId: WorkflowId,
callKey: BackendCallKey,
backendInfo: CallBackendInfo): Future[Unit] = {
require(backendInfo != null, "backend info is null")
import ExecutionIndex._
val action = for {
executionResult <- dataAccess.executionsByWorkflowExecutionUuidAndCallFqnAndShardIndex(
workflowId.toString, callKey.scope.fullyQualifiedName, callKey.index.fromIndex).result.head
backendUpdate <- backendInfo match {
case localBackendInfo: LocalCallBackendInfo =>
dataAccess.localJobPidsByExecutionId(
executionResult.executionId.get).update(
localBackendInfo.processId)
case jesBackendInfo: JesCallBackendInfo =>
dataAccess.jesIdsAndJesStatusesByExecutionId(
executionResult.executionId.get).update(
jesBackendInfo.jesId map { _.id },
jesBackendInfo.jesStatus map { _.status }
)
case sgeBackendInfo: SgeCallBackendInfo =>
dataAccess.sgeJobNumberByExecutionId(
executionResult.executionId.get
).update(sgeBackendInfo.sgeJobNumber)
}
_ = require(backendUpdate == 1, s"Unexpected backend update count $backendUpdate")
} yield ()
runTransaction(action)
}
private def toSymbolStoreEntries(symbolResults: Traversable[Symbol]) =
symbolResults map toSymbolStoreEntry
private def toSymbolStoreEntry(symbolResult: Symbol) = {
val wdlType = WdlType.fromWdlString(symbolResult.wdlType)
new SymbolStoreEntry(
new SymbolStoreKey(
symbolResult.scope,
symbolResult.name,
symbolResult.index.toIndex,
input = symbolResult.io == IoInput // input = true, if db contains "INPUT"
),
wdlType,
symbolResult.wdlValue map { v => dbEntryToWdlValue(v.toRawString, wdlType) },
symbolResult.symbolHash map SymbolHash
)
}
override def getAllSymbolStoreEntries(workflowId: WorkflowId): Future[Traversable[SymbolStoreEntry]] = {
val action = dataAccess.allSymbols(workflowId.toString).result
runTransaction(action) map toSymbolStoreEntries
}
/** Get all inputs for the scope of this key. */
override def getInputs(workflowId: WorkflowId, call: Call): Future[Traversable[SymbolStoreEntry]] = {
require(call != null, "call cannot be null")
getSymbols(workflowId, IoInput, Option(call.fullyQualifiedName))
}
/** Get all outputs for the scope of this key. */
override def getOutputs(workflowId: WorkflowId, key: ExecutionDatabaseKey): Future[Traversable[SymbolStoreEntry]] = {
require(key != null, "key cannot be null")
getSymbols(workflowId, IoOutput, Option(key.fqn), key.index)
}
  /** Returns all non-shard outputs for this workflowId. */
override def getWorkflowOutputs(workflowId: WorkflowId): Future[Traversable[SymbolStoreEntry]] = {
val action = dataAccess.symbolsForWorkflowOutput(workflowId.toString).result
runTransaction(action) map toSymbolStoreEntries
}
private def getSymbols(workflowId: WorkflowId,
ioValue: IoValue,
callFqnOption: Option[FullyQualifiedName] = None,
callIndexOption: Option[Int] = None): Future[Traversable[SymbolStoreEntry]] = {
val action = dataAccess.symbolsByWorkflowExecutionUuidAndIoAndMaybeScope(
workflowId.toString, ioValue, callFqnOption, callIndexOption
).result
runTransaction(action) map toSymbolStoreEntries
}
/** Should fail if a value is already set. The keys in the Map are locally qualified names. */
override def setOutputs(workflowId: WorkflowId, key: OutputKey, callOutputs: WorkflowOutputs, reportableResults: Seq[ReportableSymbol]): Future[Unit] = {
val reportableResultNames = reportableResults map { _.fullyQualifiedName }
val action = for {
workflowExecution <- dataAccess.workflowExecutionsByWorkflowExecutionUuid(workflowId.toString).result.head
_ <- dataAccess.symbolsAutoInc ++= callOutputs map {
case (symbolLocallyQualifiedName, CallOutput(wdlValue, hash)) =>
val reportableSymbol = key.index.fromIndex == -1 && reportableResultNames.contains(key.scope.fullyQualifiedName + "." + symbolLocallyQualifiedName)
new Symbol(
workflowExecution.workflowExecutionId.get,
key.scope.fullyQualifiedName,
symbolLocallyQualifiedName,
key.index.fromIndex,
IoOutput,
reportableSymbol,
wdlValue.wdlType.toWdlString,
Option(wdlValueToDbValue(wdlValue).toClob),
hash.value.map(_.value)
)
}
} yield ()
runTransaction(action)
}
/**
* Updates the existing input symbols to replace expressions with real values.
* @return The number of rows updated - as a Future.
*/
override def updateCallInputs(workflowId: WorkflowId, key: BackendCallKey, callInputs: CallInputs): Future[Int] = {
type ProjectionFunction = SlickDataAccess.this.dataAccess.Symbols => (Rep[String], Rep[Option[Clob]])
val projectionFn: ProjectionFunction = (s: SlickDataAccess.this.dataAccess.Symbols) => (s.wdlType, s.wdlValue)
val inputUpdateActions = callInputs map { case (inputName, wdlValue) =>
for {
workflowExecutionResult <- dataAccess.workflowExecutionsByWorkflowExecutionUuid(workflowId.toString).result.head
symbols = dataAccess.symbolsFilterByWorkflowAndScopeAndNameAndIndex(workflowExecutionResult.workflowExecutionId.get, key.scope.fullyQualifiedName, inputName, key.index.fromIndex)
count <- symbols.map(projectionFn).update(wdlValue.wdlType.toWdlString, Option(wdlValueToDbValue(wdlValue).toClob))
} yield count
}
// Do an FP dance to get the DBIOAction[Iterable[Int]] from Iterable[DBIOAction[Int]].
val allInputUpdatesAction = DBIO.sequence(inputUpdateActions)
runTransaction(allInputUpdatesAction) map { _.sum }
}
override def setExecutionEvents(workflowId: WorkflowId, callFqn: String, shardIndex: Option[Int], events: Seq[ExecutionEventEntry]): Future[Unit] = {
val action = for {
execution <- shardIndex match {
case Some(idx) => dataAccess.executionsByWorkflowExecutionUuidAndCallFqnAndShardIndex(workflowId.toString, callFqn, idx).result.head
case None => dataAccess.executionsByWorkflowExecutionUuidAndCallFqn(workflowId.toString, callFqn).result.head
}
_ <- dataAccess.executionEventsAutoInc ++= events map { executionEventEntry =>
new ExecutionEvent(
execution.executionId.get,
executionEventEntry.description,
new Timestamp(executionEventEntry.startTime.getMillis),
new Timestamp(executionEventEntry.endTime.getMillis))
}
} yield ()
runTransaction(action)
}
override def getAllExecutionEvents(workflowId: WorkflowId): Future[Map[ExecutionDatabaseKey, Seq[ExecutionEventEntry]]] = {
    // The database query gives us a Seq[((CallFqn, Index), ExecutionEvent)]. We want a
    // Map[ExecutionDatabaseKey -> Seq[ExecutionEventEntry]]. So let's do some functional programming!
val action = dataAccess.executionEventsByWorkflowExecutionUuid(workflowId.toString).result
runTransaction(action) map toExecutionEvents
}
private def toExecutionEvents(events: Traversable[((String, Int), ExecutionEvent)]): Map[ExecutionDatabaseKey, Seq[ExecutionEventEntry]] = {
// First: Group all the entries together by name
val grouped: Map[ExecutionDatabaseKey, Seq[((String, Int), ExecutionEvent)]] = events.toSeq groupBy { case ((fqn: String, idx: Int), event: ExecutionEvent) => ExecutionDatabaseKey(fqn, idx.toIndex) }
// Second: Transform the values. The value no longer needs the String since that's now part of the Map, and
// convert the executionEvent into a friendlier ExecutionEventEntry:
grouped mapValues { _ map { case (_ , event: ExecutionEvent) =>
ExecutionEventEntry(
event.description,
new DateTime(event.startTime.getTime),
new DateTime(event.endTime.getTime))
} }
}
private def setStatusAction(workflowId: WorkflowId, scopeKeys: Traversable[ExecutionDatabaseKey],
callStatus: CallStatus): DBIO[Unit] = {
// Describes a function from an input `Executions` to a projection of fields to be updated.
type ProjectionFunction = SlickDataAccess.this.dataAccess.Executions => (
Rep[String], Rep[Option[Timestamp]], Rep[Option[Int]], Rep[Option[String]], Rep[Option[String]], Rep[Option[Int]])
// If the call status is Starting, target the start date for update, otherwise target the end date. The end date
// is only set to a non-None value if the status is terminal.
val projectionFn: ProjectionFunction = if (callStatus.isStarting)
e => (e.status, e.startDt, e.rc, e.executionHash, e.dockerImageHash, e.resultsClonedFrom)
else
e => (e.status, e.endDt, e.rc, e.executionHash, e.dockerImageHash, e.resultsClonedFrom)
val maybeDate = if (callStatus.isStarting || callStatus.isTerminal) Option(new Date().toTimestamp) else None
// If this call represents a call caching hit, find the execution ID for the call from which results were cloned and
// wrap that in an `Option`.
// If this wasn't a call caching hit just return `DBIO.successful(None)`, `None` lifted into `DBIO`.
val findResultsClonedFromId = callStatus.resultsClonedFrom map { backendCall =>
for {
workflowExecutionResult <- dataAccess.workflowExecutionsByWorkflowExecutionUuid(backendCall.workflowDescriptor.id.toString).result.head
execution <- dataAccess.executionsByWorkflowExecutionIdAndCallFqnAndIndex(
workflowExecutionResult.workflowExecutionId.get, backendCall.key.scope.fullyQualifiedName, backendCall.key.index.fromIndex).result.head
} yield Option(execution.executionId.get)
} getOrElse DBIO.successful(None)
for {
workflowExecutionResult <- dataAccess.workflowExecutionsByWorkflowExecutionUuid(workflowId.toString).result.head
executions = dataAccess.executionsByWorkflowExecutionIdAndScopeKeys(workflowExecutionResult.workflowExecutionId.get, scopeKeys)
clonedFromId <- findResultsClonedFromId
overallHash = callStatus.hash map { _.overallHash }
dockerHash = callStatus.hash flatMap { _.dockerHash }
count <- executions.map(projectionFn).update((callStatus.executionStatus.toString, maybeDate, callStatus.returnCode, overallHash, dockerHash, clonedFromId))
scopeSize = scopeKeys.size
_ = require(count == scopeSize, s"Execution update count $count did not match scopes size $scopeSize")
} yield ()
}
override def setStatus(workflowId: WorkflowId, scopeKeys: Traversable[ExecutionDatabaseKey],
callStatus: CallStatus): Future[Unit] = {
if (scopeKeys.isEmpty) Future.successful(()) else runTransaction(setStatusAction(workflowId, scopeKeys, callStatus))
}
override def getExecutions(id: WorkflowId): Future[Traversable[Execution]] = {
val action = dataAccess.executionsByWorkflowExecutionUuid(id.toString).result
runTransaction(action)
}
override def getExecutionsForRestart(id: WorkflowId): Future[Traversable[Execution]] = {
val action = dataAccess.executionsForRestartByWorkflowExecutionUuid(id.toString).result
runTransaction(action)
}
override def getExecutionsWithResuableResultsByHash(hash: String): Future[Traversable[Execution]] = {
val action = dataAccess.executionsWithReusableResultsByExecutionHash(hash).result
runTransaction(action)
}
override def getWorkflowExecution(workflowId: WorkflowId): Future[WorkflowExecution] = {
val action = dataAccess.workflowExecutionsByWorkflowExecutionUuid(workflowId.toString).result.headOption
runTransaction(action) map { _.getOrElse(throw new NoSuchElementException(s"Workflow $workflowId not found.")) }
}
override def getWorkflowExecutionAux(id: WorkflowId): Future[WorkflowExecutionAux] = {
val action = dataAccess.workflowExecutionAuxesByWorkflowExecutionUuid(id.toString).result.headOption
runTransaction(action) map { _.getOrElse(throw new NoSuchElementException(s"No workflow execution aux found for ID '$id'.")) }
}
override def getAllInputs(workflowId: WorkflowId): Future[Traversable[SymbolStoreEntry]] = {
val action = dataAccess.symbolsByWorkflowExecutionUuidAndIo(workflowId.toString, IoInput).result
runTransaction(action) map toSymbolStoreEntries
}
override def getAllOutputs(workflowId: WorkflowId): Future[Traversable[SymbolStoreEntry]] = {
val action = dataAccess.symbolsByWorkflowExecutionUuidAndIo(workflowId.toString, IoOutput).result
runTransaction(action) map toSymbolStoreEntries
}
override def jesJobInfo(id: WorkflowId): Future[Map[ExecutionDatabaseKey, JesJob]] = {
val action = for {
executionAndJob <- dataAccess.jesJobsWithExecutionsByWorkflowExecutionUuid(id.toString).result
} yield executionAndJob
runTransaction(action) map { results =>
results map { case (execution, job) =>
ExecutionDatabaseKey(execution.callFqn, execution.index.toIndex) -> job
} toMap
}
}
override def localJobInfo(id: WorkflowId): Future[Map[ExecutionDatabaseKey, LocalJob]] = {
val action = for {
executionAndJob <- dataAccess.localJobsWithExecutionsByWorkflowExecutionUuid(id.toString).result
} yield executionAndJob
runTransaction(action) map { results =>
results map { case (execution, job) =>
ExecutionDatabaseKey(execution.callFqn, execution.index.toIndex) -> job
} toMap
}
}
override def sgeJobInfo(id: WorkflowId): Future[Map[ExecutionDatabaseKey, SgeJob]] = {
val action = for {
executionAndJob <- dataAccess.sgeJobsWithExecutionsByWorkflowExecutionUuid(id.toString).result
} yield executionAndJob
runTransaction(action) map { results =>
results map { case (execution, job) =>
ExecutionDatabaseKey(execution.callFqn, execution.index.toIndex) -> job
} toMap
}
}
override def updateWorkflowOptions(workflowId: WorkflowId, workflowOptionsJson: String): Future[Unit] = {
val action = for {
workflowExecution <- dataAccess.workflowExecutionsByWorkflowExecutionUuid(workflowId.id.toString).result.head
count <- dataAccess.workflowOptionsFromWorkflowId(workflowExecution.workflowExecutionId.get).update(workflowOptionsJson.toClob)
_ = require(count == 1, s"Unexpected workflow aux update count $count")
} yield ()
runTransaction(action)
}
override def resetNonResumableJesExecutions(workflowId: WorkflowId): Future[Unit] = {
// These executions have no corresponding recorded operation ID and are therefore not resumable.
def collectNonResumableDatabaseKeys(executionsAndJobs: Seq[(Execution, JesJob)]): Seq[ExecutionDatabaseKey] = {
executionsAndJobs collect {
case (execution, job) if execution.status.toExecutionStatus == ExecutionStatus.Running && job.jesRunId.isEmpty =>
ExecutionDatabaseKey(execution.callFqn, execution.index.toIndex)
}
}
val action = for {
executionsAndJobs <- dataAccess.jesJobsWithExecutionsByWorkflowExecutionUuid(workflowId.toString).result
nonResumableDatabaseKeys = collectNonResumableDatabaseKeys(executionsAndJobs)
_ <- setStatusAction(workflowId, nonResumableDatabaseKeys, CallStatus(ExecutionStatus.NotStarted, None, None, None))
} yield ()
runTransaction(action)
}
override def findResumableJesExecutions(workflowId: WorkflowId): Future[Map[ExecutionDatabaseKey, JesJobKey]] = {
// These executions have a corresponding recorded operation ID and should therefore be resumable.
def collectResumableKeyPairs(executionsAndJobs: Traversable[(Execution, JesJob)]): Traversable[(ExecutionDatabaseKey, JesJobKey)] = {
executionsAndJobs collect {
case (execution, job) if execution.status.toExecutionStatus == ExecutionStatus.Running && job.jesRunId.nonEmpty =>
(ExecutionDatabaseKey(execution.callFqn, execution.index.toIndex), JesJobKey(job.jesRunId.get))
}
}
val action = for {
executionsAndJobs <- dataAccess.jesJobsWithExecutionsByWorkflowExecutionUuid(workflowId.toString).result
resumableKeyPairs = collectResumableKeyPairs(executionsAndJobs)
} yield resumableKeyPairs
runTransaction(action) map { _.toMap }
}
override def queryWorkflows(queryParameters: WorkflowQueryParameters): Future[WorkflowQueryResponse] = {
val action = dataAccess.queryWorkflowExecutions(queryParameters).result
runTransaction(action) map { workflows =>
WorkflowQueryResponse(workflows map { workflow =>
WorkflowQueryResult(
id = workflow.workflowExecutionUuid,
name = workflow.name,
status = workflow.status,
start = new DateTime(workflow.startDt),
end = workflow.endDt map { new DateTime(_) })
})
}
}
override def updateCallCaching(parameters: CallCachingParameters): Future[Int] = {
// Figure out which of the three possible queries to use based on whether a call has been specified and
// if so whether an index has been specified.
val executionQuery: (Int) => Query[dataAccess.Executions, Execution, Seq] = {
(parameters.callKey, parameters.callKey flatMap { _.index }) match {
case (Some(key), Some(idx)) => dataAccess.executionsByWorkflowExecutionIdAndCallFqnAndIndex(_: Int, key.fqn, idx).extract
case (Some(key), None) => dataAccess.executionsByWorkflowExecutionIdAndCallFqn(_: Int, key.fqn).extract
case _ => dataAccess.executionsByWorkflowExecutionId(_: Int).extract
}
}
val action = for {
workflowExecution <- dataAccess.workflowExecutionsByWorkflowExecutionUuid(parameters.workflowId.id.toString).result.head
count <- executionQuery(workflowExecution.workflowExecutionId.get).map(_.allowsResultReuse).update(parameters.allow)
} yield count
runTransaction(action)
}
}
| dgtester/cromwell | src/main/scala/cromwell/engine/db/slick/SlickDataAccess.scala | Scala | bsd-3-clause | 37,668 |
package com.identityblitz.login.util
import org.apache.commons.codec.binary.Base64
/**
 * Thin helpers around Apache Commons Codec for Base64 encoding and decoding of
 * UTF-8 strings and raw byte arrays.
 */
object Base64Util {
def decode(base64: String) = Base64.decodeBase64(base64)
def decodeAsString(base64: String) = new String(Base64.decodeBase64(base64), "UTF-8")
def encode(source: Array[Byte]): String = Base64.encodeBase64String(source)
def encode(source: String): String = Base64.encodeBase64String(source.getBytes("UTF-8"))
}
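// Hypothetical usage sketch (not part of the original utility): the example object below is
// introduced here purely for illustration of a round trip through the helpers above.
object Base64UtilExample extends App {
  val encoded = Base64Util.encode("hello")           // "aGVsbG8="
  val decoded = Base64Util.decodeAsString(encoded)   // "hello"
  println(s"$encoded -> $decoded")
}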
| brainysmith/login-framework | src/main/scala/com/identityblitz/login/util/Base64Util.scala | Scala | mit | 436 |
/*
* Author: Steffen Reith ([email protected])
*
* Create Date: Fri Jun 19 10:19:44 CEST 2020
* Module Name: J1DStack - The data stack
* Project Name: J1Sc - A simple J1 implementation in Scala using Spinal HDL
*
*/
import spinal.core._
case class J1DStack(cfg : J1Config) extends J1Stack(cfg.dataStackIdxWidth) {
  // The write-enable signal and the stack pointer / next-pointer signals are inherited
  // from the J1Stack base class; the original declarations are kept here for reference:
  //override val stackWriteEnable = Bool
  //val stackPtrN = UInt(cfg.dataStackIdxWidth bits)
  //val stackPtr = Reg(UInt(cfg.dataStackIdxWidth bits)) init (0)
def apply(stall : Bool, dtosN : Bits) : (Bits, Bits, UInt) = {
// Change the stack pointer only when the CPU is not stalled
when(!stall) { stackPtr := stackPtrN }
// Top of stack and next value
val dtos = RegNext(dtosN) init(0)
// Stack memory with read and write port
val stackMem = Mem(Bits(cfg.wordSize bits), wordCount = (1 << cfg.dataStackIdxWidth))
stackMem.write(address = stackPtrN,
data = dtos,
enable = stackWriteEnable & !stall)
val dnos = stackMem.readAsync(address = stackPtr, readUnderWrite = writeFirst)
// Return top and next of stack as a pair and the dstack pointer
(dtos, dnos, stackPtr)
}
def updateDStack(msb : Bool, instr : Bits, funcTtoN : Bool) : Unit = {
// Increment for data stack pointer
val stackPtrInc = SInt(cfg.dataStackIdxWidth bits)
// Handle the update of the data stack
switch(msb ## instr(instr.high downto (instr.high - 3) + 1)) {
// For a high call push the instruction (== memory access) and for a literal push the value to the data stack
is (M"1_---", M"0_1--") {stackWriteEnable := True; stackPtrInc := 1}
// Conditional jump (pop DTOS from data stack)
is (M"0_001") {stackWriteEnable := False; stackPtrInc := -1}
// ALU instruction (check for a possible push of data, ISA bug can be fixed by '| (instr(1 downto 0) === B"b01")')
is (M"0_011") {stackWriteEnable := funcTtoN; stackPtrInc := instr(1 downto 0).asSInt.resized}
// Don't change the data stack by default
default {stackWriteEnable := False; stackPtrInc := 0}
}
// Calculate the new value of the data stack pointer
stackPtrN := (stackPtr.asSInt + stackPtrInc).asUInt
}
}
| SteffenReith/J1Sc | src/main/scala/J1DStack.scala | Scala | bsd-3-clause | 2,352 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.ann
import java.util.Random
import breeze.linalg.{*, axpy => Baxpy, DenseMatrix => BDM, DenseVector => BDV, Vector => BV}
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.mllib.linalg.{Vector => OldVector, Vectors => OldVectors}
import org.apache.spark.mllib.linalg.VectorImplicits._
import org.apache.spark.mllib.optimization._
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.random.XORShiftRandom
/**
* Trait that holds Layer properties, that are needed to instantiate it.
* Implements Layer instantiation.
*
*/
private[ann] trait Layer extends Serializable {
/**
* Number of weights that is used to allocate memory for the weights vector
*/
val weightSize: Int
/**
* Returns the output size given the input size (not counting the stack size).
* Output size is used to allocate memory for the output.
*
* @param inputSize input size
* @return output size
*/
def getOutputSize(inputSize: Int): Int
/**
* If true, the memory is not allocated for the output of this layer.
* The memory allocated to the previous layer is used to write the output of this layer.
* Developer can set this to true if computing delta of a previous layer
* does not involve its output, so the current layer can write there.
   * This also means that both layers have the same number of outputs.
*/
val inPlace: Boolean
/**
* Returns the instance of the layer based on weights provided.
* Size of weights must be equal to weightSize
*
* @param initialWeights vector with layer weights
* @return the layer model
*/
def createModel(initialWeights: BDV[Double]): LayerModel
/**
* Returns the instance of the layer with random generated weights.
*
* @param weights vector for weights initialization, must be equal to weightSize
* @param random random number generator
* @return the layer model
*/
def initModel(weights: BDV[Double], random: Random): LayerModel
}
/**
* Trait that holds Layer weights (or parameters).
* Implements functions needed for forward propagation, computing delta and gradient.
* Can return weights in Vector format.
*/
private[ann] trait LayerModel extends Serializable {
val weights: BDV[Double]
/**
* Evaluates the data (process the data through the layer).
* Output is allocated based on the size provided by the
* LayerModel implementation and the stack (batch) size.
* Developer is responsible for checking the size of output
* when writing to it.
*
* @param data data
* @param output output (modified in place)
*/
def eval(data: BDM[Double], output: BDM[Double]): Unit
/**
* Computes the delta for back propagation.
* Delta is allocated based on the size provided by the
* LayerModel implementation and the stack (batch) size.
* Developer is responsible for checking the size of
* prevDelta when writing to it.
*
* @param delta delta of this layer
* @param output output of this layer
* @param prevDelta the previous delta (modified in place)
*/
def computePrevDelta(delta: BDM[Double], output: BDM[Double], prevDelta: BDM[Double]): Unit
/**
* Computes the gradient.
* cumGrad is a wrapper on the part of the weight vector.
* Size of cumGrad is based on weightSize provided by
* implementation of LayerModel.
*
* @param delta delta for this layer
* @param input input data
* @param cumGrad cumulative gradient (modified in place)
*/
def grad(delta: BDM[Double], input: BDM[Double], cumGrad: BDV[Double]): Unit
}
/**
* Layer properties of affine transformations, that is y=A*x+b
*
* @param numIn number of inputs
* @param numOut number of outputs
*/
private[ann] class AffineLayer(val numIn: Int, val numOut: Int) extends Layer {
override val weightSize = numIn * numOut + numOut
override def getOutputSize(inputSize: Int): Int = numOut
override val inPlace = false
override def createModel(weights: BDV[Double]): LayerModel = new AffineLayerModel(weights, this)
override def initModel(weights: BDV[Double], random: Random): LayerModel =
AffineLayerModel(this, weights, random)
}
/**
* Model of Affine layer
*
* @param weights weights
* @param layer layer properties
*/
private[ann] class AffineLayerModel private[ann] (
val weights: BDV[Double],
val layer: AffineLayer) extends LayerModel {
val w = new BDM[Double](layer.numOut, layer.numIn, weights.data, weights.offset)
val b =
new BDV[Double](weights.data, weights.offset + (layer.numOut * layer.numIn), 1, layer.numOut)
private var ones: BDV[Double] = null
override def eval(data: BDM[Double], output: BDM[Double]): Unit = {
output(::, *) := b
BreezeUtil.dgemm(1.0, w, data, 1.0, output)
}
override def computePrevDelta(
delta: BDM[Double],
output: BDM[Double],
prevDelta: BDM[Double]): Unit = {
BreezeUtil.dgemm(1.0, w.t, delta, 0.0, prevDelta)
}
override def grad(delta: BDM[Double], input: BDM[Double], cumGrad: BDV[Double]): Unit = {
// compute gradient of weights
val cumGradientOfWeights = new BDM[Double](w.rows, w.cols, cumGrad.data, cumGrad.offset)
BreezeUtil.dgemm(1.0 / input.cols, delta, input.t, 1.0, cumGradientOfWeights)
if (ones == null || ones.length != delta.cols) ones = BDV.ones[Double](delta.cols)
// compute gradient of bias
val cumGradientOfBias = new BDV[Double](cumGrad.data, cumGrad.offset + w.size, 1, b.length)
BreezeUtil.dgemv(1.0 / input.cols, delta, ones, 1.0, cumGradientOfBias)
}
}
/**
 * Factory for Affine layer models
*/
private[ann] object AffineLayerModel {
/**
* Creates a model of Affine layer
*
* @param layer layer properties
* @param weights vector for weights initialization
* @param random random number generator
* @return model of Affine layer
*/
def apply(layer: AffineLayer, weights: BDV[Double], random: Random): AffineLayerModel = {
randomWeights(layer.numIn, layer.numOut, weights, random)
new AffineLayerModel(weights, layer)
}
/**
* Initialize weights randomly in the interval.
* Uses [Bottou-88] heuristic [-a/sqrt(in); a/sqrt(in)],
* where `a` is chosen in such a way that the weight variance corresponds
   * to the points of maximal curvature of the activation function
* (which is approximately 2.38 for a standard sigmoid).
*
* @param numIn number of inputs
* @param numOut number of outputs
* @param weights vector for weights initialization
* @param random random number generator
*/
def randomWeights(
numIn: Int,
numOut: Int,
weights: BDV[Double],
random: Random): Unit = {
var i = 0
val sqrtIn = math.sqrt(numIn)
while (i < weights.length) {
weights(i) = (random.nextDouble * 4.8 - 2.4) / sqrtIn
i += 1
}
}
}
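// Hypothetical sketch (not part of Spark): wiring an affine layer to its model. The object
// name and the concrete sizes below are assumptions introduced only for illustration.
private[ann] object AffineLayerSketch {
  def demo(): BDM[Double] = {
    val layer = new AffineLayer(numIn = 3, numOut = 2)
    val weights = BDV.zeros[Double](layer.weightSize)      // 3 * 2 weights + 2 biases = 8 values
    val model = layer.initModel(weights, new Random(42L))  // Bottou-style random initialization
    val input = BDM.zeros[Double](3, 5)                    // numIn x batchSize
    val output = BDM.zeros[Double](2, 5)                   // numOut x batchSize
    model.eval(input, output)                              // output = W * input + b, written in place
    output
  }
}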
/**
* Trait for functions and their derivatives for functional layers
*/
private[ann] trait ActivationFunction extends Serializable {
/**
* Implements a function
*/
def eval: Double => Double
/**
* Implements a derivative of a function (needed for the back propagation)
*/
def derivative: Double => Double
}
/**
* Implements in-place application of functions in the arrays
*/
private[ann] object ApplyInPlace {
// TODO: use Breeze UFunc
def apply(x: BDM[Double], y: BDM[Double], func: Double => Double): Unit = {
var i = 0
while (i < x.rows) {
var j = 0
while (j < x.cols) {
y(i, j) = func(x(i, j))
j += 1
}
i += 1
}
}
// TODO: use Breeze UFunc
def apply(
x1: BDM[Double],
x2: BDM[Double],
y: BDM[Double],
func: (Double, Double) => Double): Unit = {
var i = 0
while (i < x1.rows) {
var j = 0
while (j < x1.cols) {
y(i, j) = func(x1(i, j), x2(i, j))
j += 1
}
i += 1
}
}
}
/**
* Implements Sigmoid activation function
*/
private[ann] class SigmoidFunction extends ActivationFunction {
override def eval: (Double) => Double = x => 1.0 / (1 + math.exp(-x))
override def derivative: (Double) => Double = z => (1 - z) * z
}
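// Hypothetical sketch (not part of Spark): element-wise application of the sigmoid via
// ApplyInPlace. The object name is an assumption introduced for illustration only.
private[ann] object SigmoidSketch {
  def demo(): BDM[Double] = {
    val f = new SigmoidFunction
    val x = BDM.zeros[Double](2, 2)   // all inputs are 0.0
    val y = BDM.zeros[Double](2, 2)
    ApplyInPlace(x, y, f.eval)        // sigmoid(0) = 0.5 everywhere
    val d = BDM.zeros[Double](2, 2)
    ApplyInPlace(y, d, f.derivative)  // derivative in terms of the output: (1 - 0.5) * 0.5 = 0.25
    d
  }
}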
/**
* Functional layer properties, y = f(x)
*
* @param activationFunction activation function
*/
private[ann] class FunctionalLayer (val activationFunction: ActivationFunction) extends Layer {
override val weightSize = 0
override def getOutputSize(inputSize: Int): Int = inputSize
override val inPlace = true
override def createModel(weights: BDV[Double]): LayerModel = new FunctionalLayerModel(this)
override def initModel(weights: BDV[Double], random: Random): LayerModel =
createModel(weights)
}
/**
* Functional layer model. Holds no weights.
*
* @param layer functional layer
*/
private[ann] class FunctionalLayerModel private[ann] (val layer: FunctionalLayer)
extends LayerModel {
// empty weights
val weights = new BDV[Double](0)
override def eval(data: BDM[Double], output: BDM[Double]): Unit = {
ApplyInPlace(data, output, layer.activationFunction.eval)
}
override def computePrevDelta(
nextDelta: BDM[Double],
input: BDM[Double],
delta: BDM[Double]): Unit = {
ApplyInPlace(input, delta, layer.activationFunction.derivative)
delta :*= nextDelta
}
override def grad(delta: BDM[Double], input: BDM[Double], cumGrad: BDV[Double]): Unit = {}
}
/**
* Trait for the artificial neural network (ANN) topology properties
*/
private[ann] trait Topology extends Serializable {
def model(weights: Vector): TopologyModel
def model(seed: Long): TopologyModel
}
/**
* Trait for ANN topology model
*/
private[ann] trait TopologyModel extends Serializable {
val weights: Vector
/**
* Array of layers
*/
val layers: Array[Layer]
/**
* Array of layer models
*/
val layerModels: Array[LayerModel]
/**
* Forward propagation
*
* @param data input data
* @param includeLastLayer Include the last layer in the output. In
* MultilayerPerceptronClassifier, the last layer is always softmax;
* the last layer of outputs is needed for class predictions, but not
* for rawPrediction.
*
* @return array of outputs for each of the layers
*/
def forward(data: BDM[Double], includeLastLayer: Boolean): Array[BDM[Double]]
/**
   * Prediction of the model. See `ProbabilisticClassificationModel`
*
* @param features input features
* @return prediction
*/
def predict(features: Vector): Vector
/**
* Raw prediction of the model. See `ProbabilisticClassificationModel`
*
* @param features input features
* @return raw prediction
*
   * Note: This interface is only used for classification models.
*/
def predictRaw(features: Vector): Vector
/**
* Probability of the model. See `ProbabilisticClassificationModel`
*
* @param rawPrediction raw prediction vector
* @return probability
*
   * Note: This interface is only used for classification models.
*/
def raw2ProbabilityInPlace(rawPrediction: Vector): Vector
/**
* Computes gradient for the network
*
* @param data input data
* @param target target output
* @param cumGradient cumulative gradient
* @param blockSize block size
* @return error
*/
def computeGradient(data: BDM[Double], target: BDM[Double], cumGradient: Vector,
blockSize: Int): Double
}
/**
* Feed forward ANN
*
* @param layers Array of layers
*/
private[ann] class FeedForwardTopology private(val layers: Array[Layer]) extends Topology {
override def model(weights: Vector): TopologyModel = FeedForwardModel(this, weights)
override def model(seed: Long): TopologyModel = FeedForwardModel(this, seed)
}
/**
* Factory for some of the frequently-used topologies
*/
private[ml] object FeedForwardTopology {
/**
* Creates a feed forward topology from the array of layers
*
* @param layers array of layers
* @return feed forward topology
*/
def apply(layers: Array[Layer]): FeedForwardTopology = {
new FeedForwardTopology(layers)
}
/**
* Creates a multi-layer perceptron
*
* @param layerSizes sizes of layers including input and output size
* @param softmaxOnTop whether to use SoftMax or Sigmoid function for an output layer.
* Softmax is default
* @return multilayer perceptron topology
*/
def multiLayerPerceptron(
layerSizes: Array[Int],
softmaxOnTop: Boolean = true): FeedForwardTopology = {
val layers = new Array[Layer]((layerSizes.length - 1) * 2)
for (i <- 0 until layerSizes.length - 1) {
layers(i * 2) = new AffineLayer(layerSizes(i), layerSizes(i + 1))
layers(i * 2 + 1) =
if (i == layerSizes.length - 2) {
if (softmaxOnTop) {
new SoftmaxLayerWithCrossEntropyLoss()
} else {
// TODO: squared error is more natural but converges slower
new SigmoidLayerWithSquaredError()
}
} else {
new FunctionalLayer(new SigmoidFunction())
}
}
FeedForwardTopology(layers)
}
}
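// Hypothetical sketch (not part of Spark): the factory above turns an array of layer sizes into
// alternating affine/activation layers. The object name is introduced only for illustration.
private[ann] object TopologySketch {
  // 4 inputs, one hidden layer of 5 sigmoid units, 3 outputs => 4 layers:
  // affine(4->5), sigmoid, affine(5->3), softmax-with-cross-entropy loss on top.
  val mlp: FeedForwardTopology = FeedForwardTopology.multiLayerPerceptron(Array(4, 5, 3))
}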
/**
* Model of Feed Forward Neural Network.
* Implements forward, gradient computation and can return weights in vector format.
*
* @param weights network weights
* @param topology network topology
*/
private[ml] class FeedForwardModel private(
val weights: Vector,
val topology: FeedForwardTopology) extends TopologyModel {
val layers = topology.layers
val layerModels = new Array[LayerModel](layers.length)
private var offset = 0
for (i <- 0 until layers.length) {
layerModels(i) = layers(i).createModel(
new BDV[Double](weights.toArray, offset, 1, layers(i).weightSize))
offset += layers(i).weightSize
}
private var outputs: Array[BDM[Double]] = null
private var deltas: Array[BDM[Double]] = null
override def forward(data: BDM[Double], includeLastLayer: Boolean): Array[BDM[Double]] = {
// Initialize output arrays for all layers. Special treatment for InPlace
val currentBatchSize = data.cols
// TODO: allocate outputs as one big array and then create BDMs from it
if (outputs == null || outputs(0).cols != currentBatchSize) {
outputs = new Array[BDM[Double]](layers.length)
var inputSize = data.rows
for (i <- 0 until layers.length) {
if (layers(i).inPlace) {
outputs(i) = outputs(i - 1)
} else {
val outputSize = layers(i).getOutputSize(inputSize)
outputs(i) = new BDM[Double](outputSize, currentBatchSize)
inputSize = outputSize
}
}
}
layerModels(0).eval(data, outputs(0))
val end = if (includeLastLayer) layerModels.length else layerModels.length - 1
for (i <- 1 until end) {
layerModels(i).eval(outputs(i - 1), outputs(i))
}
outputs
}
override def computeGradient(
data: BDM[Double],
target: BDM[Double],
cumGradient: Vector,
realBatchSize: Int): Double = {
val outputs = forward(data, true)
val currentBatchSize = data.cols
// TODO: allocate deltas as one big array and then create BDMs from it
if (deltas == null || deltas(0).cols != currentBatchSize) {
deltas = new Array[BDM[Double]](layerModels.length)
var inputSize = data.rows
for (i <- 0 until layerModels.length - 1) {
val outputSize = layers(i).getOutputSize(inputSize)
deltas(i) = new BDM[Double](outputSize, currentBatchSize)
inputSize = outputSize
}
}
val L = layerModels.length - 1
// TODO: explain why delta of top layer is null (because it might contain loss+layer)
val loss = layerModels.last match {
case levelWithError: LossFunction => levelWithError.loss(outputs.last, target, deltas(L - 1))
case _ =>
throw new UnsupportedOperationException("Top layer is required to have objective.")
}
for (i <- (L - 2) to (0, -1)) {
layerModels(i + 1).computePrevDelta(deltas(i + 1), outputs(i + 1), deltas(i))
}
val cumGradientArray = cumGradient.toArray
var offset = 0
for (i <- 0 until layerModels.length) {
val input = if (i == 0) data else outputs(i - 1)
layerModels(i).grad(deltas(i), input,
new BDV[Double](cumGradientArray, offset, 1, layers(i).weightSize))
offset += layers(i).weightSize
}
loss
}
override def predict(data: Vector): Vector = {
val size = data.size
val result = forward(new BDM[Double](size, 1, data.toArray), true)
Vectors.dense(result.last.toArray)
}
override def predictRaw(data: Vector): Vector = {
val result = forward(new BDM[Double](data.size, 1, data.toArray), false)
Vectors.dense(result(result.length - 2).toArray)
}
override def raw2ProbabilityInPlace(data: Vector): Vector = {
val dataMatrix = new BDM[Double](data.size, 1, data.toArray)
layerModels.last.eval(dataMatrix, dataMatrix)
data
}
}
/**
 * Factory for feed forward ANN models
*/
private[ann] object FeedForwardModel {
/**
* Creates a model from a topology and weights
*
* @param topology topology
* @param weights weights
* @return model
*/
def apply(topology: FeedForwardTopology, weights: Vector): FeedForwardModel = {
val expectedWeightSize = topology.layers.map(_.weightSize).sum
require(weights.size == expectedWeightSize,
s"Expected weight vector of size ${expectedWeightSize} but got size ${weights.size}.")
new FeedForwardModel(weights, topology)
}
/**
* Creates a model given a topology and seed
*
* @param topology topology
* @param seed seed for generating the weights
* @return model
*/
def apply(topology: FeedForwardTopology, seed: Long = 11L): FeedForwardModel = {
val layers = topology.layers
val layerModels = new Array[LayerModel](layers.length)
val weights = BDV.zeros[Double](topology.layers.map(_.weightSize).sum)
var offset = 0
val random = new XORShiftRandom(seed)
for (i <- 0 until layers.length) {
layerModels(i) = layers(i).
initModel(new BDV[Double](weights.data, offset, 1, layers(i).weightSize), random)
offset += layers(i).weightSize
}
new FeedForwardModel(Vectors.fromBreeze(weights), topology)
}
}
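// Hypothetical sketch (not part of Spark): materializing a randomly initialized model from a
// topology and running a single forward pass. Sizes and names are illustrative assumptions.
private[ann] object ModelSketch {
  def demo(): Vector = {
    val topology = FeedForwardTopology.multiLayerPerceptron(Array(4, 5, 3))
    val model = FeedForwardModel(topology, 11L)        // weights drawn with XORShiftRandom(11L)
    model.predict(Vectors.dense(0.1, 0.2, 0.3, 0.4))   // 3-element output of the softmax layer
  }
}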
/**
 * Neural network gradient. Does nothing but call the model's gradient computation.
*
* @param topology topology
* @param dataStacker data stacker
*/
private[ann] class ANNGradient(topology: Topology, dataStacker: DataStacker) extends Gradient {
override def compute(
data: OldVector,
label: Double,
weights: OldVector,
cumGradient: OldVector): Double = {
val (input, target, realBatchSize) = dataStacker.unstack(data)
val model = topology.model(weights)
model.computeGradient(input, target, cumGradient, realBatchSize)
}
}
/**
* Stacks pairs of training samples (input, output) in one vector allowing them to pass
* through Optimizer/Gradient interfaces. If stackSize is more than one, makes blocks
 * or matrices of inputs and outputs and then stacks them into one vector.
* This can be used for further batch computations after unstacking.
*
* @param stackSize stack size
* @param inputSize size of the input vectors
* @param outputSize size of the output vectors
*/
private[ann] class DataStacker(stackSize: Int, inputSize: Int, outputSize: Int)
extends Serializable {
/**
* Stacks the data
*
* @param data RDD of vector pairs
* @return RDD of double (always zero) and vector that contains the stacked vectors
*/
def stack(data: RDD[(Vector, Vector)]): RDD[(Double, Vector)] = {
val stackedData = if (stackSize == 1) {
data.map { v =>
(0.0,
Vectors.fromBreeze(BDV.vertcat(
v._1.asBreeze.toDenseVector,
v._2.asBreeze.toDenseVector))
) }
} else {
data.mapPartitions { it =>
it.grouped(stackSize).map { seq =>
val size = seq.size
val bigVector = new Array[Double](inputSize * size + outputSize * size)
var i = 0
seq.foreach { case (in, out) =>
System.arraycopy(in.toArray, 0, bigVector, i * inputSize, inputSize)
System.arraycopy(out.toArray, 0, bigVector,
inputSize * size + i * outputSize, outputSize)
i += 1
}
(0.0, Vectors.dense(bigVector))
}
}
}
stackedData
}
/**
* Unstack the stacked vectors into matrices for batch operations
*
* @param data stacked vector
* @return pair of matrices holding input and output data and the real stack size
*/
def unstack(data: Vector): (BDM[Double], BDM[Double], Int) = {
val arrData = data.toArray
val realStackSize = arrData.length / (inputSize + outputSize)
val input = new BDM(inputSize, realStackSize, arrData)
val target = new BDM(outputSize, realStackSize, arrData, inputSize * realStackSize)
(input, target, realStackSize)
}
}
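// Hypothetical sketch (not part of Spark): how a stacked vector unpacks into the input and
// target matrices used for batch computation. Names and sizes are illustrative assumptions.
private[ann] object StackerSketch {
  def demo(): (BDM[Double], BDM[Double], Int) = {
    val stacker = new DataStacker(stackSize = 2, inputSize = 2, outputSize = 1)
    // Two samples laid out as [inputs of sample 1, inputs of sample 2, target 1, target 2].
    val stacked = Vectors.dense(1.0, 2.0, 3.0, 4.0, 0.0, 1.0)
    stacker.unstack(stacked)  // (2x2 input matrix, 1x2 target matrix, realStackSize = 2)
  }
}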
/**
* Simple updater
*/
private[ann] class ANNUpdater extends Updater {
override def compute(
weightsOld: OldVector,
gradient: OldVector,
stepSize: Double,
iter: Int,
regParam: Double): (OldVector, Double) = {
val thisIterStepSize = stepSize
val brzWeights: BV[Double] = weightsOld.asBreeze.toDenseVector
Baxpy(-thisIterStepSize, gradient.asBreeze, brzWeights)
(OldVectors.fromBreeze(brzWeights), 0)
}
}
/**
* MLlib-style trainer class that trains a network given the data and topology
*
* @param topology topology of ANN
* @param inputSize input size
* @param outputSize output size
*/
private[ml] class FeedForwardTrainer(
topology: Topology,
val inputSize: Int,
val outputSize: Int) extends Serializable {
private var _seed = this.getClass.getName.hashCode.toLong
private var _weights: Vector = null
private var _stackSize = 128
private var dataStacker = new DataStacker(_stackSize, inputSize, outputSize)
private var _gradient: Gradient = new ANNGradient(topology, dataStacker)
private var _updater: Updater = new ANNUpdater()
private var optimizer: Optimizer = LBFGSOptimizer.setConvergenceTol(1e-4).setNumIterations(100)
/**
* Returns seed
*/
def getSeed: Long = _seed
/**
* Sets seed
*/
def setSeed(value: Long): this.type = {
_seed = value
this
}
/**
* Returns weights
*/
def getWeights: Vector = _weights
/**
* Sets weights
*
* @param value weights
* @return trainer
*/
def setWeights(value: Vector): this.type = {
_weights = value
this
}
/**
* Sets the stack size
*
* @param value stack size
* @return trainer
*/
def setStackSize(value: Int): this.type = {
_stackSize = value
dataStacker = new DataStacker(value, inputSize, outputSize)
this
}
/**
* Sets the SGD optimizer
*
* @return SGD optimizer
*/
def SGDOptimizer: GradientDescent = {
val sgd = new GradientDescent(_gradient, _updater)
optimizer = sgd
sgd
}
/**
* Sets the LBFGS optimizer
*
   * @return LBFGS optimizer
*/
def LBFGSOptimizer: LBFGS = {
val lbfgs = new LBFGS(_gradient, _updater)
optimizer = lbfgs
lbfgs
}
/**
* Sets the updater
*
* @param value updater
* @return trainer
*/
def setUpdater(value: Updater): this.type = {
_updater = value
updateUpdater(value)
this
}
/**
* Sets the gradient
*
* @param value gradient
* @return trainer
*/
def setGradient(value: Gradient): this.type = {
_gradient = value
updateGradient(value)
this
}
private[this] def updateGradient(gradient: Gradient): Unit = {
optimizer match {
case lbfgs: LBFGS => lbfgs.setGradient(gradient)
case sgd: GradientDescent => sgd.setGradient(gradient)
case other => throw new UnsupportedOperationException(
s"Only LBFGS and GradientDescent are supported but got ${other.getClass}.")
}
}
private[this] def updateUpdater(updater: Updater): Unit = {
optimizer match {
case lbfgs: LBFGS => lbfgs.setUpdater(updater)
case sgd: GradientDescent => sgd.setUpdater(updater)
case other => throw new UnsupportedOperationException(
s"Only LBFGS and GradientDescent are supported but got ${other.getClass}.")
}
}
/**
* Trains the ANN
*
* @param data RDD of input and output vector pairs
* @return model
*/
def train(data: RDD[(Vector, Vector)]): (TopologyModel, Array[Double]) = {
val w = if (getWeights == null) {
// TODO: will make a copy if vector is a subvector of BDV (see Vectors code)
topology.model(_seed).weights
} else {
getWeights
}
// TODO: deprecate standard optimizer because it needs Vector
val trainData = dataStacker.stack(data).map { v =>
(v._1, OldVectors.fromML(v._2))
}
val handlePersistence = trainData.getStorageLevel == StorageLevel.NONE
if (handlePersistence) trainData.persist(StorageLevel.MEMORY_AND_DISK)
val (newWeights, lossHistory) = optimizer match {
case lbfgs: LBFGS => lbfgs.optimizeWithLossReturned(trainData, w)
case sgd: GradientDescent => sgd.optimizeWithLossReturned(trainData, w)
case other => throw new UnsupportedOperationException(
s"Only LBFGS and GradientDescent are supported but got ${other.getClass}.")
}
if (handlePersistence) trainData.unpersist()
(topology.model(newWeights), lossHistory)
}
}
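// Hypothetical sketch (not part of Spark): configuring a trainer for the topology above.
// The RDD of (features, one-hot target) pairs passed to train() is assumed to come from
// the caller; only the wiring is illustrated here.
private[ann] object TrainerSketch {
  def build(): FeedForwardTrainer = {
    val topology = FeedForwardTopology.multiLayerPerceptron(Array(4, 5, 3))
    val trainer = new FeedForwardTrainer(topology, inputSize = 4, outputSize = 3)
    trainer.setStackSize(64)                                             // 64 samples per stacked block
    trainer.LBFGSOptimizer.setNumIterations(50).setConvergenceTol(1e-6)  // switch to and tune LBFGS
    trainer
  }
}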
| maropu/spark | mllib/src/main/scala/org/apache/spark/ml/ann/Layer.scala | Scala | apache-2.0 | 26,291 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features.nio
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util
import java.util.Date
import com.vividsolutions.jts.geom.Geometry
import com.vividsolutions.jts.io.{WKBReader, WKBWriter}
import org.geotools.feature.{AttributeImpl, GeometryAttributeImpl}
import org.geotools.filter.identity.FeatureIdImpl
import org.geotools.geometry.jts.ReferencedEnvelope
import org.opengis.feature.`type`.{AttributeDescriptor, GeometryDescriptor, Name}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.feature.{GeometryAttribute, Property}
import org.opengis.filter.identity.FeatureId
import org.opengis.geometry.BoundingBox
import scala.collection.JavaConversions._
sealed trait AttributeAccessor[T <: AnyRef] {
def getAttribute(buf: ByteBuffer): T
}
object AttributeAccessor {
def apply[T <: AnyRef](f: ByteBuffer => T): AttributeAccessor[T] =
new AttributeAccessor[T] {
override def getAttribute(buf: ByteBuffer): T = f(buf)
}
def readVariableLengthAttribute(buf: ByteBuffer, voffset: Int, offset: Int) = {
val length = buf.getInt(voffset+offset)
buf.position(voffset+offset+4)
val ret = Array.ofDim[Byte](length)
buf.get(ret, 0, length)
ret
}
def descriptorLength(desc: AttributeDescriptor): Int = desc match {
case _ if classOf[Integer].equals(desc.getType.getBinding) => 4
case _ if classOf[java.lang.Long].equals(desc.getType.getBinding) => 8
case _ if classOf[java.lang.Double].equals(desc.getType.getBinding) => 8
case _ if classOf[java.util.Date].equals(desc.getType.getBinding) => 8
case _ => 4 // integer offset from start of variable section
}
private val wkbReader = new WKBReader()
private val wkbWriter = new WKBWriter()
def buildSimpleFeatureTypeAttributeAccessors(sft: SimpleFeatureType): IndexedSeq[AttributeAccessor[_ <: AnyRef]] = {
val descriptors = sft.getAttributeDescriptors
val voffset = descriptors.map(d => descriptorLength(d)).sum
val (_, accessors) =
descriptors.foldLeft((0, List.empty[AttributeAccessor[_ <: AnyRef]])) {
case ((pos, acc), desc) if classOf[Integer].equals(desc.getType.getBinding) =>
(pos+4, acc :+ AttributeAccessor[Integer](_.getInt(pos)))
case ((pos, acc), desc) if classOf[java.lang.Long].equals(desc.getType.getBinding) =>
(pos+8, acc :+ AttributeAccessor[java.lang.Long](_.getLong(pos)))
case ((pos, acc), desc) if classOf[java.lang.Double].equals(desc.getType.getBinding) =>
(pos+8, acc :+ AttributeAccessor[java.lang.Double](_.getDouble(pos)))
case ((pos, acc), desc) if classOf[java.util.Date].equals(desc.getType.getBinding) =>
(pos+8, acc :+ AttributeAccessor[java.util.Date] { buf =>
val l = buf.getLong(pos)
new Date(l)
})
case ((pos, acc), desc) if classOf[java.lang.Boolean].equals(desc.getType.getBinding) =>
(pos+1, acc :+ AttributeAccessor[java.lang.Boolean] { buf =>
val l = buf.get(pos)
l != 0
})
case ((pos, acc), desc) if desc.isInstanceOf[GeometryDescriptor] =>
(pos+4, acc :+ AttributeAccessor[Geometry] { buf =>
val offset = buf.getInt(pos)
val bytes = readVariableLengthAttribute(buf, voffset, offset)
wkbReader.read(bytes)
})
case ((pos, acc), desc) if classOf[String].equals(desc.getType.getBinding) =>
(pos+4, acc :+ AttributeAccessor[String] { buf =>
val offset = buf.getInt(pos)
val bytes = readVariableLengthAttribute(buf, voffset, offset)
new String(bytes, StandardCharsets.UTF_8)
})
}
accessors.toIndexedSeq
}
trait AttributeWriter[T <: AnyRef] {
def write(attr: T, offset: Int, voffset: Int, buf: ByteBuffer): (Int, Int)
}
object AttributeWriter {
def apply[T <: AnyRef](w: (T, Int, Int, ByteBuffer) => (Int, Int)) =
new AttributeWriter[T] {
override def write(attr: T, offset: Int, voffset: Int, buf: ByteBuffer): (Int, Int) =
w(attr, offset, voffset, buf)
}
}
class ByteBufferSimpleFeatureSerializer(sft: SimpleFeatureType) {
val descriptors = sft.getAttributeDescriptors
val vstart = descriptors.map(d => descriptorLength(d)).sum
val attributeWriters =
descriptors.map {
case desc if classOf[Integer].equals(desc.getType.getBinding) =>
AttributeWriter[Integer] { (attr: Integer, offset: Int, voffset: Int, buf: ByteBuffer) =>
buf.putInt(offset, attr)
(4, 0)
}
case desc if classOf[java.lang.Long].equals(desc.getType.getBinding) =>
AttributeWriter[java.lang.Long] { (attr: java.lang.Long, offset: Int, voffset: Int, buf: ByteBuffer) =>
buf.putLong(offset, attr)
(8, 0)
}
case desc if classOf[java.lang.Double].equals(desc.getType.getBinding) =>
AttributeWriter[java.lang.Double] { (attr: java.lang.Double, offset: Int, voffset: Int, buf: ByteBuffer) =>
buf.putDouble(offset, attr)
(8, 0)
}
case desc if classOf[java.util.Date].equals(desc.getType.getBinding) =>
AttributeWriter[java.util.Date] { (attr: java.util.Date, offset: Int, voffset: Int, buf: ByteBuffer) =>
if (attr != null) {
buf.putLong(offset, attr.getTime)
}
(8, 0)
}
case desc if classOf[java.lang.Boolean].equals(desc.getType.getBinding) =>
AttributeWriter[java.lang.Boolean] { (attr: java.lang.Boolean, offset: Int, voffset: Int, buf: ByteBuffer) =>
buf.put(offset, if(!attr) 0.toByte else 1.toByte)
(1, 0)
}
case desc if desc.isInstanceOf[GeometryDescriptor] =>
AttributeWriter[Geometry] { (attr: Geometry, offset: Int, voffset: Int, buf: ByteBuffer) =>
buf.putInt(offset, voffset)
val geom = wkbWriter.write(attr)
val geomlength = geom.length
buf.putInt(vstart+voffset, geomlength)
val voffsetdatastart = vstart+voffset + 4
buf.position(voffsetdatastart)
buf.put(geom, 0, geomlength)
(4, 4 + geomlength)
}
case desc if classOf[String].equals(desc.getType.getBinding) =>
AttributeWriter[String] { (attr: String, offset: Int, voffset: Int, buf: ByteBuffer) =>
buf.putInt(offset, voffset)
val bytes = attr.getBytes(StandardCharsets.UTF_8)
val blength = bytes.length
buf.putInt(vstart + voffset, blength)
val voffsetdatastart = vstart + voffset + 4
buf.position(voffsetdatastart)
buf.put(bytes, 0, blength)
(4, 4+blength)
}
}.toIndexedSeq.asInstanceOf[IndexedSeq[AttributeWriter[AnyRef]]]
def write(reuse: ByteBuffer, f: SimpleFeature): Int = {
var curvoffset = 0
var curoffset = 0
val zipped = f.getAttributes.zip(attributeWriters)
zipped.foreach { case (a, w) =>
val (uoffset, uvoffset) = w.write(a.asInstanceOf[AnyRef], curoffset, curvoffset, reuse)
curoffset += uoffset
curvoffset += uvoffset
}
vstart + curvoffset
}
}
}
class LazySimpleFeature(id: String,
sft: SimpleFeatureType,
accessors: IndexedSeq[AttributeAccessor[_ <: AnyRef]],
var buf: ByteBuffer) extends SimpleFeature {
def setBuf(reuse: ByteBuffer): Unit = {
buf = reuse
}
override def getType: SimpleFeatureType = sft
override def getID: String = id
override def getAttributes: util.List[AnyRef] = List()
override def getAttributeCount: Int = sft.getAttributeCount
override def getAttribute(name: String): AnyRef = {
val accessor = accessors(sft.indexOf(name))
accessor.getAttribute(buf)
}
override def getAttribute(name: Name): AnyRef = getAttribute(name.getLocalPart)
override def getAttribute(index: Int): AnyRef = accessors(index).getAttribute(buf)
override def getDefaultGeometry: AnyRef = getAttribute(sft.getGeometryDescriptor.getName)
override def setAttributes(values: util.List[AnyRef]): Unit = {}
override def setAttributes(values: Array[AnyRef]): Unit = {}
override def getFeatureType: SimpleFeatureType = sft
override def setAttribute(name: String, value: scala.Any): Unit = {}
override def setAttribute(name: Name, value: scala.Any): Unit = {}
override def setAttribute(index: Int, value: scala.Any): Unit = {}
override def setDefaultGeometry(geometry: scala.Any): Unit = {}
override def setDefaultGeometryProperty(geometryAttribute: GeometryAttribute): Unit = {}
override def getDefaultGeometryProperty: GeometryAttribute =
new GeometryAttributeImpl(getDefaultGeometry, sft.getGeometryDescriptor, null)
override def getIdentifier: FeatureId = new FeatureIdImpl(id)
override def getBounds: BoundingBox =
new ReferencedEnvelope(getDefaultGeometry.asInstanceOf[Geometry].getEnvelopeInternal, sft.getCoordinateReferenceSystem)
override def getValue: util.Collection[_ <: Property] = getProperties
override def setValue(values: util.Collection[Property]): Unit = {}
override def getProperty(name: Name): Property =
new AttributeImpl(getAttribute(name), sft.getDescriptor(name), getIdentifier)
override def getProperty(name: String): Property =
new AttributeImpl(getAttribute(name), sft.getDescriptor(name), getIdentifier)
override def validate(): Unit = {}
override def getProperties(name: Name): util.Collection[Property] = ???
override def getProperties(name: String): util.Collection[Property] = ???
override def getProperties: util.Collection[Property] = ???
override def getDescriptor: AttributeDescriptor = ???
override def isNillable: Boolean = ???
override def setValue(newValue: scala.Any): Unit = ???
override def getName: Name = sft.getName
override def getUserData: util.Map[AnyRef, AnyRef] = Map.empty[AnyRef, AnyRef]
}
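// Hypothetical round-trip sketch (not part of the original file): serialize a feature into a
// ByteBuffer and read an attribute back lazily. GeoTools' DataUtilities and SimpleFeatureBuilder
// are assumed to be available on the classpath; names ending in Example are introduced here.
object LazySimpleFeatureExample {
  import com.vividsolutions.jts.geom.{Coordinate, GeometryFactory}
  import org.geotools.data.DataUtilities
  import org.geotools.feature.simple.SimpleFeatureBuilder

  def demo(): AnyRef = {
    val sft = DataUtilities.createType("example", "name:String,age:Int,*geom:Point:srid=4326")
    val accessors = AttributeAccessor.buildSimpleFeatureTypeAttributeAccessors(sft)
    val serializer = new AttributeAccessor.ByteBufferSimpleFeatureSerializer(sft)
    val point = new GeometryFactory().createPoint(new Coordinate(1.0, 2.0))
    val feature = SimpleFeatureBuilder.build(sft, Array[AnyRef]("alice", Int.box(30), point), "fid-1")
    val buf = java.nio.ByteBuffer.allocate(1024)
    serializer.write(buf, feature)                    // fixed-size slots first, then variable-length data
    val lazyFeature = new LazySimpleFeature("fid-1", sft, accessors, buf)
    lazyFeature.getAttribute("name")                  // "alice", decoded on demand from the buffer
  }
}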
| ddseapy/geomesa | geomesa-features/geomesa-feature-nio/src/main/scala/org/locationtech/geomesa/features/nio/LazySimpleFeature.scala | Scala | apache-2.0 | 10,635 |
/*
* Copyright (c) 2012 - 2020 Splice Machine, Inc.
*
* This file is part of Splice Machine.
* Splice Machine is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either
* version 3, or (at your option) any later version.
* Splice Machine is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Public License for more details.
* You should have received a copy of the GNU Affero General Public License along with Splice Machine.
* If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.splicemachine.spark.splicemachine
object SpliceJDBCOptions {
private val jdbcOptionNames = collection.mutable.Set[String]()
private def newOption(name: String): String = {
jdbcOptionNames += name.toLowerCase
name
}
/**
* JDBC Url including authentication mechanism (user/password, principal or principal/keytab)
*/
val JDBC_URL = newOption("url")
/**
* Whether to create relations using SplicemachineContext.internalDf() by default. Defaults to "false"
*/
val JDBC_INTERNAL_QUERIES = newOption("internal")
/**
   * Temporary directory used by SplicemachineContext.internalDf() to hold temporary data. It has to be accessible to both the client user and the SpliceMachine user.
*/
val JDBC_TEMP_DIRECTORY = newOption("tmp")
}
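// Hypothetical usage sketch (not part of the original object): assembling reader options for
// the connector. The URL and directory below are placeholders, not real endpoints.
object SpliceJDBCOptionsExample {
  val options: Map[String, String] = Map(
    SpliceJDBCOptions.JDBC_URL -> "jdbc:splice://host:1527/splicedb;user=me;password=secret",
    SpliceJDBCOptions.JDBC_INTERNAL_QUERIES -> "true",
    SpliceJDBCOptions.JDBC_TEMP_DIRECTORY -> "/tmp/splice-spark"
  )
}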
| splicemachine/spliceengine | splice_spark/src/main/scala/com/splicemachine/spark/splicemachine/SpliceJDBCOptions.scala | Scala | agpl-3.0 | 1,531 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.catalog
import java.lang.reflect.InvocationTargetException
import java.net.URI
import java.util.Locale
import java.util.concurrent.Callable
import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable
import scala.util.{Failure, Success, Try}
import scala.util.control.NonFatal
import com.google.common.cache.{Cache, CacheBuilder}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst._
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.analysis.FunctionRegistry.FunctionBuilder
import org.apache.spark.sql.catalyst.expressions.{Expression, ExpressionInfo}
import org.apache.spark.sql.catalyst.parser.{CatalystSqlParser, ParserInterface}
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, SubqueryAlias, View}
import org.apache.spark.sql.catalyst.util.StringUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATION
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.Utils
object SessionCatalog {
val DEFAULT_DATABASE = "default"
}
/**
* An internal catalog that is used by a Spark Session. This internal catalog serves as a
* proxy to the underlying metastore (e.g. Hive Metastore) and it also manages temporary
* tables and functions of the Spark Session that it belongs to.
*
* This class must be thread-safe.
*/
class SessionCatalog(
val externalCatalog: ExternalCatalog,
globalTempViewManager: GlobalTempViewManager,
functionRegistry: FunctionRegistry,
conf: SQLConf,
hadoopConf: Configuration,
parser: ParserInterface,
functionResourceLoader: FunctionResourceLoader) extends Logging {
import SessionCatalog._
import CatalogTypes.TablePartitionSpec
// For testing only.
def this(
externalCatalog: ExternalCatalog,
functionRegistry: FunctionRegistry,
conf: SQLConf) {
this(
externalCatalog,
new GlobalTempViewManager("global_temp"),
functionRegistry,
conf,
new Configuration(),
new CatalystSqlParser(conf),
DummyFunctionResourceLoader)
}
// For testing only.
def this(externalCatalog: ExternalCatalog) {
this(
externalCatalog,
new SimpleFunctionRegistry,
new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true))
}
/** List of temporary tables, mapping from table name to their logical plan. */
@GuardedBy("this")
protected val tempTables = new mutable.HashMap[String, LogicalPlan]
// Note: we track current database here because certain operations do not explicitly
// specify the database (e.g. DROP TABLE my_table). In these cases we must first
// check whether the temporary table or function exists, then, if not, operate on
// the corresponding item in the current database.
@GuardedBy("this")
protected var currentDb: String = formatDatabaseName(DEFAULT_DATABASE)
/**
   * Checks if the given name conforms to the Hive standard ("[a-zA-Z_0-9]+"),
   * i.e. if this name only contains letters, numbers, and _.
*
* This method is intended to have the same behavior of
* org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName.
*/
private def validateName(name: String): Unit = {
val validNameFormat = "([\\\\w_]+)".r
if (!validNameFormat.pattern.matcher(name).matches()) {
throw new AnalysisException(s"`$name` is not a valid name for tables/databases. " +
"Valid names only contain alphabet characters, numbers and _.")
}
}
/**
* Format table name, taking into account case sensitivity.
*/
protected[this] def formatTableName(name: String): String = {
if (conf.caseSensitiveAnalysis) name else name.toLowerCase(Locale.ROOT)
}
/**
* Format database name, taking into account case sensitivity.
*/
protected[this] def formatDatabaseName(name: String): String = {
if (conf.caseSensitiveAnalysis) name else name.toLowerCase(Locale.ROOT)
}
private val tableRelationCache: Cache[QualifiedTableName, LogicalPlan] = {
val cacheSize = conf.tableRelationCacheSize
CacheBuilder.newBuilder().maximumSize(cacheSize).build[QualifiedTableName, LogicalPlan]()
}
/** This method provides a way to get a cached plan. */
def getCachedPlan(t: QualifiedTableName, c: Callable[LogicalPlan]): LogicalPlan = {
tableRelationCache.get(t, c)
}
/** This method provides a way to get a cached plan if the key exists. */
def getCachedTable(key: QualifiedTableName): LogicalPlan = {
tableRelationCache.getIfPresent(key)
}
/** This method provides a way to cache a plan. */
def cacheTable(t: QualifiedTableName, l: LogicalPlan): Unit = {
tableRelationCache.put(t, l)
}
/** This method provides a way to invalidate a cached plan. */
def invalidateCachedTable(key: QualifiedTableName): Unit = {
tableRelationCache.invalidate(key)
}
/** This method provides a way to invalidate all the cached plans. */
def invalidateAllCachedTables(): Unit = {
tableRelationCache.invalidateAll()
}
/**
* This method is used to make the given path qualified before we
* store this path in the underlying external catalog. So, when a path
* does not contain a scheme, this path will not be changed after the default
* FileSystem is changed.
*/
private def makeQualifiedPath(path: URI): URI = {
val hadoopPath = new Path(path)
val fs = hadoopPath.getFileSystem(hadoopConf)
fs.makeQualified(hadoopPath).toUri
}
private def requireDbExists(db: String): Unit = {
if (!databaseExists(db)) {
throw new NoSuchDatabaseException(db)
}
}
private def requireTableExists(name: TableIdentifier): Unit = {
if (!tableExists(name)) {
val db = name.database.getOrElse(currentDb)
throw new NoSuchTableException(db = db, table = name.table)
}
}
private def requireTableNotExists(name: TableIdentifier): Unit = {
if (tableExists(name)) {
val db = name.database.getOrElse(currentDb)
throw new TableAlreadyExistsException(db = db, table = name.table)
}
}
// ----------------------------------------------------------------------------
// Databases
// ----------------------------------------------------------------------------
// All methods in this category interact directly with the underlying catalog.
// ----------------------------------------------------------------------------
def createDatabase(dbDefinition: CatalogDatabase, ignoreIfExists: Boolean): Unit = {
val dbName = formatDatabaseName(dbDefinition.name)
if (dbName == globalTempViewManager.database) {
throw new AnalysisException(
s"${globalTempViewManager.database} is a system preserved database, " +
"you cannot create a database with this name.")
}
validateName(dbName)
val qualifiedPath = makeQualifiedPath(dbDefinition.locationUri)
externalCatalog.createDatabase(
dbDefinition.copy(name = dbName, locationUri = qualifiedPath),
ignoreIfExists)
}
def dropDatabase(db: String, ignoreIfNotExists: Boolean, cascade: Boolean): Unit = {
val dbName = formatDatabaseName(db)
if (dbName == DEFAULT_DATABASE) {
throw new AnalysisException(s"Can not drop default database")
}
externalCatalog.dropDatabase(dbName, ignoreIfNotExists, cascade)
}
def alterDatabase(dbDefinition: CatalogDatabase): Unit = {
val dbName = formatDatabaseName(dbDefinition.name)
requireDbExists(dbName)
externalCatalog.alterDatabase(dbDefinition.copy(name = dbName))
}
def getDatabaseMetadata(db: String): CatalogDatabase = {
val dbName = formatDatabaseName(db)
requireDbExists(dbName)
externalCatalog.getDatabase(dbName)
}
def databaseExists(db: String): Boolean = {
val dbName = formatDatabaseName(db)
externalCatalog.databaseExists(dbName)
}
def listDatabases(): Seq[String] = {
externalCatalog.listDatabases()
}
def listDatabases(pattern: String): Seq[String] = {
externalCatalog.listDatabases(pattern)
}
def getCurrentDatabase: String = synchronized { currentDb }
def setCurrentDatabase(db: String): Unit = {
val dbName = formatDatabaseName(db)
if (dbName == globalTempViewManager.database) {
throw new AnalysisException(
s"${globalTempViewManager.database} is a system preserved database, " +
"you cannot use it as current database. To access global temporary views, you should " +
"use qualified name with the GLOBAL_TEMP_DATABASE, e.g. SELECT * FROM " +
s"${globalTempViewManager.database}.viewName.")
}
requireDbExists(dbName)
synchronized { currentDb = dbName }
}
/**
* Get the path for creating a non-default database when database location is not provided
* by users.
*/
def getDefaultDBPath(db: String): URI = {
val database = formatDatabaseName(db)
new Path(new Path(conf.warehousePath), database + ".db").toUri
}
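  // For example, with spark.sql.warehouse.dir set to /user/hive/warehouse, getDefaultDBPath("SalesDB")
  // resolves to /user/hive/warehouse/salesdb.db (the name is lower-cased unless case-sensitive
  // analysis is enabled).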
// ----------------------------------------------------------------------------
// Tables
// ----------------------------------------------------------------------------
// There are two kinds of tables, temporary tables and metastore tables.
// Temporary tables are isolated across sessions and do not belong to any
// particular database. Metastore tables can be used across multiple
// sessions as their metadata is persisted in the underlying catalog.
// ----------------------------------------------------------------------------
// ----------------------------------------------------
// | Methods that interact with metastore tables only |
// ----------------------------------------------------
/**
* Create a metastore table in the database specified in `tableDefinition`.
* If no such database is specified, create it in the current database.
*/
def createTable(tableDefinition: CatalogTable, ignoreIfExists: Boolean): Unit = {
val db = formatDatabaseName(tableDefinition.identifier.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableDefinition.identifier.table)
validateName(table)
val newTableDefinition = if (tableDefinition.storage.locationUri.isDefined
&& !tableDefinition.storage.locationUri.get.isAbsolute) {
// make the location of the table qualified.
val qualifiedTableLocation =
makeQualifiedPath(tableDefinition.storage.locationUri.get)
tableDefinition.copy(
storage = tableDefinition.storage.copy(locationUri = Some(qualifiedTableLocation)),
identifier = TableIdentifier(table, Some(db)))
} else {
tableDefinition.copy(identifier = TableIdentifier(table, Some(db)))
}
requireDbExists(db)
externalCatalog.createTable(newTableDefinition, ignoreIfExists)
}
/**
* Alter the metadata of an existing metastore table identified by `tableDefinition`.
*
* If no database is specified in `tableDefinition`, assume the table is in the
* current database.
*
* Note: If the underlying implementation does not support altering a certain field,
* this becomes a no-op.
*/
def alterTable(tableDefinition: CatalogTable): Unit = {
val db = formatDatabaseName(tableDefinition.identifier.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableDefinition.identifier.table)
val tableIdentifier = TableIdentifier(table, Some(db))
val newTableDefinition = tableDefinition.copy(identifier = tableIdentifier)
requireDbExists(db)
requireTableExists(tableIdentifier)
externalCatalog.alterTable(newTableDefinition)
}
/**
* Alter the schema of a table identified by the provided table identifier. The new schema
* should still contain the existing bucket columns and partition columns used by the table. This
* method will also update any Spark SQL-related parameters stored as Hive table properties (such
* as the schema itself).
*
* @param identifier TableIdentifier
* @param newSchema Updated schema to be used for the table (must contain existing partition and
* bucket columns, and partition columns need to be at the end)
*/
def alterTableSchema(
identifier: TableIdentifier,
newSchema: StructType): Unit = {
val db = formatDatabaseName(identifier.database.getOrElse(getCurrentDatabase))
val table = formatTableName(identifier.table)
val tableIdentifier = TableIdentifier(table, Some(db))
requireDbExists(db)
requireTableExists(tableIdentifier)
val catalogTable = externalCatalog.getTable(db, table)
val oldSchema = catalogTable.schema
// not supporting dropping columns yet
val nonExistentColumnNames = oldSchema.map(_.name).filterNot(columnNameResolved(newSchema, _))
if (nonExistentColumnNames.nonEmpty) {
throw new AnalysisException(
s"""
|Some existing schema fields (${nonExistentColumnNames.mkString("[", ",", "]")}) are
|not present in the new schema. We don't support dropping columns yet.
""".stripMargin)
}
// assuming the newSchema has all partition columns at the end as required
externalCatalog.alterTableSchema(db, table, newSchema)
}
private def columnNameResolved(schema: StructType, colName: String): Boolean = {
schema.fields.map(_.name).exists(conf.resolver(_, colName))
}
/**
* Alter Spark's statistics of an existing metastore table identified by the provided table
* identifier.
*/
def alterTableStats(identifier: TableIdentifier, newStats: Option[CatalogStatistics]): Unit = {
val db = formatDatabaseName(identifier.database.getOrElse(getCurrentDatabase))
val table = formatTableName(identifier.table)
val tableIdentifier = TableIdentifier(table, Some(db))
requireDbExists(db)
requireTableExists(tableIdentifier)
externalCatalog.alterTableStats(db, table, newStats)
}
/**
* Return whether a table/view with the specified name exists. If no database is specified, check
* with current database.
*/
def tableExists(name: TableIdentifier): Boolean = synchronized {
val db = formatDatabaseName(name.database.getOrElse(currentDb))
val table = formatTableName(name.table)
externalCatalog.tableExists(db, table)
}
/**
* Retrieve the metadata of an existing permanent table/view. If no database is specified,
* assume the table/view is in the current database.
*/
@throws[NoSuchDatabaseException]
@throws[NoSuchTableException]
def getTableMetadata(name: TableIdentifier): CatalogTable = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
val table = formatTableName(name.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Some(db)))
externalCatalog.getTable(db, table)
}
/**
* Load files stored in given path into an existing metastore table.
* If no database is specified, assume the table is in the current database.
* If the specified table is not found in the database then a [[NoSuchTableException]] is thrown.
*/
def loadTable(
name: TableIdentifier,
loadPath: String,
isOverwrite: Boolean,
isSrcLocal: Boolean): Unit = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
val table = formatTableName(name.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Some(db)))
externalCatalog.loadTable(db, table, loadPath, isOverwrite, isSrcLocal)
}
/**
* Load files stored in given path into the partition of an existing metastore table.
* If no database is specified, assume the table is in the current database.
* If the specified table is not found in the database then a [[NoSuchTableException]] is thrown.
*/
def loadPartition(
name: TableIdentifier,
loadPath: String,
spec: TablePartitionSpec,
isOverwrite: Boolean,
inheritTableSpecs: Boolean,
isSrcLocal: Boolean): Unit = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
val table = formatTableName(name.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Some(db)))
requireNonEmptyValueInPartitionSpec(Seq(spec))
externalCatalog.loadPartition(
db, table, loadPath, spec, isOverwrite, inheritTableSpecs, isSrcLocal)
}
def defaultTablePath(tableIdent: TableIdentifier): URI = {
val dbName = formatDatabaseName(tableIdent.database.getOrElse(getCurrentDatabase))
val dbLocation = getDatabaseMetadata(dbName).locationUri
new Path(new Path(dbLocation), formatTableName(tableIdent.table)).toUri
}
// ----------------------------------------------
// | Methods that interact with temp views only |
// ----------------------------------------------
/**
* Create a local temporary view.
*/
def createTempView(
name: String,
tableDefinition: LogicalPlan,
overrideIfExists: Boolean): Unit = synchronized {
val table = formatTableName(name)
if (tempTables.contains(table) && !overrideIfExists) {
throw new TempTableAlreadyExistsException(name)
}
tempTables.put(table, tableDefinition)
}
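  // Illustrative sketch (not part of the original source): the local temp view round trip.
  // `catalog` and `plan` (an analysed LogicalPlan) are assumed to exist.
  //
  //   catalog.createTempView("tmp", plan, overrideIfExists = false)
  //   catalog.getTempView("tmp")      // Some(plan)
  //   catalog.dropTempView("tmp")     // true
  //   catalog.getTempView("tmp")      // None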
/**
* Create a global temporary view.
*/
def createGlobalTempView(
name: String,
viewDefinition: LogicalPlan,
overrideIfExists: Boolean): Unit = {
globalTempViewManager.create(formatTableName(name), viewDefinition, overrideIfExists)
}
/**
   * Alter the definition of a local/global temp view matching the given name. Returns true if a
   * temp view is matched and altered, false otherwise.
*/
def alterTempViewDefinition(
name: TableIdentifier,
viewDefinition: LogicalPlan): Boolean = synchronized {
val viewName = formatTableName(name.table)
if (name.database.isEmpty) {
if (tempTables.contains(viewName)) {
createTempView(viewName, viewDefinition, overrideIfExists = true)
true
} else {
false
}
} else if (formatDatabaseName(name.database.get) == globalTempViewManager.database) {
globalTempViewManager.update(viewName, viewDefinition)
} else {
false
}
}
/**
* Return a local temporary view exactly as it was stored.
*/
def getTempView(name: String): Option[LogicalPlan] = synchronized {
tempTables.get(formatTableName(name))
}
/**
* Return a global temporary view exactly as it was stored.
*/
def getGlobalTempView(name: String): Option[LogicalPlan] = {
globalTempViewManager.get(formatTableName(name))
}
/**
* Drop a local temporary view.
*
* Returns true if this view is dropped successfully, false otherwise.
*/
def dropTempView(name: String): Boolean = synchronized {
tempTables.remove(formatTableName(name)).isDefined
}
/**
* Drop a global temporary view.
*
* Returns true if this view is dropped successfully, false otherwise.
*/
def dropGlobalTempView(name: String): Boolean = {
globalTempViewManager.remove(formatTableName(name))
}
// -------------------------------------------------------------
// | Methods that interact with temporary and metastore tables |
// -------------------------------------------------------------
/**
* Retrieve the metadata of an existing temporary view or permanent table/view.
*
* If a database is specified in `name`, this will return the metadata of table/view in that
* database.
* If no database is specified, this will first attempt to get the metadata of a temporary view
* with the same name, then, if that does not exist, return the metadata of table/view in the
* current database.
*/
def getTempViewOrPermanentTableMetadata(name: TableIdentifier): CatalogTable = synchronized {
val table = formatTableName(name.table)
if (name.database.isEmpty) {
getTempView(table).map { plan =>
CatalogTable(
identifier = TableIdentifier(table),
tableType = CatalogTableType.VIEW,
storage = CatalogStorageFormat.empty,
schema = plan.output.toStructType)
}.getOrElse(getTableMetadata(name))
} else if (formatDatabaseName(name.database.get) == globalTempViewManager.database) {
globalTempViewManager.get(table).map { plan =>
CatalogTable(
identifier = TableIdentifier(table, Some(globalTempViewManager.database)),
tableType = CatalogTableType.VIEW,
storage = CatalogStorageFormat.empty,
schema = plan.output.toStructType)
}.getOrElse(throw new NoSuchTableException(globalTempViewManager.database, table))
} else {
getTableMetadata(name)
}
}
/**
* Rename a table.
*
* If a database is specified in `oldName`, this will rename the table in that database.
* If no database is specified, this will first attempt to rename a temporary table with
* the same name, then, if that does not exist, rename the table in the current database.
*
* This assumes the database specified in `newName` matches the one in `oldName`.
*/
def renameTable(oldName: TableIdentifier, newName: TableIdentifier): Unit = synchronized {
val db = formatDatabaseName(oldName.database.getOrElse(currentDb))
newName.database.map(formatDatabaseName).foreach { newDb =>
if (db != newDb) {
throw new AnalysisException(
s"RENAME TABLE source and destination databases do not match: '$db' != '$newDb'")
}
}
val oldTableName = formatTableName(oldName.table)
val newTableName = formatTableName(newName.table)
if (db == globalTempViewManager.database) {
globalTempViewManager.rename(oldTableName, newTableName)
} else {
requireDbExists(db)
if (oldName.database.isDefined || !tempTables.contains(oldTableName)) {
requireTableExists(TableIdentifier(oldTableName, Some(db)))
requireTableNotExists(TableIdentifier(newTableName, Some(db)))
validateName(newTableName)
externalCatalog.renameTable(db, oldTableName, newTableName)
} else {
if (newName.database.isDefined) {
throw new AnalysisException(
s"RENAME TEMPORARY TABLE from '$oldName' to '$newName': cannot specify database " +
s"name '${newName.database.get}' in the destination table")
}
if (tempTables.contains(newTableName)) {
throw new AnalysisException(s"RENAME TEMPORARY TABLE from '$oldName' to '$newName': " +
"destination table already exists")
}
val table = tempTables(oldTableName)
tempTables.remove(oldTableName)
tempTables.put(newTableName, table)
}
}
}
/**
* Drop a table.
*
* If a database is specified in `name`, this will drop the table from that database.
* If no database is specified, this will first attempt to drop a temporary table with
* the same name, then, if that does not exist, drop the table from the current database.
*/
def dropTable(
name: TableIdentifier,
ignoreIfNotExists: Boolean,
purge: Boolean): Unit = synchronized {
val db = formatDatabaseName(name.database.getOrElse(currentDb))
val table = formatTableName(name.table)
if (db == globalTempViewManager.database) {
val viewExists = globalTempViewManager.remove(table)
if (!viewExists && !ignoreIfNotExists) {
throw new NoSuchTableException(globalTempViewManager.database, table)
}
} else {
if (name.database.isDefined || !tempTables.contains(table)) {
requireDbExists(db)
      // If the table exists, drop it; we pass ignoreIfNotExists = true to the external
      // catalog because existence was just verified. If the table does not exist,
      // throw a NoSuchTableException unless ignoreIfNotExists is set.
if (tableExists(TableIdentifier(table, Option(db)))) {
externalCatalog.dropTable(db, table, ignoreIfNotExists = true, purge = purge)
} else if (!ignoreIfNotExists) {
throw new NoSuchTableException(db = db, table = table)
}
} else {
tempTables.remove(table)
}
}
}
/**
* Return a [[LogicalPlan]] that represents the given table or view.
*
* If a database is specified in `name`, this will return the table/view from that database.
* If no database is specified, this will first attempt to return a temporary table/view with
* the same name, then, if that does not exist, return the table/view from the current database.
*
   * Note that the global temp view database is also valid here; in that case, this will return the
   * global temp view matching the given name.
*
* If the relation is a view, we generate a [[View]] operator from the view description, and
* wrap the logical plan in a [[SubqueryAlias]] which will track the name of the view.
*
* @param name The name of the table/view that we look up.
*/
def lookupRelation(name: TableIdentifier): LogicalPlan = {
synchronized {
val db = formatDatabaseName(name.database.getOrElse(currentDb))
val table = formatTableName(name.table)
if (db == globalTempViewManager.database) {
globalTempViewManager.get(table).map { viewDef =>
SubqueryAlias(table, viewDef)
}.getOrElse(throw new NoSuchTableException(db, table))
} else if (name.database.isDefined || !tempTables.contains(table)) {
val metadata = externalCatalog.getTable(db, table)
if (metadata.tableType == CatalogTableType.VIEW) {
val viewText = metadata.viewText.getOrElse(sys.error("Invalid view without text."))
// The relation is a view, so we wrap the relation by:
// 1. Add a [[View]] operator over the relation to keep track of the view desc;
// 2. Wrap the logical plan in a [[SubqueryAlias]] which tracks the name of the view.
val child = View(
desc = metadata,
output = metadata.schema.toAttributes,
child = parser.parsePlan(viewText))
SubqueryAlias(table, child)
} else {
SubqueryAlias(table, UnresolvedCatalogRelation(metadata))
}
} else {
SubqueryAlias(table, tempTables(table))
}
}
}
/**
* Return whether a table with the specified name is a temporary table.
*
* Note: The temporary table cache is checked only when database is not
* explicitly specified.
*/
def isTemporaryTable(name: TableIdentifier): Boolean = synchronized {
val table = formatTableName(name.table)
if (name.database.isEmpty) {
tempTables.contains(table)
} else if (formatDatabaseName(name.database.get) == globalTempViewManager.database) {
globalTempViewManager.get(table).isDefined
} else {
false
}
}
/**
* List all tables in the specified database, including local temporary tables.
*
   * Note that if the specified database is the global temporary view database, we will list global
* temporary views.
*/
def listTables(db: String): Seq[TableIdentifier] = listTables(db, "*")
/**
* List all matching tables in the specified database, including local temporary tables.
*
   * Note that if the specified database is the global temporary view database, we will list global
* temporary views.
*/
def listTables(db: String, pattern: String): Seq[TableIdentifier] = {
val dbName = formatDatabaseName(db)
val dbTables = if (dbName == globalTempViewManager.database) {
globalTempViewManager.listViewNames(pattern).map { name =>
TableIdentifier(name, Some(globalTempViewManager.database))
}
} else {
requireDbExists(dbName)
externalCatalog.listTables(dbName, pattern).map { name =>
TableIdentifier(name, Some(dbName))
}
}
val localTempViews = synchronized {
StringUtils.filterPattern(tempTables.keys.toSeq, pattern).map { name =>
TableIdentifier(name)
}
}
dbTables ++ localTempViews
}
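  // Illustrative sketch (not part of the original source): listTables merges the metastore
  // tables of the requested database with every local temporary view (which carries no
  // database). Assuming "db1" contains table "t1" and a temp view "v1" has been created:
  //
  //   catalog.listTables("db1")
  //   // Seq(TableIdentifier("t1", Some("db1")), TableIdentifier("v1"))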
/**
* Refresh the cache entry for a metastore table, if any.
*/
def refreshTable(name: TableIdentifier): Unit = synchronized {
val dbName = formatDatabaseName(name.database.getOrElse(currentDb))
val tableName = formatTableName(name.table)
// Go through temporary tables and invalidate them.
// If the database is defined, this may be a global temporary view.
// If the database is not defined, there is a good chance this is a temp table.
if (name.database.isEmpty) {
tempTables.get(tableName).foreach(_.refresh())
} else if (dbName == globalTempViewManager.database) {
globalTempViewManager.get(tableName).foreach(_.refresh())
}
// Also invalidate the table relation cache.
val qualifiedTableName = QualifiedTableName(dbName, tableName)
tableRelationCache.invalidate(qualifiedTableName)
}
/**
* Drop all existing temporary tables.
* For testing only.
*/
def clearTempTables(): Unit = synchronized {
tempTables.clear()
}
// ----------------------------------------------------------------------------
// Partitions
// ----------------------------------------------------------------------------
// All methods in this category interact directly with the underlying catalog.
// These methods are concerned with only metastore tables.
// ----------------------------------------------------------------------------
// TODO: We need to figure out how these methods interact with our data source
// tables. For such tables, we do not store values of partitioning columns in
// the metastore. For now, partition values of a data source table will be
// automatically discovered when we load the table.
/**
* Create partitions in an existing table, assuming it exists.
* If no database is specified, assume the table is in the current database.
*/
def createPartitions(
tableName: TableIdentifier,
parts: Seq[CatalogTablePartition],
ignoreIfExists: Boolean): Unit = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requireExactMatchedPartitionSpec(parts.map(_.spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(parts.map(_.spec))
externalCatalog.createPartitions(db, table, parts, ignoreIfExists)
}
/**
* Drop partitions from a table, assuming they exist.
* If no database is specified, assume the table is in the current database.
*/
def dropPartitions(
tableName: TableIdentifier,
specs: Seq[TablePartitionSpec],
ignoreIfNotExists: Boolean,
purge: Boolean,
retainData: Boolean): Unit = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requirePartialMatchedPartitionSpec(specs, getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(specs)
externalCatalog.dropPartitions(db, table, specs, ignoreIfNotExists, purge, retainData)
}
/**
* Override the specs of one or many existing table partitions, assuming they exist.
*
* This assumes index i of `specs` corresponds to index i of `newSpecs`.
* If no database is specified, assume the table is in the current database.
*/
def renamePartitions(
tableName: TableIdentifier,
specs: Seq[TablePartitionSpec],
newSpecs: Seq[TablePartitionSpec]): Unit = {
val tableMetadata = getTableMetadata(tableName)
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requireExactMatchedPartitionSpec(specs, tableMetadata)
requireExactMatchedPartitionSpec(newSpecs, tableMetadata)
requireNonEmptyValueInPartitionSpec(specs)
requireNonEmptyValueInPartitionSpec(newSpecs)
externalCatalog.renamePartitions(db, table, specs, newSpecs)
}
/**
   * Alter one or many table partitions whose specs match those specified in `parts`,
* assuming the partitions exist.
*
* If no database is specified, assume the table is in the current database.
*
* Note: If the underlying implementation does not support altering a certain field,
* this becomes a no-op.
*/
def alterPartitions(tableName: TableIdentifier, parts: Seq[CatalogTablePartition]): Unit = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requireExactMatchedPartitionSpec(parts.map(_.spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(parts.map(_.spec))
externalCatalog.alterPartitions(db, table, parts)
}
/**
* Retrieve the metadata of a table partition, assuming it exists.
* If no database is specified, assume the table is in the current database.
*/
def getPartition(tableName: TableIdentifier, spec: TablePartitionSpec): CatalogTablePartition = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requireExactMatchedPartitionSpec(Seq(spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(Seq(spec))
externalCatalog.getPartition(db, table, spec)
}
/**
* List the names of all partitions that belong to the specified table, assuming it exists.
*
* A partial partition spec may optionally be provided to filter the partitions returned.
* For instance, if there exist partitions (a='1', b='2'), (a='1', b='3') and (a='2', b='4'),
* then a partial spec of (a='1') will return the first two only.
*/
def listPartitionNames(
tableName: TableIdentifier,
partialSpec: Option[TablePartitionSpec] = None): Seq[String] = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
partialSpec.foreach { spec =>
requirePartialMatchedPartitionSpec(Seq(spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(Seq(spec))
}
externalCatalog.listPartitionNames(db, table, partialSpec)
}
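  // Illustrative sketch (not part of the original source), reusing the example from the
  // scaladoc above. The exact string format of the returned names comes from the external
  // catalog (typically Hive-style "a=1/b=2"), so treat that format as an assumption here.
  //
  //   catalog.listPartitionNames(TableIdentifier("tbl"), Some(Map("a" -> "1")))
  //   // e.g. Seq("a=1/b=2", "a=1/b=3")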
/**
* List the metadata of all partitions that belong to the specified table, assuming it exists.
*
* A partial partition spec may optionally be provided to filter the partitions returned.
* For instance, if there exist partitions (a='1', b='2'), (a='1', b='3') and (a='2', b='4'),
* then a partial spec of (a='1') will return the first two only.
*/
def listPartitions(
tableName: TableIdentifier,
partialSpec: Option[TablePartitionSpec] = None): Seq[CatalogTablePartition] = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
partialSpec.foreach { spec =>
requirePartialMatchedPartitionSpec(Seq(spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(Seq(spec))
}
externalCatalog.listPartitions(db, table, partialSpec)
}
/**
* List the metadata of partitions that belong to the specified table, assuming it exists, that
* satisfy the given partition-pruning predicate expressions.
*/
def listPartitionsByFilter(
tableName: TableIdentifier,
predicates: Seq[Expression]): Seq[CatalogTablePartition] = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
externalCatalog.listPartitionsByFilter(db, table, predicates, conf.sessionLocalTimeZone)
}
/**
* Verify if the input partition spec has any empty value.
*/
private def requireNonEmptyValueInPartitionSpec(specs: Seq[TablePartitionSpec]): Unit = {
specs.foreach { s =>
if (s.values.exists(_.isEmpty)) {
val spec = s.map(p => p._1 + "=" + p._2).mkString("[", ", ", "]")
throw new AnalysisException(
s"Partition spec is invalid. The spec ($spec) contains an empty partition column value")
}
}
}
/**
   * Verify that the input partition spec exactly matches the partition spec defined for the table.
   * The columns must be the same, but their order may differ.
*/
private def requireExactMatchedPartitionSpec(
specs: Seq[TablePartitionSpec],
table: CatalogTable): Unit = {
val defined = table.partitionColumnNames.sorted
specs.foreach { s =>
if (s.keys.toSeq.sorted != defined) {
throw new AnalysisException(
s"Partition spec is invalid. The spec (${s.keys.mkString(", ")}) must match " +
s"the partition spec (${table.partitionColumnNames.mkString(", ")}) defined in " +
s"table '${table.identifier}'")
}
}
}
/**
   * Verify that the input partition spec partially matches the partition spec defined for the table.
   * That is, the columns of the partition spec must be a subset of the defined partition columns.
*/
private def requirePartialMatchedPartitionSpec(
specs: Seq[TablePartitionSpec],
table: CatalogTable): Unit = {
val defined = table.partitionColumnNames
specs.foreach { s =>
if (!s.keys.forall(defined.contains)) {
throw new AnalysisException(
s"Partition spec is invalid. The spec (${s.keys.mkString(", ")}) must be contained " +
s"within the partition spec (${table.partitionColumnNames.mkString(", ")}) defined " +
s"in table '${table.identifier}'")
}
}
}
// ----------------------------------------------------------------------------
// Functions
// ----------------------------------------------------------------------------
// There are two kinds of functions, temporary functions and metastore
// functions (permanent UDFs). Temporary functions are isolated across
// sessions. Metastore functions can be used across multiple sessions as
// their metadata is persisted in the underlying catalog.
// ----------------------------------------------------------------------------
// -------------------------------------------------------
// | Methods that interact with metastore functions only |
// -------------------------------------------------------
/**
* Create a metastore function in the database specified in `funcDefinition`.
* If no such database is specified, create it in the current database.
*/
def createFunction(funcDefinition: CatalogFunction, ignoreIfExists: Boolean): Unit = {
val db = formatDatabaseName(funcDefinition.identifier.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
val identifier = FunctionIdentifier(funcDefinition.identifier.funcName, Some(db))
val newFuncDefinition = funcDefinition.copy(identifier = identifier)
if (!functionExists(identifier)) {
externalCatalog.createFunction(db, newFuncDefinition)
} else if (!ignoreIfExists) {
throw new FunctionAlreadyExistsException(db = db, func = identifier.toString)
}
}
/**
* Drop a metastore function.
* If no database is specified, assume the function is in the current database.
*/
def dropFunction(name: FunctionIdentifier, ignoreIfNotExists: Boolean): Unit = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
val identifier = name.copy(database = Some(db))
if (functionExists(identifier)) {
if (functionRegistry.functionExists(identifier)) {
// If we have loaded this function into the FunctionRegistry,
// also drop it from there.
// For a permanent function, because we loaded it to the FunctionRegistry
// when it's first used, we also need to drop it from the FunctionRegistry.
functionRegistry.dropFunction(identifier)
}
externalCatalog.dropFunction(db, name.funcName)
} else if (!ignoreIfNotExists) {
throw new NoSuchFunctionException(db = db, func = identifier.toString)
}
}
/**
   * Overwrite a metastore function in the database specified in `funcDefinition`.
* If no database is specified, assume the function is in the current database.
*/
def alterFunction(funcDefinition: CatalogFunction): Unit = {
val db = formatDatabaseName(funcDefinition.identifier.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
val identifier = FunctionIdentifier(funcDefinition.identifier.funcName, Some(db))
val newFuncDefinition = funcDefinition.copy(identifier = identifier)
if (functionExists(identifier)) {
if (functionRegistry.functionExists(identifier)) {
// If we have loaded this function into the FunctionRegistry,
// also drop it from there.
// For a permanent function, because we loaded it to the FunctionRegistry
// when it's first used, we also need to drop it from the FunctionRegistry.
functionRegistry.dropFunction(identifier)
}
externalCatalog.alterFunction(db, newFuncDefinition)
} else {
throw new NoSuchFunctionException(db = db, func = identifier.toString)
}
}
/**
* Retrieve the metadata of a metastore function.
*
* If a database is specified in `name`, this will return the function in that database.
* If no database is specified, this will return the function in the current database.
*/
def getFunctionMetadata(name: FunctionIdentifier): CatalogFunction = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
externalCatalog.getFunction(db, name.funcName)
}
/**
* Check if the specified function exists.
*/
def functionExists(name: FunctionIdentifier): Boolean = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
functionRegistry.functionExists(name) ||
externalCatalog.functionExists(db, name.funcName)
}
// ----------------------------------------------------------------
// | Methods that interact with temporary and metastore functions |
// ----------------------------------------------------------------
/**
* Constructs a [[FunctionBuilder]] based on the provided class that represents a function.
*/
private def makeFunctionBuilder(name: String, functionClassName: String): FunctionBuilder = {
val clazz = Utils.classForName(functionClassName)
(input: Seq[Expression]) => makeFunctionExpression(name, clazz, input)
}
/**
* Constructs a [[Expression]] based on the provided class that represents a function.
*
* This performs reflection to decide what type of [[Expression]] to return in the builder.
*/
protected def makeFunctionExpression(
name: String,
clazz: Class[_],
input: Seq[Expression]): Expression = {
val clsForUDAF =
Utils.classForName("org.apache.spark.sql.expressions.UserDefinedAggregateFunction")
if (clsForUDAF.isAssignableFrom(clazz)) {
val cls = Utils.classForName("org.apache.spark.sql.execution.aggregate.ScalaUDAF")
cls.getConstructor(classOf[Seq[Expression]], clsForUDAF, classOf[Int], classOf[Int])
.newInstance(input, clazz.newInstance().asInstanceOf[Object], Int.box(1), Int.box(1))
.asInstanceOf[Expression]
} else {
throw new AnalysisException(s"No handler for UDAF '${clazz.getCanonicalName}'. " +
s"Use sparkSession.udf.register(...) instead.")
}
}
/**
* Loads resources such as JARs and Files for a function. Every resource is represented
* by a tuple (resource type, resource uri).
*/
def loadFunctionResources(resources: Seq[FunctionResource]): Unit = {
resources.foreach(functionResourceLoader.loadResource)
}
/**
* Registers a temporary or permanent function into a session-specific [[FunctionRegistry]]
*/
def registerFunction(
funcDefinition: CatalogFunction,
overrideIfExists: Boolean,
functionBuilder: Option[FunctionBuilder] = None): Unit = {
val func = funcDefinition.identifier
if (functionRegistry.functionExists(func) && !overrideIfExists) {
throw new AnalysisException(s"Function $func already exists")
}
val info = new ExpressionInfo(funcDefinition.className, func.database.orNull, func.funcName)
val builder =
functionBuilder.getOrElse {
val className = funcDefinition.className
if (!Utils.classIsLoadable(className)) {
throw new AnalysisException(s"Can not load class '$className' when registering " +
s"the function '$func', please make sure it is on the classpath")
}
makeFunctionBuilder(func.unquotedString, className)
}
functionRegistry.registerFunction(func, info, builder)
}
/**
* Drop a temporary function.
*/
def dropTempFunction(name: String, ignoreIfNotExists: Boolean): Unit = {
if (!functionRegistry.dropFunction(FunctionIdentifier(name)) && !ignoreIfNotExists) {
throw new NoSuchTempFunctionException(name)
}
}
/**
   * Return whether the given function is a temporary function. Returns false if it does not exist.
*/
def isTemporaryFunction(name: FunctionIdentifier): Boolean = {
// copied from HiveSessionCatalog
val hiveFunctions = Seq("histogram_numeric")
// A temporary function is a function that has been registered in functionRegistry
// without a database name, and is neither a built-in function nor a Hive function
name.database.isEmpty &&
functionRegistry.functionExists(name) &&
!FunctionRegistry.builtin.functionExists(name) &&
!hiveFunctions.contains(name.funcName.toLowerCase(Locale.ROOT))
}
protected def failFunctionLookup(name: FunctionIdentifier): Nothing = {
throw new NoSuchFunctionException(
db = name.database.getOrElse(getCurrentDatabase), func = name.funcName)
}
/**
* Look up the [[ExpressionInfo]] associated with the specified function, assuming it exists.
*/
def lookupFunctionInfo(name: FunctionIdentifier): ExpressionInfo = synchronized {
// TODO: just make function registry take in FunctionIdentifier instead of duplicating this
val database = name.database.orElse(Some(currentDb)).map(formatDatabaseName)
val qualifiedName = name.copy(database = database)
functionRegistry.lookupFunction(name)
.orElse(functionRegistry.lookupFunction(qualifiedName))
.getOrElse {
val db = qualifiedName.database.get
requireDbExists(db)
if (externalCatalog.functionExists(db, name.funcName)) {
val metadata = externalCatalog.getFunction(db, name.funcName)
new ExpressionInfo(
metadata.className,
qualifiedName.database.orNull,
qualifiedName.identifier)
} else {
failFunctionLookup(name)
}
}
}
/**
* Return an [[Expression]] that represents the specified function, assuming it exists.
*
* For a temporary function or a permanent function that has been loaded,
* this method will simply lookup the function through the
* FunctionRegistry and create an expression based on the builder.
*
* For a permanent function that has not been loaded, we will first fetch its metadata
* from the underlying external catalog. Then, we will load all resources associated
* with this function (i.e. jars and files). Finally, we create a function builder
* based on the function class and put the builder into the FunctionRegistry.
* The name of this function in the FunctionRegistry will be `databaseName.functionName`.
*/
def lookupFunction(
name: FunctionIdentifier,
children: Seq[Expression]): Expression = synchronized {
// Note: the implementation of this function is a little bit convoluted.
// We probably shouldn't use a single FunctionRegistry to register all three kinds of functions
// (built-in, temp, and external).
if (name.database.isEmpty && functionRegistry.functionExists(name)) {
// This function has been already loaded into the function registry.
return functionRegistry.lookupFunction(name, children)
}
// If the name itself is not qualified, add the current database to it.
val database = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
val qualifiedName = name.copy(database = Some(database))
if (functionRegistry.functionExists(qualifiedName)) {
// This function has been already loaded into the function registry.
// Unlike the above block, we find this function by using the qualified name.
return functionRegistry.lookupFunction(qualifiedName, children)
}
// The function has not been loaded to the function registry, which means
// that the function is a permanent function (if it actually has been registered
// in the metastore). We need to first put the function in the FunctionRegistry.
// TODO: why not just check whether the function exists first?
val catalogFunction = try {
externalCatalog.getFunction(database, name.funcName)
} catch {
case _: AnalysisException => failFunctionLookup(name)
case _: NoSuchPermanentFunctionException => failFunctionLookup(name)
}
loadFunctionResources(catalogFunction.resources)
// Please note that qualifiedName is provided by the user. However,
// catalogFunction.identifier.unquotedString is returned by the underlying
// catalog. So, it is possible that qualifiedName is not exactly the same as
    // catalogFunction.identifier.unquotedString (they may differ in case sensitivity).
    // Here, we preserve the input from the user.
registerFunction(catalogFunction.copy(identifier = qualifiedName), overrideIfExists = false)
// Now, we need to create the Expression.
functionRegistry.lookupFunction(qualifiedName, children)
}
/**
* List all functions in the specified database, including temporary functions. This
* returns the function identifier and the scope in which it was defined (system or user
* defined).
*/
def listFunctions(db: String): Seq[(FunctionIdentifier, String)] = listFunctions(db, "*")
/**
* List all matching functions in the specified database, including temporary functions. This
* returns the function identifier and the scope in which it was defined (system or user
* defined).
*/
def listFunctions(db: String, pattern: String): Seq[(FunctionIdentifier, String)] = {
val dbName = formatDatabaseName(db)
requireDbExists(dbName)
val dbFunctions = externalCatalog.listFunctions(dbName, pattern).map { f =>
FunctionIdentifier(f, Some(dbName)) }
val loadedFunctions = StringUtils
.filterPattern(functionRegistry.listFunction().map(_.unquotedString), pattern).map { f =>
// In functionRegistry, function names are stored as an unquoted format.
Try(parser.parseFunctionIdentifier(f)) match {
case Success(e) => e
case Failure(_) =>
// The names of some built-in functions are not parsable by our parser, e.g., %
FunctionIdentifier(f)
}
}
val functions = dbFunctions ++ loadedFunctions
// The session catalog caches some persistent functions in the FunctionRegistry
// so there can be duplicates.
functions.map {
case f if FunctionRegistry.functionSet.contains(f) => (f, "SYSTEM")
case f => (f, "USER")
}.distinct
}
// -----------------
// | Other methods |
// -----------------
/**
* Drop all existing databases (except "default"), tables, partitions and functions,
* and set the current database to "default".
*
* This is mainly used for tests.
*/
def reset(): Unit = synchronized {
setCurrentDatabase(DEFAULT_DATABASE)
externalCatalog.setCurrentDatabase(DEFAULT_DATABASE)
listDatabases().filter(_ != DEFAULT_DATABASE).foreach { db =>
dropDatabase(db, ignoreIfNotExists = false, cascade = true)
}
listTables(DEFAULT_DATABASE).foreach { table =>
dropTable(table, ignoreIfNotExists = false, purge = false)
}
listFunctions(DEFAULT_DATABASE).map(_._1).foreach { func =>
if (func.database.isDefined) {
dropFunction(func, ignoreIfNotExists = false)
} else {
dropTempFunction(func.funcName, ignoreIfNotExists = false)
}
}
clearTempTables()
globalTempViewManager.clear()
functionRegistry.clear()
tableRelationCache.invalidateAll()
// restore built-in functions
FunctionRegistry.builtin.listFunction().foreach { f =>
val expressionInfo = FunctionRegistry.builtin.lookupFunction(f)
val functionBuilder = FunctionRegistry.builtin.lookupFunctionBuilder(f)
require(expressionInfo.isDefined, s"built-in function '$f' is missing expression info")
require(functionBuilder.isDefined, s"built-in function '$f' is missing function builder")
functionRegistry.registerFunction(f, expressionInfo.get, functionBuilder.get)
}
}
/**
* Copy the current state of the catalog to another catalog.
*
* This function is synchronized on this [[SessionCatalog]] (the source) to make sure the copied
* state is consistent. The target [[SessionCatalog]] is not synchronized, and should not be
* because the target [[SessionCatalog]] should not be published at this point. The caller must
* synchronize on the target if this assumption does not hold.
*/
private[sql] def copyStateTo(target: SessionCatalog): Unit = synchronized {
target.currentDb = currentDb
// copy over temporary tables
tempTables.foreach(kv => target.tempTables.put(kv._1, kv._2))
}
}
| narahari92/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala | Scala | apache-2.0 | 54,959 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.nn.VariableFormat.Default
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.RandomGenerator
/**
* VariableFormat describe the meaning of each dimension of the variable
* (the trainable parameters of a model like weight and bias) and can be used to
* return the fan in and fan out size of the variable when provided the variable shape.
*/
trait VariableFormat {
def getFanIn(shape: Array[Int]): Int = {
throw new Exception("FanIn is not defined in this format")
}
def getFanOut(shape: Array[Int]): Int = {
throw new Exception("FanOut is not defined in this format")
}
}
object VariableFormat {
/**
* The default VariableFormat used when we do not care about
* the specified format of this variable.
*/
case object Default extends VariableFormat {
override def getFanIn(shape: Array[Int]): Int = {
shape.product
}
override def getFanOut(shape: Array[Int]): Int = {
shape.product
}
}
case object ONE_D extends VariableFormat {
override def getFanIn(shape: Array[Int]): Int = {
shape(0)
}
override def getFanOut(shape: Array[Int]): Int = {
1
}
}
case object IN_OUT extends VariableFormat {
override def getFanIn(shape: Array[Int]): Int = {
shape(0)
}
override def getFanOut(shape: Array[Int]): Int = {
shape(1)
}
}
case object OUT_IN extends VariableFormat {
override def getFanIn(shape: Array[Int]): Int = {
shape(1)
}
override def getFanOut(shape: Array[Int]): Int = {
shape(0)
}
}
case object IN_OUT_KW_KH extends VariableFormat {
override def getFanIn(shape: Array[Int]): Int = {
val receptiveFieldSize = shape(2) * shape(3)
shape(0) * receptiveFieldSize
}
override def getFanOut(shape: Array[Int]): Int = {
val receptiveFieldSize = shape(2) * shape(3)
shape(1) * receptiveFieldSize
}
}
case object OUT_IN_KW_KH extends VariableFormat {
override def getFanIn(shape: Array[Int]): Int = {
val receptiveFieldSize = shape(2) * shape(3)
shape(1) * receptiveFieldSize
}
override def getFanOut(shape: Array[Int]): Int = {
val receptiveFieldSize = shape(2) * shape(3)
shape(0) * receptiveFieldSize
}
}
case object GP_OUT_IN_KW_KH extends VariableFormat {
override def getFanIn(shape: Array[Int]): Int = {
val receptiveFieldSize = shape(0) * shape(3) * shape(4)
shape(2) * receptiveFieldSize
}
override def getFanOut(shape: Array[Int]): Int = {
val receptiveFieldSize = shape(0) * shape(3) * shape(4)
shape(1) * receptiveFieldSize
}
}
case object GP_IN_OUT_KW_KH extends VariableFormat {
override def getFanIn(shape: Array[Int]): Int = {
val receptiveFieldSize = shape(0) * shape(3) * shape(4)
shape(1) * receptiveFieldSize
}
override def getFanOut(shape: Array[Int]): Int = {
val receptiveFieldSize = shape(0) * shape(3) * shape(4)
shape(2) * receptiveFieldSize
}
}
case object OUT_IN_KT_KH_KW extends VariableFormat {
override def getFanIn(shape: Array[Int]): Int = {
val receptiveFieldSize = shape(2) * shape(3) * shape(4)
shape(1) * receptiveFieldSize
}
override def getFanOut(shape: Array[Int]): Int = {
val receptiveFieldSize = shape(2) * shape(3) * shape(4)
shape(0) * receptiveFieldSize
}
}
case object GP_KH_KW_IN_OUT extends VariableFormat {
override def getFanIn(shape: Array[Int]): Int = {
val receptiveFieldSize = shape(0) * shape(1) * shape(2)
shape(2) * receptiveFieldSize
}
override def getFanOut(shape: Array[Int]): Int = {
val receptiveFieldSize = shape(0) * shape(1) * shape(2)
shape(3) * receptiveFieldSize
}
}
}
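// Illustrative sketch (not part of the original source): for a convolution weight of shape
// [nOutput = 64, nInput = 32, kH = 3, kW = 3] stored in OUT_IN_KW_KH format, the receptive
// field size is 3 * 3 = 9, so:
//
//   VariableFormat.OUT_IN_KW_KH.getFanIn(Array(64, 32, 3, 3))   // 32 * 9 = 288
//   VariableFormat.OUT_IN_KW_KH.getFanOut(Array(64, 32, 3, 3))  // 64 * 9 = 576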
/**
* Initialization method to initialize bias and weight.
* The init method will be called in Module.reset()
*/
trait InitializationMethod {
type Shape = Array[Int]
/**
* Initialize the given weight and bias.
*
* @param variable the weight to initialize
* @param dataFormat the data format of weight indicating the dimension order of
   *                   the weight. "output_first" means output is in the lower dimension;
* "input_first" means input is in the lower dimension.
*/
def init[T](variable: Tensor[T], dataFormat: VariableFormat = Default)
(implicit ev: TensorNumeric[T]): Unit
}
/**
* Initializer that generates tensors with a uniform distribution.
*
* It draws samples from a uniform distribution within [-limit, limit]
* where "limit" is "1/sqrt(fan_in)"
*
*/
case object RandomUniform extends InitializationMethod {
override def init[T](variable: Tensor[T], dataFormat: VariableFormat)
(implicit ev: TensorNumeric[T]): Unit = {
val shape = variable.size()
val fanIn = dataFormat.getFanIn(shape)
val stdv = 1.0 / math.sqrt(fanIn)
variable.rand(-stdv, stdv)
}
}
/**
* Initializer that generates tensors with a uniform distribution.
*
* It draws samples from a uniform distribution within [lower, upper]
*
*/
case class RandomUniform(lower: Double, upper: Double) extends InitializationMethod {
def init[T](variable: Tensor[T], dataFormat: VariableFormat = Default)
(implicit ev: TensorNumeric[T]): Unit = {
variable.rand(lower, upper)
}
}
/**
* Initializer that generates tensors with a normal distribution.
*
*/
case class RandomNormal(mean: Double, stdv: Double) extends InitializationMethod {
def init[T](variable: Tensor[T], dataFormat: VariableFormat = Default)
(implicit ev: TensorNumeric[T]): Unit = {
variable.randn(mean, stdv)
}
}
/**
* Initializer that generates tensors with zeros.
*/
case object Zeros extends InitializationMethod {
def init[T](variable: Tensor[T], dataFormat: VariableFormat = Default)
(implicit ev: TensorNumeric[T]): Unit = {
variable.zero()
}
}
/**
 * Initializer that generates tensors with ones.
*/
case object Ones extends InitializationMethod {
def init[T](variable: Tensor[T], dataFormat: VariableFormat = Default)
(implicit ev: TensorNumeric[T]): Unit = {
variable.fill(ev.one)
}
}
/**
* Initializer that generates tensors with certain constant double.
*/
case class ConstInitMethod(value: Double) extends InitializationMethod {
def init[T](variable: Tensor[T], dataFormat: VariableFormat = Default)
(implicit ev: TensorNumeric[T]): Unit = {
variable.fill(ev.fromType(value))
}
}
/**
* In short, it helps signals reach deep into the network.
*
 * During the training process of a deep neural network:
 * 1. If the weights in a network start too small,
* then the signal shrinks as it passes through
* each layer until it’s too tiny to be useful.
*
* 2. If the weights in a network start too large,
* then the signal grows as it passes through each
* layer until it’s too massive to be useful.
*
* Xavier initialization makes sure the weights are ‘just right’,
* keeping the signal in a reasonable range of values through many layers.
*
* More details on the paper
* [Understanding the difficulty of training deep feedforward neural networks]
* (http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf)
*/
case object Xavier extends InitializationMethod {
def init[T](variable: Tensor[T], dataFormat: VariableFormat)
(implicit ev: TensorNumeric[T]): Unit = {
val shape = variable.size()
val fanIn = dataFormat.getFanIn(shape)
val fanOut = dataFormat.getFanOut(shape)
val stdv = math.sqrt(6.0 / (fanIn + fanOut))
variable.rand(-stdv, stdv)
}
}
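// Illustrative sketch (not part of the original source): for a fully connected weight of shape
// [outputSize = 100, inputSize = 50] declared with VariableFormat.OUT_IN, fanIn = 50 and
// fanOut = 100, so stdv = sqrt(6 / 150) = 0.2 and the weight is filled uniformly from
// (-0.2, 0.2). `weight` is an assumed Tensor[Float].
//
//   Xavier.init(weight, VariableFormat.OUT_IN)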
/**
* A Filler based on the paper [He, Zhang, Ren and Sun 2015]: Specifically
* accounts for ReLU nonlinearities.
*
* Aside: for another perspective on the scaling factor, see the derivation of
* [Saxe, McClelland, and Ganguli 2013 (v3)].
*
* It fills the incoming matrix by randomly sampling Gaussian data with std =
 * sqrt(2 / n), where n is either fanOut or the average of fanIn and fanOut,
 * depending on the varianceNormAverage parameter.
 *
 * @param varianceNormAverage whether to use the average of (fanIn + fanOut) or just fanOut
*/
case class MsraFiller(varianceNormAverage: Boolean = true) extends InitializationMethod {
def init[T](variable: Tensor[T], dataFormat: VariableFormat)
(implicit ev: TensorNumeric[T]): Unit = {
val shape = variable.size()
val fanIn = dataFormat.getFanIn(shape)
val fanOut = dataFormat.getFanOut(shape)
val n = if (varianceNormAverage) {
(fanIn + fanOut) / 2
} else {
fanOut
}
val std = math.sqrt(2.0 / n)
variable.apply1(_ => ev.fromType(RandomGenerator.RNG.normal(0, std)))
}
}
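// Illustrative sketch (not part of the original source): for the same [64, 32, 3, 3] weight in
// OUT_IN_KW_KH format, fanIn = 288 and fanOut = 576; with the default varianceNormAverage = true,
// n = (288 + 576) / 2 = 432 and std = sqrt(2 / 432), about 0.068.
//
//   MsraFiller().init(weight, VariableFormat.OUT_IN_KW_KH)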
/**
* Initialize the weight with coefficients for bilinear interpolation.
*
 * A common use case is with a DeconvolutionLayer used for upsampling.
* The variable tensor passed in the init function should have 5 dimensions
* of format [nGroup, nInput, nOutput, kH, kW], and kH should be equal to kW
*
*/
case object BilinearFiller extends InitializationMethod {
def init[T](variable: Tensor[T], dataFormat: VariableFormat = Default)
(implicit ev: TensorNumeric[T]): Unit = {
val shape = variable.size()
require(shape.length == 5, s"weight must be 5 dim, " +
s"but got ${shape.length}")
val kH = shape(3)
val kW = shape(4)
require(kH == kW, s"Kernel $kH * $kW must be square")
val f = Math.ceil(kW / 2.0).toInt
val c = (2 * f - 1 - f % 2) / (2.0f * f)
val weightArray = variable.storage().array()
val weightOffset = variable.storageOffset() - 1
var i = 0
while(i < variable.nElement()) {
val x : Float = i % kW
val y : Float = (i / kW) % kH
weightArray(i + weightOffset) = ev.fromType[Float](
(1f - math.abs(x / f - c)) * (1f - math.abs(y / f - c)))
i += 1
}
}
}
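// Illustrative sketch (not part of the original source): for a 4 x 4 kernel
// (shape [nGroup, nInput, nOutput, 4, 4]), f = ceil(4 / 2) = 2 and c = 0.75, so the per-axis
// factor (1 - |x / f - c|) for x = 0..3 is (0.25, 0.75, 0.75, 0.25), and every 4 x 4 kernel is
// the outer product of that vector with itself, i.e. the classic bilinear upsampling kernel
// for stride-2 deconvolution.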
| qiuxin2012/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/InitializationMethod.scala | Scala | apache-2.0 | 10,757 |
package p01regression
import breeze.linalg._
import breeze.plot._
import breeze.stats.distributions._
object Data {
/**
   * Given vectors of x and y coordinates, generate the lowest-degree polynomial that passes through all of the points.
*/
def polynomial1D(x: DenseVector[Double], y: DenseVector[Double]): DenseVector[Double] = {
val nPoints = x.length
val vanderMat = vandermonde(x, nPoints)
    return vanderMat \ y
}
def vandermonde(x: DenseVector[Double], order: Int): DenseMatrix[Double] = {
val nObs = x.length
return DenseMatrix.tabulate[Double](nObs, order)((i, j) => math.pow(x(i), j))
}
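  // Illustrative sketch (not part of the original source): fitting the three points
  // (0, 1), (1, 2), (2, 5). The coefficients come back in increasing powers of x
  // (constant term first), matching the column order of the Vandermonde matrix:
  //
  //   polynomial1D(DenseVector(0.0, 1.0, 2.0), DenseVector(1.0, 2.0, 5.0))
  //   // DenseVector(1.0, 0.0, 1.0), i.e. y = 1 + x^2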
/**
* @return a pair of vectors (x, y)
*/
def oneDimFunction(
pointsX: DenseVector[Double],
pointsY: DenseVector[Double],
sd: Double,
min: Double,
max: Double,
nPoints: Int): (DenseVector[Double], DenseVector[Double]) = {
val polyDegree = pointsX.length
val coeff = polynomial1D(pointsX, pointsY)
def polyEval(coeff: DenseVector[Double], x: Double): Double = {
val powers = DenseVector.tabulate[Double](pointsX.length)(i => math.pow(x, i))
return coeff dot powers
}
val uni = new Uniform(min, max)
val x = DenseVector.rand[Double](nPoints, uni)
val gauss = new Gaussian(0.0, sd)
val y = x
.map(polyEval(coeff, _))
.map(_ + (gauss.sample))
return (x, y)
}
def oneDimFunctionTest = {
val (x, y) = oneDimFunction(
DenseVector[Double](0.0, 10.0, 16.0), // pointsX
DenseVector[Double](0.0, 10.0, 8.0), // pointsY
1.0, // sd
-3.0, // min
18.0, // max,
100) // nPoints
val fig = Figure()
val plt = fig.subplot(0)
plt += plot(x, y, '.')
}
} | vkubicki/Sernel | src/main/scala/p01regression/01 - Data.scala | Scala | bsd-3-clause | 1,770 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.client
import org.junit.Assert.assertEquals
import org.junit.Test
import org.scalatest.junit.{MustMatchersForJUnit, AssertionsForJUnit}
trait XForms extends AssertionsForJUnit with MustMatchersForJUnit with FormRunnerOps {
// https://github.com/orbeon/orbeon-forms/issues/889
@Test def issue889(): Unit = {
def clickCheckbox() = clickElementByCSS("#hide-checkbox input")
def liGroupElements = cssSelector("#group-begin-ul-group ~ li:not(.xforms-group-begin-end)").findAllElements.to[List]
def divGroupElements = cssSelector("#div-group > div").findAllElements.to[List]
def checkNonRelevantClasses(elements: List[Element]) = {
elements(0).classes must not contain ("class42")
elements(1).classes must contain ("myClass")
}
def checkRelevantClasses(elements: List[Element]) = {
elements(0).classes must contain ("class42")
elements(1).classes must contain ("myClass")
}
// Just after loading (checkbox is selected, content is hidden)
loadOrbeonPage("/unit-tests/issue-0889")
liGroupElements foreach (_.classes must contain ("xforms-disabled"))
cssSelector("#div-group").element.classes must contain ("xforms-disabled")
checkNonRelevantClasses(liGroupElements)
checkNonRelevantClasses(divGroupElements)
// Show content
clickCheckbox()
liGroupElements foreach (_.classes must not contain ("xforms-disabled"))
cssSelector("#div-group").element.classes must not contain ("xforms-disabled")
checkRelevantClasses(liGroupElements)
checkRelevantClasses(divGroupElements)
// Hide content again
clickCheckbox()
liGroupElements foreach (_.classes must contain ("xforms-disabled-subsequent"))
cssSelector("#div-group").element.classes must contain ("xforms-disabled-subsequent")
checkNonRelevantClasses(liGroupElements)
checkNonRelevantClasses(divGroupElements)
}
// https://github.com/orbeon/orbeon-forms/commit/9bfa9ad051c2bafa8c88e8562bb55f46dd9e7666
@Test def eventProperties(): Unit = {
def checkOutputs(outputs: Seq[(String, String)]) =
outputs.foreach { case (cssClass, expected) ⇒
val actual = $("." + cssClass + " span").getText
assertEquals(expected, actual)
}
loadOrbeonPage("/unit-tests/feature-event-properties")
checkOutputs(Seq("triggered" → "false", "p1" → "", "p2" → ""))
$("#send-event button").click()
waitForAjaxResponse()
checkOutputs(Seq("triggered" → "true", "p1" → "v1", "p2" → "v2"))
}
}
| evlist/orbeon-forms | src/test/scala/org/orbeon/oxf/client/XForms.scala | Scala | lgpl-2.1 | 3,386 |
package database
// external
import akka.actor.{Actor, ActorSystem, Props}
import akka.contrib.pattern.ReceivePipeline
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._
import models.analytics.theworks.KitchenSink
import models.strategies.FirstCrossStrategy
import org.scalatest._
import scala.concurrent.{Await, Future, Promise}
import scala.concurrent.duration._
// internal
import models.strategies.GoldenCrossStrategy
import models.market.MarketStructures.MarketMessage
class PoloniexTradeSpec extends FlatSpec with PoloniexDatabase with BeforeAndAfter {
// Implicit boilerplate necessary for creating akka-streams stuff
implicit lazy val system = ActorSystem("poloniex-tests")
implicit lazy val materializer = ActorMaterializer()
override def afterAll() {
Await.result(system.terminate, Duration.Inf)
}
case object Done
"GoldenCrossStrategy" should "be positive" in {
val processMessages: Future[BigDecimal] = {
val promise = Promise[BigDecimal]()
val actor = system.actorOf(Props(new Actor with KitchenSink {
val strategy = new FirstCrossStrategy(this)
setAllMarketAverages(exponentialMovingAverages(List(3, 17)))
def receive: Receive = {
case msg: MarketMessage =>
strategy.handleMessage(msg)
case Done =>
strategy.printResults()
// complete our future with the final balance
promise.success(strategy.totalBalance)
}
}))
// Send Done when complete
val sink = Sink.actorRef[MarketMessage](actor, onCompleteMessage = Done)
messageSource
.via(messageFlow)
.to(sink)
.run()
promise.future
}
Await.result(processMessages, Duration.Inf)
// must result in a profit
assert(processMessages.futureValue > 1.0)
}
// // #Uncomment to compute the perfect scenario
// "TheoreticalPerfectStrategy" should "be positive" in {
// val process: Future[BigDecimal] = {
//
// val promise = Promise[BigDecimal]()
//
// val actor = system.actorOf(Props(new Actor with TheoreticalPerfectStrategy {
// def receive = handleMessageUpdate orElse myReceive
//
// def myReceive: Receive = {
// case Done =>
// println(balance)
//
// // complete our future with the final balance
// promise.success(balance)
// }
// }))
//
// val sink = Sink.actorRef[MarketMessage2](actor, onCompleteMessage = Done)
//
// messageSource
// .via(messageFlow)
// .to(sink)
// .run()
//
// promise.future
// }
//
// Await.result(process, Duration.Inf)
// assert(process.futureValue > 1.0)
// }
} | asciiu/polo | test/database/PoloniexTradeSpec.scala | Scala | mit | 2,742 |
package com.atomist.project.common.yaml
import java.util.regex.{Pattern, PatternSyntaxException}
import com.atomist.param._
import com.atomist.project.common.template.{InvalidTemplateException, TemplateBasedProjectOperationInfo}
import com.fasterxml.jackson.annotation.JsonProperty
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.fasterxml.jackson.module.scala.experimental.ScalaObjectMapper
import org.apache.commons.lang3.builder.ReflectionToStringBuilder
import scala.util.{Failure, Success, Try}
/**
* Parse YAML file to return ProjectOperationInfo.
*/
object YamlProjectOperationInfoParser {
private val mapper = new ObjectMapper(new YAMLFactory()) with ScalaObjectMapper
mapper.registerModule(DefaultScalaModule)
@throws[InvalidYamlDescriptorException]
def parse(yaml: String): TemplateBasedProjectOperationInfo = {
if (yaml == null || "".equals(yaml))
throw new InvalidYamlDescriptorException("YAML content required in template metadata file")
Try(mapper.readValue(yaml, classOf[BoundProjectOperationInfo])) match {
case s: Success[BoundProjectOperationInfo] =>
val badPatterns = s.value.parameters.flatMap(p => patternError(p))
if (badPatterns.nonEmpty)
throw new InvalidYamlDescriptorException(s"Bad regexp patterns: ${badPatterns.mkString(",")}")
s.value
case f: Failure[BoundProjectOperationInfo] =>
throw new InvalidYamlDescriptorException(s"Failed to parse YAML [$yaml]: ${f.exception.getMessage}", f.exception)
}
}
private def patternError(p: Parameter): Option[String] = {
try {
Pattern.compile(p.getPattern)
None
} catch {
case pse: PatternSyntaxException => Some(s"${p.getName}: Bad regular expression pattern: ${pse.getMessage}")
}
}
}
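// Illustrative sketch (not part of the original source): a YAML descriptor of the shape this
// parser accepts, inferred from the @JsonProperty annotations on BoundProjectOperationInfo and
// TagHolder below. The exact fields of a parameter entry are an assumption beyond `name` and
// `pattern`, which the parser reads via getName/getPattern.
//
//   name: my-template
//   description: Creates a new service project
//   template_name: service
//   type: mustache
//   parameters:
//     - name: service_name
//       pattern: "^[a-z][a-z0-9_]*$"
//   tags:
//     - name: scala
//       description: Scala projects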
private class BoundProjectOperationInfo extends TemplateBasedProjectOperationInfo {
@JsonProperty("name")
var name: String = _
@JsonProperty("description")
var description: String = _
@JsonProperty("template_name")
var templateName: String = _
@JsonProperty("type")
var _templateType: String = _
override def templateType: Option[String] =
if (_templateType == null || "".equals(_templateType)) None
else Some(_templateType)
@JsonProperty("parameters")
private var _params: Seq[Parameter] = Nil
@JsonProperty("tags")
private var _tags: Seq[TagHolder] = Nil
override def parameters: Seq[Parameter] = _params
override def tags: Seq[Tag] = _tags.map(tw => tw.toTag)
override def toString = ReflectionToStringBuilder.toString(this)
}
private class TagHolder {
@JsonProperty
var name: String = _
@JsonProperty
var description: String = _
def toTag = Tag(name, description)
}
class InvalidYamlDescriptorException(msg: String, ex: Throwable = null) extends InvalidTemplateException(msg, ex)
| atomist/rug | src/main/scala/com/atomist/project/common/yaml/YamlProjectOperationInfoParser.scala | Scala | gpl-3.0 | 2,964 |
package sttp.client3.armeria.zio
import sttp.capabilities.zio.ZioStreams
import sttp.client3.SttpBackend
import sttp.client3.impl.zio.{ZioServerSentEvents, ZioTestBase}
import sttp.client3.internal._
import sttp.client3.testing.ConvertToFuture
import sttp.client3.testing.streaming.StreamingTest
import sttp.model.sse.ServerSentEvent
import zio.stream.Stream
import zio.{Chunk, Task}
class ArmeriaZioStreamingTest extends StreamingTest[Task, ZioStreams] with ZioTestBase {
override val streams: ZioStreams = ZioStreams
override val backend: SttpBackend[Task, ZioStreams] =
runtime.unsafeRun(ArmeriaZioBackend())
override implicit val convertToFuture: ConvertToFuture[Task] = convertZioTaskToFuture
override def bodyProducer(arrays: Iterable[Array[Byte]]): Stream[Throwable, Byte] =
Stream.fromChunks(arrays.map(Chunk.fromArray).toSeq: _*)
override def bodyConsumer(stream: Stream[Throwable, Byte]): Task[String] =
stream.runCollect.map(bytes => new String(bytes.toArray, Utf8))
// TODO: consider if viaFunction is what we want
override def sseConsumer(stream: Stream[Throwable, Byte]): Task[List[ServerSentEvent]] =
stream.viaFunction(ZioServerSentEvents.parse).runCollect.map(_.toList)
override protected def supportsStreamingMultipartParts: Boolean = false
}
| softwaremill/sttp | armeria-backend/zio/src/test/scala/sttp/client3/armeria/zio/ArmeriaZioStreamingTest.scala | Scala | apache-2.0 | 1,300 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.iota.fey.performer
import akka.actor.{ActorSystem, Props}
import org.apache.iota.fey.FeyGenericActor.PROCESS
import scala.concurrent.duration._
object Application extends App {
/* print("Starting")
implicit val system = ActorSystem("STREAM-RUN")
val timestamp = system.actorOf(Props(classOf[Timestamp], Map.empty, 1.minutes, Map.empty, 1.seconds, "", "", false), name = "TIMESTAMP")
timestamp ! PROCESS("Stream it")
val heartbeat = system.actorOf(Props(classOf[Heartbeat], Map.empty, 1.minutes, Map.empty, 1.seconds, "", "", false), name = "HEARTBEAT")
heartbeat ! PROCESS("Stream it")
*/
}
| barbaragomes/incubator-iota | performers/stream/src/main/scala/org/apache/iota/fey/performer/Application.scala | Scala | apache-2.0 | 1,434 |
package com.websudos.phantom.db
import com.websudos.phantom.dsl._
import com.websudos.phantom.testkit.suites.PhantomCassandraTestSuite
import com.websudos.util.testing._
class DatabaseImplTest extends PhantomCassandraTestSuite {
val db = new TestDatabase
val db2 = new ValueInitDatabase
it should "instantiate a database and collect references to the tables" in {
db.tables.size shouldEqual 4
}
it should "automatically generate the CQL schema and initialise tables " in {
db.autocreate().future().successful {
res => {
res.nonEmpty shouldEqual true
}
}
}
ignore should "instantiate a database object and collect references to value fields" in {
db2.tables.size shouldEqual 4
}
ignore should "automatically generate the CQL schema and initialise tables for value tables" in {
db2.autocreate().future().successful {
res => {
res.nonEmpty shouldEqual true
}
}
}
}
| analytically/phantom | phantom-dsl/src/test/scala/com/websudos/phantom/db/DatabaseImplTest.scala | Scala | bsd-2-clause | 952 |
package pimpathon
import pimpathon.multiMap.IgnoreFromCBF
import scala.collection.generic.CanBuildFrom
import scala.collection.{breakOut, mutable => M}
import scala.collection.immutable.{Iterable, Map => ▶:}
import pimpathon.builder.BuilderPimps
import pimpathon.map.MapPimps
object nestedMap {
type NMCBF[K1, K2, V] = CanBuildFrom[Nothing, (K1, K2, V), K1 ▶: K2 ▶: V]
implicit def build[K1, K2, V]: NMCBF[K1, K2, V] = new NestedMapCanBuilderFrom[K1, K2, V]
implicit class NestedMapPimps[K1, K2, V](val self: K1 ▶: K2 ▶: V) extends AnyVal {
def flipNesting: K2 ▶: K1 ▶: V = self.flatMap(o ⇒ o._2.map(i ⇒ (i._1, o._1, i._2)))(breakOut)
def +(kkv: (K1, K2, V)): K1 ▶: K2 ▶: V = append(kkv._1, kkv._2, kkv._3)
def append(k1: K1, k2: K2, v: V): K1 ▶: K2 ▶: V = self + ((k1, self.getOrEmpty(k1) + ((k2, v))))
def getOrEmpty(k1: K1): K2 ▶: V = self.getOrElse(k1, Map.empty[K2, V])
def nestedMap: NestedMapConflictingPimps[K1, K2, V] = new NestedMapConflictingPimps[K1, K2, V](self)
}
class NestedMapConflictingPimps[K1, K2, V](self: K1 ▶: K2 ▶: V) {
def mapValuesEagerly[W](f: V ⇒ W): K1 ▶: K2 ▶: W = self.mapValuesEagerly(_.mapValuesEagerly(f))
def mapKeysEagerly[C](f: K2 ⇒ C): K1 ▶: C ▶: V = self.mapValuesEagerly(_.mapKeysEagerly(f))
def mapEntries[C1, C2, W](f: (K1, K2, V) => (C1, C2, W)): C1 ▶: C2 ▶: W =
build[C1, C2, W]().run(_ ++= (for { (k1, k2v) <- self; (k2, v) <- k2v } yield f(k1, k2, v)))
}
object NestedMap {
def build[K1, K2, V]: NMCBF[K1, K2, V] = new NestedMapCanBuilderFrom[K1, K2, V]
def empty[K1, K2, V]: K1 ▶: K2 ▶: V = Map.empty[K1, K2 ▶: V]
}
class NestedMapCanBuilderFrom[K1, K2, V] extends NMCBF[K1, K2, V]
with IgnoreFromCBF[Nothing, (K1, K2, V), K1 ▶: K2 ▶: V] {
def apply(): M.Builder[(K1, K2, V), K1 ▶: K2 ▶: V] = new NestedMapBuilder[K1, K2, V]()
}
class NestedMapBuilder[K1, K2, V](map: M.Map[K1, K2 ▶: V] = M.Map.empty[K1, K2 ▶: V])
extends M.Builder[(K1, K2, V), K1 ▶: K2 ▶: V] {
def +=(elem: (K1, K2, V)): this.type = { add(elem._1, elem._2, elem._3); this}
def result(): K1 ▶: K2 ▶: V = map.map(entry ⇒ entry)(breakOut)
def clear(): Unit = map.clear()
private def add(k1: K1, k2: K2, v: V): Unit = map.put(k1, map.getOrElse(k1, Map.empty[K2, V]) + ((k2, v)))
}
} | raymanoz/pimpathon | src/main/scala/pimpathon/nestedMap.scala | Scala | apache-2.0 | 2,412 |
package edu.gemini.pit.ui.util
import java.awt
import javax.swing
import scala.swing._
import swing.text.JTextComponent
import swing.{JCheckBox, JTextField, JComponent}
/**
* Modal editor for a value of type A, with standard footer buttons.
*/
abstract class StdModalEditor[A](theTitle: String) extends ModalEditor[A] { dialog =>
// Top-level config
title = theTitle
resizable = false
// Call this before setting contents
setDropTarget(Contents.peer)
contents = Contents
pack()
def header: Component = null
def editor: Component
def value: A
  /** Override this, then call validateEditor() to update the Ok button. */
def editorValid:Boolean = true
final def validateEditor() {
Contents.Footer.OkButton.enabled = editorValid
}
// REL-1131 This is necessary to avoid a bug that shows in certain Linux systems on Sun's JDK
def setDropTarget(c:JComponent) {
c match {
case _:JTextComponent => c.setDropTarget(null)
case _ =>
}
c.getComponents.collect {
case j:JComponent => j
}.foreach(setDropTarget)
}
// Our main content object
object Contents extends BorderPanel {
// Space things out a little more
peer.setLayout(new awt.BorderLayout(8, 8))
border = swing.BorderFactory.createEmptyBorder(8, 8, 8, 8)
// Add our content, defined below
Option(header).foreach { add(_, BorderPanel.Position.North) }
add(editor, BorderPanel.Position.Center)
add(Footer, BorderPanel.Position.South)
// Footer is a standard widget
lazy val Footer = OkCancelFooter(dialog) {
close(value)
}
}
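
  // Subclassing sketch (added for illustration; not part of the original file). A
  // minimal editor for a String value; the exact widget wiring is an assumption:
  //
  //   class NameEditor(initial: String) extends StdModalEditor[String]("Edit name") {
  //     private val field = new TextField(initial)
  //     def editor = field
  //     def value = field.text
  //     override def editorValid = field.text.nonEmpty // call validateEditor() on edits
  //   }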
} | arturog8m/ocs | bundle/edu.gemini.pit/src/main/scala/edu/gemini/pit/ui/util/StdModalEditor.scala | Scala | bsd-3-clause | 1,612 |
package info.drealm.s3pi
object Test extends App {
val exampleResourceType = new ResourceType(24)
val exampleResource = WrapperDealer.createNewResource(exampleResourceType)
println("WrapperDealer.createNewResource(new ResourceType(24)): " + exampleResource)
val demoResourceType = new ResourceType(1234)
val demoResource = WrapperDealer.createNewResource(demoResourceType)
println("WrapperDealer.createNewResource(new ResourceType(1234)): " + demoResource)
val defaultResourceType = new ResourceType(241)
val defaultResource = WrapperDealer.createNewResource(defaultResourceType)
println("WrapperDealer.createNewResource(new ResourceType(241)): " + defaultResource)
} | pljones/Reflection | src/scala/info/drealm/s3pi/Test.scala | Scala | gpl-3.0 | 706 |
/**
* Copyright (c) 2002-2012 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal
import java.math.BigDecimal
import java.lang.Character
import org.neo4j.cypher.SyntaxException
/**
 * Comparer is a trait that enables its subclasses to compare two AnyRef values with each other.
*/
trait Comparer {
private def compareValuesOfSameType(l: AnyRef, r: AnyRef): Int = (l, r) match {
case (left: Comparable[AnyRef], right: Comparable[AnyRef]) => left.compareTo(right)
case _ => throw new RuntimeException("This shouldn't happen")
}
private def compareValuesOfDifferentTypes(l: Any, r: Any): Int = (l, r) match {
case (left: Long, right: Number) => BigDecimal.valueOf(left).compareTo(BigDecimal.valueOf(right.doubleValue()))
case (left: Number, right: Long) => BigDecimal.valueOf(left.doubleValue()).compareTo(BigDecimal.valueOf(right))
case (left: Number, right: Number) => java.lang.Double.compare(left.doubleValue(), right.doubleValue())
case (left: String, right: Character) => left.compareTo(right.toString)
case (left: Character, right: String) => left.toString.compareTo(right.toString)
case (null, null) => 0
case (null, _) => 1
case (_, null) => -1
case (left, right) => {
throw new SyntaxException("Don't know how to compare that. Left: " + left.toString + "; Right: " + right.toString)
}
}
private def areComparableOfSameType(l: AnyRef, r: AnyRef): Boolean = {
l.isInstanceOf[Comparable[_]] &&
r.isInstanceOf[Comparable[_]] &&
l.getClass.isInstance(r)
}
def compare(left: Any, right: Any): Int = {
if (left == Nil || right == Nil) {
throw new RuntimeException("Can't compare against NULL")
}
val l = left.asInstanceOf[AnyRef]
val r = right.asInstanceOf[AnyRef]
val comparisonResult: Int =
if (areComparableOfSameType(l, r)) {
compareValuesOfSameType(l, r)
} else {
compareValuesOfDifferentTypes(left, right)
}
comparisonResult
}
}
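
/**
 * Illustrative usage sketch (added; not part of the original file): a hypothetical
 * object mixing in Comparer. `ValueComparer` is an assumed name and is not used
 * elsewhere in the codebase.
 */
object ValueComparer extends Comparer {
  // e.g. lessThan(1L, 2.5) == true, lessThan("b", 'a') == false
  def lessThan(left: Any, right: Any): Boolean = compare(left, right) < 0
}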
| dksaputra/community | cypher/src/main/scala/org/neo4j/cypher/internal/Comparer.scala | Scala | gpl-3.0 | 2,746 |
package com.twitter.finagle.exp
import com.netflix.concurrency.limits.limit.VegasLimit
import com.netflix.concurrency.limits.Limit
import com.twitter.finagle._
import com.twitter.finagle.stats.{Counter, StatsReceiver}
import com.twitter.logging.{HasLogLevel, Level}
import com.twitter.util._
import java.util.concurrent.atomic.AtomicInteger
import scala.annotation.tailrec
private[finagle] object ConcurrencyLimitFilter {
class ConcurrencyOverload(val flags: Long = FailureFlags.Rejected)
extends Exception("Concurrency limit exceeded")
with FailureFlags[ConcurrencyOverload]
with HasLogLevel {
def logLevel: Level = Level.DEBUG
protected def copyWithFlags(flags: Long): ConcurrencyOverload = new ConcurrencyOverload(flags)
}
/**
* Constant values used for limit algorithm
*
* Initial Limit: Number of concurrent requests the limiter initializes with.
* Typically lower than the true limit. As the limit algorithm receives more
* samples, the limit will be adjusted from this level.
*
* Max Limit: Maximum allowable concurrency. Any estimated concurrency will be capped
* at this value
*
* Alpha: When queue_use is small, limit increases by alpha. When queue_use is large,
* limit decreases by alpha. Typically 2-3
*
* Queue size is calculated using the formula,
* queue_use = limit × (1 − RTTnoLoad/RTTactual)
*
* Beta: Typically 4-6
*
* Smoothing: factor (0 < x < 1) to limit how aggressively the estimated limit can shrink
* when queuing has been detected.
*
* StartTime: Defaults to 0 for Vegas and Gradient2 limit algorithm. Used in WindowedLimit to
* calculate next update time
*/
private val DefaultInitialLimit: Int = 10
private val DefaultMaxLimit: Int = 50
private val DefaultAlpha: Int = 3
private val DefaultBeta: Int = 6
private val DefaultSmoothing: Double = 0.6
private val StartTime: Long = 0
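
  // Worked example of the queue_use formula documented above (illustrative numbers,
  // not taken from the original source): with an estimated limit of 10,
  // RTTnoLoad = 10ms and RTTactual = 20ms,
  //
  //   queue_use = 10 * (1 - 10.0 / 20.0) = 5
  //
  // Reading alpha and beta as the queue_use thresholds described above, 5 falls
  // between the defaults (alpha = 3, beta = 6), so the limit would be held roughly
  // steady; a smaller queue_use grows the limit, a larger one shrinks it.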
val role: Stack.Role = Stack.Role("ConcurrencyLimitFilter")
trait Param
object Param {
/**
* A class eligible for configuring a [[com.twitter.finagle.Stackable]]
* [[com.twitter.finagle.exp.ConcurrencyLimitFilter]] module.
*/
case class Configured(initialLimit: Int, maxLimit: Int) extends Param
case object Disabled extends Param
implicit val param: Stack.Param[ConcurrencyLimitFilter.Param] =
Stack.Param(Configured(DefaultInitialLimit, DefaultMaxLimit))
}
private[finagle] val Disabled: Param = Param.Disabled
private[finagle] def module[Req, Rep]: Stackable[ServiceFactory[Req, Rep]] =
new Stack.Module2[ConcurrencyLimitFilter.Param, param.Stats, ServiceFactory[Req, Rep]] {
val description: String = "Enforce dynamic concurrency limit"
val role: Stack.Role = ConcurrencyLimitFilter.role
def make(
_param: Param,
_stats: param.Stats,
next: ServiceFactory[Req, Rep]
): ServiceFactory[Req, Rep] = _param match {
case Param.Configured(initialLimit, maxLimit) =>
val param.Stats(stats) = _stats
val filter = new ConcurrencyLimitFilter[Req, Rep](
VegasLimit
.newBuilder()
.alpha(DefaultAlpha)
.beta(DefaultBeta)
.smoothing(DefaultSmoothing)
.initialLimit(initialLimit)
.maxConcurrency(maxLimit)
.build(),
Stopwatch.systemNanos,
stats.scope("concurrency_limit")
)
filter.andThen(next)
case _ =>
next
}
}
/**
   * A [[com.twitter.finagle.Filter]] that calculates and enforces a request concurrency limit.
   * Incoming requests that exceed the current limit are failed immediately with a
   * [[com.twitter.finagle.FailureFlags.Rejected]]
   *
   * @param limit Limit algorithm used to calculate the estimated concurrency limit
*/
private[finagle] final class ConcurrencyLimitFilter[Req, Rep](
limit: Limit,
now: () => Long,
statsReceiver: StatsReceiver)
extends SimpleFilter[Req, Rep] {
private[this] val rejections: Counter = statsReceiver.counter("dropped_requests")
private[exp] val pending = new AtomicInteger(0)
private[this] val pendingGauge = statsReceiver.addGauge("pending") { pending.get() }
private[this] val estimatedLimit = statsReceiver.addGauge("estimated_concurrency_limit") {
limit.getLimit()
}
/**
* Updates limit with every request
*
* @param start Time (in nanoseconds) when the request reached this filter
     * @param didDrop Whether the request failed (true) rather than completing successfully
*
* limit.onSample(...) takes
* - startTime: 0
* - rtt: round trip time of request
* - inflight: number of outstanding requests
     * - didDrop: whether the request failed (was dropped)
*/
private[this] def updateLimit(start: Long, didDrop: Boolean): Unit = {
limit.onSample(StartTime, now() - start, pending.getAndDecrement, didDrop)
}
@tailrec
def apply(req: Req, service: Service[Req, Rep]): Future[Rep] = {
val currentPending = pending.get
if (currentPending >= limit.getLimit) {
rejections.incr()
Future.exception(new ConcurrencyOverload)
} else if (pending.compareAndSet(currentPending, currentPending + 1)) {
val start = now()
service(req).respond {
case Return(_) =>
updateLimit(start, false)
case Throw(_) =>
updateLimit(start, true)
}
} else {
apply(req, service)
}
}
}
}
| twitter/finagle | finagle-exp/src/main/scala/com/twitter/finagle/exp/ConcurrencyLimitFilter.scala | Scala | apache-2.0 | 5,546 |
package org.http4s
package server
package middleware
import scalaz.concurrent.Task
/** Removes a trailing slash from [[Request]] path
*
  * If a route exists with a file-style [[Uri]], e.g. "/foo",
* this middleware will cause [[Request]]s with uri = "/foo" and
* uri = "/foo/" to match the route.
*/
object AutoSlash {
def apply(service: HttpService): HttpService = Service.lift { req =>
service(req).flatMap {
case resp if resp.status == Status.NotFound =>
val p = req.uri.path
if (p.isEmpty || p.charAt(p.length - 1) != '/')
Task.now(resp)
else {
          val withoutSlash = req.copy(uri = req.uri.copy(path = p.substring(0, p.length - 1)))
          service.apply(withoutSlash)
}
case resp =>
Task.now(resp)
}
}
}
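
// Usage sketch (added for illustration; not part of the original file). `routes`
// is an assumed, pre-existing HttpService:
//
//   val routesWithAutoSlash: HttpService = AutoSlash(routes)
//
// A request for "/foo/" that would otherwise return 404 is retried against "/foo".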
| hvesalai/http4s | server/src/main/scala/org/http4s/server/middleware/AutoSlash.scala | Scala | apache-2.0 | 795 |
val inBuf = Buffer.read(s, "/home/hhrutz/Documents/devel/MutagenTx/audio_work/mfcc_input.aif",
startFrame = 0, numFrames = 1024)
// inBuf.play()
play {
val fftBuf = LocalBuf(1024)
val in = PlayBuf.ar(1, inBuf.id)
val fft = FFT(buf = fftBuf, in = in, hop = 0.5, winType = -1)
val mfcc = MFCC(chain = fft, numCoeffs = 13)
val rate = SampleRate.ir / 512
val tr = Impulse.ar(rate)
val count = PulseCount.ar(tr)
mfcc.poll(count sig_== 3, "coef")
FreeSelf.kr(count sig_== 4)
}
val res = Vector(0.538461,
-0.366776, -0.367721, 0.62556 , 0.44059,
0.21176 , 0.132587, 0.211515, 0.30611,
0.148217, 0.27029 , 0.273416, 0.236873)
res.plot()
| Sciss/MutagenTx | notes/mfcc_supercollider.scala | Scala | gpl-3.0 | 729 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.aliyun.datahub
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.sources.v2.writer.{DataSourceWriter, DataWriter, DataWriterFactory, WriterCommitMessage}
import org.apache.spark.sql.types.StructType
class DatahubWriter(
project: Option[String],
topic: Option[String],
datahubOptions: Map[String, String],
schema: Option[StructType]) extends DataSourceWriter {
override def commit(messages: Array[WriterCommitMessage]): Unit = {
}
override def abort(messages: Array[WriterCommitMessage]): Unit = {
}
override def createWriterFactory(): DatahubWriterFactory = {
DatahubWriterFactory(project, topic, datahubOptions, schema)
}
}
case class DatahubWriterFactory(
project: Option[String],
topic: Option[String],
datahubParams: Map[String, String],
schema: Option[StructType]) extends DataWriterFactory[InternalRow] {
override def createDataWriter(
partitionId: Int,
taskId: Long,
epochId: Long): DataWriter[InternalRow] = {
new DatahubDataWriter(project, topic, datahubParams, schema)
}
}
| aliyun/aliyun-emapreduce-sdk | emr-datahub/src/main/scala/org/apache/spark/sql/aliyun/datahub/DatahubWriter.scala | Scala | artistic-2.0 | 1,922 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.utils
import org.apache.flink.api.dag.Transformation
import org.apache.flink.table.api.TableException
import org.apache.flink.table.data.{GenericRowData, RowData}
import org.apache.flink.table.planner.codegen.CodeGenUtils.{DEFAULT_INPUT1_TERM, GENERIC_ROW}
import org.apache.flink.table.planner.codegen.OperatorCodeGenerator.generateCollect
import org.apache.flink.table.planner.codegen.{CodeGenUtils, CodeGeneratorContext, ExprCodeGenerator, OperatorCodeGenerator}
import org.apache.flink.table.planner.plan.nodes.exec.utils.ExecNodeUtil
import org.apache.flink.table.runtime.operators.CodeGenOperatorFactory
import org.apache.flink.table.runtime.types.LogicalTypeDataTypeConverter.fromDataTypeToLogicalType
import org.apache.flink.table.runtime.typeutils.InternalTypeInfo
import org.apache.flink.table.sources.TableSource
import org.apache.flink.table.types.DataType
import org.apache.flink.table.types.logical.RowType
import org.apache.flink.table.typeutils.TimeIndicatorTypeInfo
import org.apache.calcite.rel.core.TableScan
import org.apache.calcite.rex.RexNode
import java.util
import scala.collection.JavaConversions._
/**
* Util for [[TableScan]]s.
*/
object ScanUtil {
def hasTimeAttributeField(indexes: Array[Int]) =
indexes.contains(TimeIndicatorTypeInfo.ROWTIME_STREAM_MARKER)||
indexes.contains(TimeIndicatorTypeInfo.ROWTIME_BATCH_MARKER)||
indexes.contains(TimeIndicatorTypeInfo.PROCTIME_STREAM_MARKER)||
indexes.contains(TimeIndicatorTypeInfo.PROCTIME_BATCH_MARKER)
private[flink] def needsConversion(source: TableSource[_]): Boolean = {
needsConversion(source.getProducedDataType)
}
def needsConversion(dataType: DataType): Boolean =
fromDataTypeToLogicalType(dataType) match {
case _: RowType => !CodeGenUtils.isInternalClass(dataType)
case _ => true
}
def convertToInternalRow(
ctx: CodeGeneratorContext,
input: Transformation[Any],
fieldIndexes: Array[Int],
inputType: DataType,
outputRowType: RowType,
qualifiedName: util.List[String],
rowtimeExpr: Option[RexNode] = None,
beforeConvert: String = "",
afterConvert: String = ""): Transformation[RowData] = {
// conversion
val convertName = "SourceConversion"
// type convert
val inputTerm = DEFAULT_INPUT1_TERM
val internalInType = fromDataTypeToLogicalType(inputType)
val (inputTermConverter, inputRowType) = {
val convertFunc = CodeGenUtils.genToInternalConverter(ctx, inputType)
internalInType match {
case rt: RowType => (convertFunc, rt)
case _ => ((record: String) => s"$GENERIC_ROW.of(${convertFunc(record)})",
RowType.of(internalInType))
}
}
val processCode =
if ((inputRowType.getChildren == outputRowType.getChildren) &&
(inputRowType.getFieldNames == outputRowType.getFieldNames) &&
!hasTimeAttributeField(fieldIndexes)) {
s"${generateCollect(inputTerm)}"
} else {
// field index change (pojo) or has time attribute field
val conversion = new ExprCodeGenerator(ctx, false)
.bindInput(inputRowType, inputTerm = inputTerm, inputFieldMapping = Some(fieldIndexes))
.generateConverterResultExpression(
outputRowType, classOf[GenericRowData], rowtimeExpression = rowtimeExpr)
s"""
|$beforeConvert
|${conversion.code}
|${generateCollect(conversion.resultTerm)}
|$afterConvert
|""".stripMargin
}
val generatedOperator = OperatorCodeGenerator.generateOneInputStreamOperator[Any, RowData](
ctx,
convertName,
processCode,
outputRowType,
converter = inputTermConverter)
val substituteStreamOperator = new CodeGenOperatorFactory[RowData](generatedOperator)
ExecNodeUtil.createOneInputTransformation(
input.asInstanceOf[Transformation[RowData]],
getOperatorName(qualifiedName, outputRowType),
substituteStreamOperator,
InternalTypeInfo.of(outputRowType),
input.getParallelism,
0)
}
/**
* @param qualifiedName qualified name for table
*/
private[flink] def getOperatorName(qualifiedName: Seq[String], rowType: RowType): String = {
val tableQualifiedName = qualifiedName.mkString(".")
val fieldNames = rowType.getFieldNames.mkString(", ")
s"SourceConversion(table=[$tableQualifiedName], fields=[$fieldNames])"
}
/**
* Returns the field indices of primary key in given fields.
*/
def getPrimaryKeyIndices(
fieldNames: util.List[String],
keyFields: util.List[String]): Array[Int] = {
// we must use the output field names of scan node instead of the original schema
    // to calculate the primary key indices, because projection push-down may have been applied to the scan node
keyFields.map { k =>
val index = fieldNames.indexOf(k)
if (index < 0) {
// primary key shouldn't be pruned, otherwise it's a bug
throw new TableException(
s"Can't find primary key field $k in the input fields $fieldNames. " +
s"This is a bug, please file an issue.")
}
index
}.toArray
}
}
| StephanEwen/incubator-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/utils/ScanUtil.scala | Scala | apache-2.0 | 6,045 |
package org.scalacoin.crypto
/**
* The result type returned by checking a signature
*/
sealed trait TransactionSignatureCheckerResult {
/**
   * Indicates whether the transaction signature check succeeded or failed
   * @return true if the signature check succeeded, false otherwise
*/
def isValid : Boolean
}
/**
* Represents the case that the signatures checked inside of the transaction were
* all validly encoded as per the script verify flag & that the signatures
* were valid when checked against the public keys
*/
case object SignatureValidationSuccess extends TransactionSignatureCheckerResult {
def isValid = true
}
/**
* Signature validation failed because a signature was not encoded
* per the BIP66 rules https://github.com/bitcoin/bips/blob/master/bip-0066.mediawiki#specification
*/
case object SignatureValidationFailureNotStrictDerEncoding extends TransactionSignatureCheckerResult {
def isValid = false
}
/**
* Signature validation failed because there were not enough correct signatures for the transaction
* we were given
*/
case object SignatureValidationFailureIncorrectSignatures extends TransactionSignatureCheckerResult {
def isValid = false
}
/**
* This indicates that the signature validation failed because we have more signatures left to check
* than public keys remaining to check them against
* see https://github.com/bitcoin/bitcoin/blob/master/src/script/interpreter.cpp#L914-915
*/
case object SignatureValidationFailureSignatureCount extends TransactionSignatureCheckerResult {
def isValid = false
}
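
/**
 * Illustrative handling sketch (added; not part of the original file): shows how a
 * caller might turn each result into a human-readable reason. `describe` is a
 * hypothetical helper and is not used elsewhere in the codebase.
 */
object TransactionSignatureCheckerResultExample {
  def describe(result: TransactionSignatureCheckerResult): String = result match {
    case SignatureValidationSuccess => "all signatures were valid"
    case SignatureValidationFailureNotStrictDerEncoding => "a signature was not strictly DER encoded (BIP66)"
    case SignatureValidationFailureIncorrectSignatures => "not enough correct signatures for the transaction"
    case SignatureValidationFailureSignatureCount => "more signatures left to check than public keys"
  }
}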
| TomMcCabe/scalacoin | src/main/scala/org/scalacoin/crypto/TransactionSignatureCheckerResult.scala | Scala | mit | 1,520 |
package services
import core.{ HarvestCollectionLookupService, OrganizationCollectionLookupService }
import core.collection.{ OrganizationCollection, Harvestable }
import models.{ OrganizationConfiguration, RecordDefinition, DataSetState, DataSet }
/**
*
* @author Manuel Bernhardt <[email protected]>
*/
class DataSetLookupService extends HarvestCollectionLookupService with OrganizationCollectionLookupService {
def findAllNonEmpty(orgId: String, format: Option[String], accessKey: Option[String])(implicit configuration: OrganizationConfiguration): List[Harvestable] = {
val sets = DataSet.dao.findAll().filterNot(_.state != DataSetState.ENABLED)
if (format.isDefined) {
sets.filter(ds => ds.getVisibleMetadataSchemas(accessKey).exists(_.prefix == format.get))
} else {
sets
}
}
def findBySpecAndOrgId(spec: String, orgId: String)(implicit configuration: OrganizationConfiguration): Option[Harvestable] = DataSet.dao.findBySpecAndOrgId(spec, orgId)
def getAllMetadataFormats(orgId: String, accessKey: Option[String])(implicit configuration: OrganizationConfiguration): List[RecordDefinition] = DataSet.dao.getAllVisibleMetadataFormats(orgId, accessKey).distinct
def findAll(implicit configuration: OrganizationConfiguration): Seq[OrganizationCollection] = DataSet.dao.findAll()
} | delving/culture-hub | modules/dataset/app/services/DataSetLookupService.scala | Scala | apache-2.0 | 1,340 |
package libs
import org.im4java.core.{ ConvertCmd, IdentifyCmd, IMOperation, ImageCommand }
import org.im4java.process.OutputConsumer
import java.io.{ File, InputStreamReader, BufferedReader, InputStream }
import play.api.{ Logger, Play }
import org.apache.commons.io.FileUtils
/**
* Normalizes a TIF prior to tiling.
* Here we add all sorts of tricks we need to do in order to produce tiles that are compatible with the IIP Image Server for PTIF tiling.
*
* @author Manuel Bernhardt <[email protected]>
*/
object Normalizer {
private val log = Logger("CultureHub")
/**
* Normalizes a file to be usable for tiling and presentation for DeepZoom
* @param sourceImage the source image to be normalized
* @param targetDirectory the target directory to which the normalized file should be written to
* @return an optional normalized file, if normalization took place
*/
def normalize(sourceImage: File, targetDirectory: File): Option[File] = {
var source: File = sourceImage
val destination = new File(targetDirectory, sourceImage.getName)
val hasBeenNormalized = identifyLargestLayer(source) != None || !isRGB(source)
identifyLargestLayer(source).map { index =>
log.info("Image %s has multiple layers, normalizing to just one...".format(source.getName))
val convertCmd = new ConvertCmd
val convertOp = new IMOperation
convertOp.addRawArgs(source.getAbsolutePath + "[%s]".format(index))
convertOp.addRawArgs(destination.getAbsolutePath)
convertCmd.run(convertOp)
source = destination
}
if (!isRGB(source)) {
log.info("Image %s isn't RGB encoded, converting...".format(source.getName))
if (isGrayscale(source)) {
log.info("Image %s is Greyscale, converting to CMYK first to get the right colorspace when converting back...".format(source.getName))
// GraphicsMagick considers Grayscale to be a subset of RGB, so it won't change the type when converting directly to RGB
// so we first go over to CMYK and then back to RGB
convertColorspace(targetDirectory, source, destination, "CMYK")
source = destination
}
convertColorspace(targetDirectory, source, destination, "RGB")
}
if (hasBeenNormalized) {
Some(destination)
} else {
None
}
}
private def convertColorspace(targetDirectory: File, source: File, destination: File, colorspace: String) {
val converted = new File(targetDirectory, colorspace + "_" + source.getName)
val convertCmd = new ConvertCmd
val convertOp = new IMOperation
convertOp.colorspace(colorspace)
convertOp.addImage(source.getAbsolutePath)
convertOp.addImage(converted.getAbsolutePath)
convertCmd.run(convertOp)
if (converted.exists()) {
      if (converted.getParentFile.getAbsolutePath == targetDirectory.getAbsolutePath) {
FileUtils.deleteQuietly(source)
}
FileUtils.moveFile(converted, destination)
}
}
private def identifyLargestLayer(sourceImage: File): Option[Int] = {
val identified = identify(sourceImage, { op => })
if (identified.length > 1) {
// gm identify gives us lines like this:
// 2006-011.tif TIFF 1000x800+0+0 DirectClass 8-bit 3.6M 0.000u 0:01
// we want to fetch the 1000x800 part and know which line is da biggest
val largestLayer = identified.map { line =>
        val Array(width: Int, height: Int) = line.split(" ")(2).split("\\+")(0).split("x").map(Integer.parseInt(_))
(width, height)
}.zipWithIndex.foldLeft((0, 0), 0) { (r: ((Int, Int), Int), c: ((Int, Int), Int)) =>
if (c._1._1 * c._1._2 > r._1._1 * r._1._2) c else r
}
val largestIndex = largestLayer._2
Some(largestIndex)
} else {
None
}
}
private def isRGB(sourceImage: File): Boolean = isColorspace(sourceImage, "RGB")
private def isGrayscale(sourceImage: File): Boolean = isColorspace(sourceImage, "Grayscale")
private def isColorspace(sourceImage: File, colorspace: String) = {
val identified = identify(sourceImage, { _.format("%[colorspace]") })
log.info(s"Identified colorspace of image ${sourceImage.getAbsolutePath} as $colorspace")
identified.headOption.map { c: String => c.contains(colorspace) }.getOrElse(false)
}
private def identify(sourceImage: File, addParameters: IMOperation => Unit): Seq[String] = {
val identifyCmd = new IdentifyCmd(false)
val identifyOp = new IMOperation
var identified: List[String] = List()
identifyCmd.setOutputConsumer(new OutputConsumer() {
def consumeOutput(is: InputStream) {
val br = new BufferedReader(new InputStreamReader(is))
identified = Stream.continually(br.readLine()).takeWhile(_ != null).toList
}
})
addParameters(identifyOp)
identifyOp.addImage(sourceImage.getAbsolutePath)
identifyCmd.run(identifyOp)
identified.toSeq
}
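
  // Usage sketch (added for illustration; not part of the original file). The paths
  // below are hypothetical and GraphicsMagick must be installed for this to run:
  //
  //   Normalizer.normalize(new File("/tmp/source.tif"), new File("/tmp/tiles")) match {
  //     case Some(normalized) => log.info(s"Normalized to ${normalized.getAbsolutePath}")
  //     case None             => log.info("Image already single-layer RGB; nothing to do")
  //   }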
} | delving/culture-hub | modules/dos/app/libs/Normalizer.scala | Scala | apache-2.0 | 4,930 |
package dx.api
object DxPath {
val DX_URL_PREFIX = "dx://"
case class DxPathComponents(name: String,
folder: Option[String],
projName: Option[String],
objFullName: String,
sourcePath: String)
// TODO: use RuntimeExceptions for assertions
def parse(dxPath: String): DxPathComponents = {
// strip the prefix
if (!dxPath.startsWith(DX_URL_PREFIX)) {
throw new Exception(s"Path ${dxPath} does not start with prefix ${DX_URL_PREFIX}")
}
val s = dxPath.substring(DX_URL_PREFIX.length)
// take out the project, if it is specified
val components = s.split(":").toList
val (projName, dxObjectPath) = components match {
case Nil =>
throw new Exception(s"Path ${dxPath} is invalid")
case List(objName) =>
(None, objName)
case projName :: tail =>
val rest = tail.mkString(":")
(Some(projName), rest)
}
projName match {
case None => ()
case Some(proj) =>
if (proj.startsWith("file-"))
throw new Exception("""|Path ${dxPath} does not look like: dx://PROJECT_NAME:/FILE_PATH
|For example:
| dx://dxWDL_playground:/test_data/fileB
|""".stripMargin)
}
// split the object path into folder/name
val index = dxObjectPath.lastIndexOf('/')
val (folderRaw, name) =
if (index == -1) {
("/", dxObjectPath)
} else {
(dxObjectPath.substring(0, index), dxObjectPath.substring(index + 1))
}
// We don't want a folder if this is a dx-data-object (file-xxxx, record-yyyy)
val folder =
if (DxUtils.isDataObjectId(name)) None
else if (folderRaw == "") Some("/")
else Some(folderRaw)
DxPathComponents(name, folder, projName, dxObjectPath, dxPath)
}
}
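
// Illustrative parse example (added; not part of the original file). The project
// and file names below are made up:
//
//   DxPath.parse("dx://my-project:/inputs/reads.fastq")
//
// yields DxPathComponents(name = "reads.fastq", folder = Some("/inputs"),
// projName = Some("my-project"), objFullName = "/inputs/reads.fastq",
// sourcePath = "dx://my-project:/inputs/reads.fastq").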
| dnanexus-rnd/dxWDL | src/main/scala/dx/api/DxPath.scala | Scala | apache-2.0 | 1,959 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License,
*
* Contributors:
* Hao Jiang - initial API and implementation
*
*/
package edu.uchicago.cs.encsel
import java.util.Properties
import java.io.FileNotFoundException
import org.slf4j.LoggerFactory
object Config {
var collectorThreadCount = 10
var columnReaderEnableCheck = true
var columnReaderErrorLimit = 100
var columnFolder = "./columns"
val tempFolder = "./temp"
var distJmsHost = "vm://localhost"
load()
val logger = LoggerFactory.getLogger(getClass)
def load(): Unit = {
try {
val p = new Properties()
p.load(Thread.currentThread().getContextClassLoader.getResourceAsStream("config.properties"))
collectorThreadCount = Integer.parseInt(p.getProperty("collector.threadCount"))
columnReaderEnableCheck = "true".equals(p.getProperty("column.readerEnableCheck"))
columnReaderErrorLimit = Integer.parseInt(p.getProperty("column.readerErrorLimit"))
columnFolder = p.getProperty("column.folder")
distJmsHost = p.getProperty("dist.jms.host")
} catch {
case e: Exception => {
logger.warn("Failed to load configuration", e)
}
}
}
} | harperjiang/enc-selector | src/main/scala/edu/uchicago/cs/encsel/Config.scala | Scala | apache-2.0 | 1,937 |
package filodb.core.reprojector
import com.typesafe.scalalogging.slf4j.StrictLogging
import org.velvia.filo.RowReader
import scala.concurrent.{ExecutionContext, Future}
import filodb.core._
import filodb.core.columnstore.{ColumnStore, RowWriterSegment, Segment}
import filodb.core.metadata.{Dataset, Column, RichProjection}
/**
* The Reprojector flushes rows out of the MemTable and writes out Segments to the ColumnStore.
* All of the work should be done asynchronously.
* The reprojector should be stateless. It takes MemTables and creates Futures for reprojection tasks.
*/
trait Reprojector {
import MemTable.IngestionSetup
import RowReader._
/**
* Does reprojection (columnar flushes from memtable) for a single dataset.
* Should be completely stateless.
* Does not need to reproject all the rows from the Locked memtable, but should progressively
* delete rows from the memtable until there are none left. This is how the reprojector marks progress:
* by deleting rows that has been committed to ColumnStore.
*
* Failures:
* The Scheduler only schedules one reprojection task at a time per (dataset, version), so if this fails,
* then it can be rerun.
*
* Most likely this will involve scheduling a whole bunch of futures to write segments.
* Be careful to do too much work, newTask is supposed to not take too much CPU time and use Futures
* to do work asynchronously. Also, scheduling too many futures leads to long blocking time and
* memory issues.
*
* @returns a Future[Seq[String]], representing info from individual segment flushes.
*/
def newTask(memTable: MemTable, dataset: Types.TableName, version: Int): Future[Seq[String]] = {
import Column.ColumnType._
val setup = memTable.getIngestionSetup(dataset, version).getOrElse(
throw new IllegalArgumentException(s"Could not find $dataset/$version"))
setup.schema(setup.sortColumnNum).columnType match {
case LongColumn => reproject[Long](memTable, setup, version)
case IntColumn => reproject[Int](memTable, setup, version)
case DoubleColumn => reproject[Double](memTable, setup, version)
      case other: Column.ColumnType => throw new RuntimeException(s"Illegal sort key type $other")
}
}
// The inner, typed reprojection task launcher that must be implemented.
def reproject[K: TypedFieldExtractor](memTable: MemTable, setup: IngestionSetup, version: Int):
Future[Seq[String]]
}
/**
* Default reprojector, which scans the Locked memtable, turning them into segments for flushing,
* using fixed segment widths
*
* @param numSegments the number of segments to reproject for each reprojection task.
*/
class DefaultReprojector(columnStore: ColumnStore,
numSegments: Int = 3)
(implicit ec: ExecutionContext) extends Reprojector with StrictLogging {
import MemTable._
import Types._
import RowReader._
import filodb.core.Iterators._
// PERF/TODO: Maybe we should pass in an Iterator[RowReader], and extract partition and sort keys
// out. Heck we could create a custom FiloRowReader which has methods to extract this out.
// Might be faster than creating a Tuple3 for every row... or not, for complex sort and partition keys
def chunkize[K](rows: Iterator[(PartitionKey, K, RowReader)],
setup: IngestionSetup): Iterator[Segment[K]] = {
implicit val helper = setup.helper[K]
rows.sortedGroupBy { case (partition, sortKey, row) =>
// lazy grouping of partition/segment from the sortKey
(partition, helper.getSegment(sortKey))
}.map { case ((partition, (segStart, segUntil)), segmentRowsIt) =>
// For each segment grouping of rows... set up a Segment
val keyRange = KeyRange(setup.dataset.name, partition, segStart, segUntil)
val segment = new RowWriterSegment(keyRange, setup.schema)
logger.debug(s"Created new segment $segment for encoding...")
// Group rows into chunk sized bytes and add to segment
segmentRowsIt.grouped(setup.dataset.options.chunkSize).foreach { chunkRowsIt =>
val chunkRows = chunkRowsIt.toSeq
segment.addRowsAsChunk(chunkRows)
}
segment
}.take(numSegments)
}
def reproject[K: TypedFieldExtractor](memTable: MemTable, setup: IngestionSetup, version: Int):
Future[Seq[String]] = {
implicit val helper = setup.helper[K]
val datasetName = setup.dataset.name
val projection = RichProjection(setup.dataset, setup.schema)
val segments = chunkize(memTable.readAllRows[K](datasetName, version, Locked), setup)
val segmentTasks: Seq[Future[String]] = segments.map { segment =>
for { resp <- columnStore.appendSegment(projection, segment, version) if resp == Success }
yield {
logger.debug(s"Finished merging segment ${segment.keyRange}, version $version...")
memTable.removeRows(segment.keyRange, version)
logger.debug(s"Removed rows for segment $segment from Locked table...")
// Return useful info about each successful reprojection
(segment.keyRange.partition, segment.keyRange.start).toString
}
}.toSeq
Future.sequence(segmentTasks)
}
}
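
// Wiring sketch (added for illustration; not part of the original file). A flush
// scheduler would typically hold one reprojector per column store; `columnStore`,
// `memTable` and the dataset name below are assumed to exist:
//
//   implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.global
//   val reprojector = new DefaultReprojector(columnStore)
//   val flushInfo: Future[Seq[String]] = reprojector.newTask(memTable, "gdelt", version = 0)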
| YanjieGao/FiloDB | core/src/main/scala/filodb.core/reprojector/Reprojector.scala | Scala | apache-2.0 | 5,240 |
/**
* Copyright 2015 Peter Nerg
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.dmonix.akka.persistence
import com.typesafe.config.ConfigFactory
import akka.testkit.TestKit
import akka.testkit.ImplicitSender
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Matchers
import org.scalatest.WordSpecLike
object PersistenceSuiteTrait {
def journalId() = "dummy-journal"
def snapStoreId() = "dummy-snapshot-store"
def config() = ConfigFactory.parseString(
"""akka.loggers = [akka.testkit.TestEventListener] # makes both log-snooping and logging work
akka.loglevel = "DEBUG"
akka.persistence.journal.plugin = "dummy-journal"
akka.persistence.snapshot-store.plugin = "dummy-snapshot-store"
dummy-journal {
class = "org.dmonix.akka.persistence.JournalPlugin"
plugin-dispatcher = "akka.actor.default-dispatcher"
}
dummy-snapshot-store {
class = "org.dmonix.akka.persistence.SnapshotStorePlugin"
plugin-dispatcher = "akka.persistence.dispatchers.default-plugin-dispatcher"
}
akka.actor.debug.receive = on""")
}
| pnerg/akka-persistence-mock | src/test/scala/org/dmonix/akka/persistence/PersistenceSuiteTrait.scala | Scala | apache-2.0 | 1,691 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.h2o.sparkling.backend
import ai.h2o.sparkling.H2OConf
/**
 * Shared configuration independent of the backend used
*/
trait SharedBackendConfExtensions {
self: H2OConf =>
import SharedBackendConf._
private[backend] def getFileProperties: Seq[(String, _, _, _)] =
Seq(PROP_JKS, PROP_LOGIN_CONF, PROP_SSL_CONF)
protected def setBackendClusterMode(backendClusterMode: String) = {
set(PROP_BACKEND_CLUSTER_MODE._1, backendClusterMode)
}
private[sparkling] def getClientLanguage: String = sparkConf.get(PROP_CLIENT_LANGUAGE._1, PROP_CLIENT_LANGUAGE._2)
}
| h2oai/sparkling-water | core/src/main/scala/ai/h2o/sparkling/backend/SharedBackendConfExtensions.scala | Scala | apache-2.0 | 1,388 |
package lms
import sql.Scanner
import com.oracle.truffle.api._
import com.oracle.truffle.api.frame._
import com.oracle.truffle.api.nodes._
import com.oracle.truffle.api.nodes.Node._
import org.scalatest._
trait Types extends Base {
implicit object unitTyp extends Typ[Unit]{
def slotKind = FrameSlotKind.Object
}
implicit object intTyp extends Typ[Int] {
def slotKind = FrameSlotKind.Int
}
implicit object boolTyp extends Typ[Boolean] {
def slotKind = FrameSlotKind.Boolean
}
implicit object doubleTyp extends Typ[Double] {
def slotKind = FrameSlotKind.Double
}
implicit object stringTyp extends Typ[String] {
def slotKind = FrameSlotKind.Object
}
implicit def arrayTyp[T:Typ] = new Typ[Array[T]] {
def slotKind = FrameSlotKind.Object
}
implicit def scannerTyp = new Typ[Scanner] {
def slotKind = FrameSlotKind.Object
}
}
| RomanTsegelskyi/lms-truffle | src/main/scala/lms/Types.scala | Scala | gpl-2.0 | 895 |
package com.karasiq.tls
import java.nio.channels.SocketChannel
import org.bouncycastle.crypto.tls.{AlertDescription, TlsFatalAlert}
import scala.concurrent.Promise
import scala.util.control.Exception
trait TLSConnectionWrapper {
protected val handshake: Promise[Boolean] = Promise()
protected def onError(message: String, exc: Throwable): Unit = { }
protected def onInfo(message: String): Unit = { }
protected def wrapException[T](message: String)(f: ⇒ T): T = {
val catcher = Exception.allCatch.withApply { exc ⇒
handshake.tryFailure(exc)
if (exc.isInstanceOf[TlsFatalAlert]) throw exc
else {
onError(message, exc)
throw new TlsFatalAlert(AlertDescription.internal_error, new TLSException(message, exc))
}
}
catcher(f)
}
def apply(connection: SocketChannel): SocketChannel
}
| Karasiq/cryptoutils | src/main/scala/com/karasiq/tls/TLSConnectionWrapper.scala | Scala | mit | 852 |
package coursier.publish
import coursier.core.{Configuration, ModuleName, Organization, Type}
import scala.collection.mutable
import scala.xml.{Elem, Node, NodeSeq}
object Pom {
// TODO Check https://github.com/lihaoyi/mill/pull/144/files
final case class License(name: String, url: String)
object License {
def apache2: License =
License("Apache-2.0", "https://spdx.org/licenses/Apache-2.0.html")
lazy val all = Seq(
apache2
)
lazy val map = all.map(l => l.name -> l).toMap
}
final case class Scm(
url: String,
connection: String,
developerConnection: String
)
object Scm {
def gitHub(org: String, project: String): Scm =
Scm(
s"https://github.com/$org/$project.git",
s"scm:git:github.com/$org/$project.git",
s"scm:git:[email protected]:$org/$project.git"
)
}
// FIXME What's mandatory? What's not?
final case class Developer(
id: String,
name: String,
url: String,
mail: Option[String]
)
def create(
organization: Organization,
moduleName: ModuleName,
version: String,
packaging: Option[Type] = None,
description: Option[String] = None,
url: Option[String] = None,
name: Option[String] = None,
// TODO Accept full-fledged coursier.Dependency
dependencies: Seq[(Organization, ModuleName, String, Option[Configuration])] = Nil,
license: Option[License] = None,
scm: Option[Scm] = None,
developers: Seq[Developer] = Nil
): String = {
val nodes = new mutable.ListBuffer[NodeSeq]
nodes ++= Seq(
<modelVersion>4.0.0</modelVersion>,
<groupId>{organization.value}</groupId>,
<artifactId>{moduleName.value}</artifactId>,
<version>{version}</version>
)
for (p <- packaging)
nodes += <packaging>{p.value}</packaging>
for (u <- url)
nodes += <url>{u}</url>
for (d <- description)
nodes += <description>{d}</description>
for (n <- name)
nodes += <name>{n}</name>
nodes += {
val urlNodeOpt = url.fold[NodeSeq](Nil)(u => <url>{u}</url>)
<organization>
<name>{organization.value}</name>
{urlNodeOpt}
</organization>
}
for (l <- license)
nodes +=
<licenses>
<license>
<name>{l.name}</name>
<url>{l.url}</url>
<distribution>repo</distribution>
</license>
</licenses>
for (s <- scm)
nodes +=
<scm>
<url>{s.url}</url>
<connection>{s.connection}</connection>
<developerConnection>{s.developerConnection}</developerConnection>
</scm>
if (developers.nonEmpty)
nodes +=
<developers>
{
developers.map { d =>
<developer>
<id>{d.id}</id>
<name>{d.name}</name>
<url>{d.url}</url>
</developer>
// + optional mail
}
}
</developers>
if (dependencies.nonEmpty)
nodes +=
<dependencies>
{
dependencies.map {
case (depOrg, depName, ver, confOpt) =>
<dependency>
<groupId>{depOrg.value}</groupId>
<artifactId>{depName.value}</artifactId>
<version>{ver}</version>
{confOpt.fold[NodeSeq](Nil)(c => <scope>{c}</scope>)}
</dependency>
}
}
</dependencies>
print(
<project xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0">
{nodes.result()}
</project>
)
}
private def addOrUpdate(content: Elem, label: String)(update: Option[Node] => Node): Elem = {
// assumes there's at most one child with this label…
val found = content.child.exists(_.label == label)
val updatedChildren =
if (found)
content.child.map {
case n if n.label == label =>
update(Some(n))
case n =>
n
}
else
content.child :+ update(None)
content.copy(
child = updatedChildren
)
}
def overrideOrganization(organization: Organization, content: Elem): Elem = {
val content0 = addOrUpdate(content, "groupId") { _ =>
<groupId>{organization.value}</groupId>
}
addOrUpdate(content0, "organization") {
case Some(elem0: Elem) =>
addOrUpdate(elem0, "name") { _ =>
<name>{organization.value}</name>
}
case _ =>
<organization>
<name>{organization.value}</name>
</organization>
}
}
def overrideModuleName(name: ModuleName, content: Elem): Elem =
addOrUpdate(content, "artifactId") { _ =>
<artifactId>{name.value}</artifactId>
}
def overrideVersion(version: String, content: Elem): Elem =
addOrUpdate(content, "version") { _ =>
<version>{version}</version>
}
def overrideHomepage(url: String, content: Elem): Elem = {
val content0 = addOrUpdate(content, "url") { _ =>
<url>{url}</url>
}
addOrUpdate(content0, "organization") {
case Some(elem0: Elem) =>
addOrUpdate(elem0, "url") { _ =>
<url>{url}</url>
}
case _ =>
<organization>
<url>{url}</url>
</organization>
}
}
def overrideScm(domain: String, path: String, content: Elem): Elem =
addOrUpdate(content, "scm") {
case Some(elem0: Elem) =>
var elem1 = addOrUpdate(elem0, "url") { _ =>
<url>https://{domain}/{path}</url>
}
elem1 = addOrUpdate(elem1, "connection") { _ =>
<connection>scm:git:https://{domain}/{path}.git</connection>
}
addOrUpdate(elem1, "developerConnection") { _ =>
<developerConnection>scm:git:git@{domain}:{path}.git</developerConnection>
}
case _ =>
<scm>
<url>https://{domain}/{path}</url>
<connection>scm:git:https://{domain}/{path}.git</connection>
<developerConnection>scm:git:git@{domain}:{path}.git</developerConnection>
</scm>
}
def overrideDistributionManagementRepository(
id: String,
name: String,
url: String,
content: Elem
): Elem =
addOrUpdate(content, "distributionManagement") {
case Some(elem0: Elem) =>
addOrUpdate(elem0, "repository") { _ =>
<repository>
<id>{id}</id>
<name>{name}</name>
<url>{url}</url>
</repository>
}
case _ =>
<distributionManagement>
<repository>
<id>{id}</id>
<name>{name}</name>
<url>{url}</url>
</repository>
</distributionManagement>
}
def overrideLicenses(licenses: Seq[License], content: Elem): Elem =
addOrUpdate(content, "licenses") { _ =>
<licenses>{
licenses.map { l =>
<license>
<name>{l.name}</name>
<url>{l.url}</url>
<distribution>repo</distribution>
</license>
}
}</licenses>
}
def overrideDevelopers(developers: Seq[Developer], content: Elem): Elem =
addOrUpdate(content, "developers") { _ =>
<developers>{
developers.map { dev =>
<developer>
<id>{dev.id}</id>
<name>{dev.name}</name>
{
dev.mail match {
case None =>
<email/>
case Some(mail) =>
<email>{mail}</email>
}
}
<url>{dev.url}</url>
</developer>
}
}</developers>
}
def transformDependency(
content: Elem,
from: (Organization, ModuleName),
to: (Organization, ModuleName)
): Elem = {
def adjustGroupArtifactIds(n: Elem): Elem = {
val orgOpt = n.child.collectFirst {
case n if n.label == "groupId" => Organization(n.text)
}
val nameOpt = n.child.collectFirst {
case n if n.label == "artifactId" => ModuleName(n.text)
}
if (orgOpt.contains(from._1) && nameOpt.contains(from._2))
n.copy(
child = n.child.map {
case n if n.label == "groupId" =>
<groupId>{to._1.value}</groupId>
case n if n.label == "artifactId" =>
<artifactId>{to._2.value}</artifactId>
case n => n
}
)
else
n
}
// TODO Adjust dependencyManagement section too?
content.copy(
child = content.child.map {
case n: Elem if n.label == "dependencies" =>
n.copy(
child = n.child.map {
case n: Elem if n.label == "dependency" =>
val n0 = adjustGroupArtifactIds(n)
n0.copy(
child = n0.child.map {
case n: Elem if n.label == "exclusions" =>
n.copy(
child = n.child.map {
case n: Elem if n.label == "exclusion" =>
adjustGroupArtifactIds(n)
case n => n
}
)
case n => n
}
)
case n => n
}
)
case n => n
}
)
}
def transformDependencyVersion(
content: Elem,
org: Organization,
name: ModuleName,
fromVersion: String,
toVersion: String
): Elem = {
def adjustVersion(n: Elem): Elem = {
val orgOpt = n.child.collectFirst {
case n if n.label == "groupId" => Organization(n.text)
}
val nameOpt = n.child.collectFirst {
case n if n.label == "artifactId" => ModuleName(n.text)
}
if (orgOpt.contains(org) && nameOpt.contains(name))
n.copy(
child = n.child.map {
case n if n.label == "version" && n.text.trim == fromVersion =>
<version>{toVersion}</version>
case n => n
}
)
else
n
}
// TODO Adjust dependencyManagement section too?
content.copy(
child = content.child.map {
case n: Elem if n.label == "dependencies" =>
n.copy(
child = n.child.map {
case n: Elem if n.label == "dependency" =>
adjustVersion(n)
case n => n
}
)
case n => n
}
)
}
def print(elem: Elem): String = {
val printer = new scala.xml.PrettyPrinter(Int.MaxValue, 2)
"""<?xml version="1.0" encoding="UTF-8"?>""" + '\n' + printer.format(elem)
}
}
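
// Usage sketch (added for illustration; not part of the original file). The
// coordinates below are hypothetical:
//
//   val pomXml: String = Pom.create(
//     organization = Organization("io.example"),
//     moduleName   = ModuleName("demo_2.13"),
//     version      = "0.1.0",
//     license      = Some(Pom.License.apache2)
//   )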
| alexarchambault/coursier | modules/publish/src/main/scala/coursier/publish/Pom.scala | Scala | apache-2.0 | 10,776 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
trait AllSuiteProp extends MethodSuiteProp with FunctionSuiteProp {
override def examples =
Table[Suite with FixtureServices](
"suite",
suite,
fixtureSuite,
spec,
fixtureSpec,
junit3Suite,
junitSuite,
testngSuite,
funSuite,
fixtureFunSuite,
funSpec,
fixtureFunSpec,
featureSpec,
fixtureFeatureSpec,
flatSpec,
fixtureFlatSpec,
freeSpec,
fixtureFreeSpec,
propSpec,
fixturePropSpec,
wordSpec,
fixtureWordSpec,
pathFreeSpec,
pathFunSpec
)
}
| travisbrown/scalatest | src/test/scala/org/scalatest/AllSuiteProp.scala | Scala | apache-2.0 | 1,224 |
package org.genivi.sota.resolver.test.random
import akka.http.scaladsl.model.StatusCodes
import cats.state.{State, StateT}
import org.genivi.sota.resolver.resolve.ResolveFunctions
import org.genivi.sota.resolver.filters.{And, FilterAST, True}
import org.genivi.sota.resolver.filters.Filter
import FilterAST._
import org.genivi.sota.resolver.test._
import org.scalacheck.Gen
import Misc.{function0Instance, lift, monGen}
import org.genivi.sota.data.Device.DeviceId
import org.genivi.sota.data.{Device, PackageId, Uuid}
import scala.annotation.tailrec
import scala.collection.immutable.Iterable
import scala.concurrent.ExecutionContext
import cats.syntax.show._
import Device._
import org.genivi.sota.resolver.components.Component
import org.genivi.sota.resolver.db.Package
sealed trait Query
final case object ListVehicles extends Query
final case class ListVehiclesFor(cmp: Component) extends Query
final case class ListPackagesOnVehicle(veh: Uuid) extends Query
final case class ListPackagesFor(flt: Filter) extends Query
final case object ListFilters extends Query
final case class ListFiltersFor(pak: Package) extends Query
final case class Resolve(id: PackageId) extends Query
final case object ListComponents extends Query
final case class ListComponentsFor(veh: Uuid) extends Query
object Query extends
VehicleRequestsHttp with
PackageRequestsHttp with
FilterRequestsHttp with
ComponentRequestsHttp with
PackageFilterRequestsHttp with
ResolveRequestsHttp {
def semQueries(qrs: List[Query])
(implicit ec: ExecutionContext): State[RawStore, List[Semantics]] = {
@tailrec def go(qrs0: List[Query], s0: RawStore, acc: List[Semantics]): (RawStore, List[Semantics]) =
qrs0 match {
case Nil => (s0, acc.reverse)
case (qr :: qrs1) =>
val (s1, r) = semQuery(qr).run(s0).run
go(qrs1, s1, r :: acc)
}
State.get.flatMap { s0 =>
val (s1, sems) = go(qrs, s0, List())
State.set(s1).flatMap(_ => State.pure(sems))
}
}
def semQuery(q: Query): State[RawStore, Semantics] = q match {
case ListVehicles =>
State.get map (s => Semantics(Some(q),
listVehicles, StatusCodes.OK,
SuccessVehicles(s.devices.keySet)))
case ListVehiclesFor(cmp) =>
State.get map (s => Semantics(Some(q),
listVehiclesHaving(cmp), StatusCodes.OK,
SuccessVehicles(s.vehiclesHaving(cmp))))
case ListComponents =>
State.get map (s => Semantics(Some(q),
listComponents, StatusCodes.OK,
SuccessComponents(s.components)))
case ListComponentsFor(veh) =>
State.get map (s => Semantics(Some(q),
listComponentsOnVehicle(veh), StatusCodes.OK,
SuccessPartNumbers(s.devices(veh)._2.map(_.partNumber))))
case ListPackagesOnVehicle(veh) =>
State.get map (s => Semantics(Some(q),
listPackagesOnVehicle(veh), StatusCodes.OK,
SuccessPackageIds(s.devices(veh)._1.map(_.id))))
case ListPackagesFor(flt) =>
State.get map (s => Semantics(Some(q),
listPackagesForFilter(flt), StatusCodes.OK,
SuccessPackages(s.packagesHaving(flt))))
case ListFilters =>
State.get map (s => Semantics(Some(q),
listFilters, StatusCodes.OK,
SuccessFilters(s.filters)))
case ListFiltersFor(pak) =>
State.get map (s => Semantics(Some(q),
listFiltersForPackage(pak), StatusCodes.OK,
SuccessFilters(s.packages(pak))))
case Resolve(pkgId) =>
State.get map (s => Semantics(Some(q),
resolve2(defaultNs, pkgId), StatusCodes.OK,
SuccessVehicleMap(vehicleMap(s, pkgId))))
}
private def vehicleMap(s: RawStore, pkgId: PackageId): Map[Uuid, List[PackageId]] = {
// An AST for each filter associated to the given package.
val filters: Set[FilterAST] =
for (
flt <- s.lookupFilters(pkgId).get
) yield parseValidFilter(flt.expression)
// An AST AND-ing the filters associated to the given package.
val expr: FilterAST =
filters.toList.foldLeft[FilterAST](True)(And)
// Apply the resulting filter to select vehicles.
val devIds: Iterable[Uuid] = for {
(dev, (paks, comps)) <- s.devices
pakIds = paks.map(_.id).toSeq
compIds = comps.map(_.partNumber).toSeq
// TODO This will not work
entry2 = (DeviceId(dev.show), (pakIds, compIds))
if query(expr)(entry2)
} yield dev
ResolveFunctions.makeFakeDependencyMap(pkgId, devIds.toSeq)
}
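  // Illustration of the fold above (the filter expressions are hypothetical, not taken
  // from the test fixtures): if two filters parse to ASTs f1 and f2, then
  //   List(f1, f2).foldLeft[FilterAST](True)(And) == And(And(True, f1), f2)
  // and a device survives only when `query` of that combined AST holds for its
  // (packages, components) entry; with no filters the fold is just True, so every
  // device passes.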
// scalastyle:off magic.number
def genQuery: StateT[Gen, RawStore, Query] =
for {
s <- StateT.stateTMonadState[Gen, RawStore].get
vehs <- Store.numberOfVehicles
pkgs <- Store.numberOfPackages
cmps <- Store.numberOfComponents
flts <- Store.numberOfFilters
vcomp <- Store.numberOfVehiclesWithSomeComponent
vpaks <- Store.numberOfVehiclesWithSomePackage
pfilt <- Store.numberOfPackagesWithSomeFilter
qry <- lift(Gen.frequency(
(10, Gen.const(ListVehicles)),
(10, Gen.const(ListComponents)),
( 5, Gen.const(ListFilters)),
(if (vehs > 0) 10 else 0, Gen.oneOf(
Store.pickVehicle.runA(s).map(ListPackagesOnVehicle),
Store.pickVehicle.runA(s).map(ListComponentsFor),
Store.pickVehicle.runA(s).map(ListPackagesOnVehicle),
Store.pickVehicle.runA(s).map(ListComponentsFor)
)),
(if (pfilt > 0) 10 else 0, Gen.oneOf(
Store.pickPackageWithFilter.runA(s) map { case (pkg, flt) => ListPackagesFor(flt) },
Store.pickPackageWithFilter.runA(s) map { case (pkg, flt) => ListFiltersFor(pkg) }
)),
(if (vcomp > 0) 10 else 0,
Store.pickVehicleWithComponent.runA(s) map { case (veh, cmp) => ListVehiclesFor(cmp) }),
(if (pkgs > 0) 10 else 0,
Store.pickPackage.runA(s).map(pkg => Resolve(pkg.id)))
))
} yield qry
// scalastyle:on
def genQueries(n: Int)
(implicit ec: ExecutionContext): StateT[Gen, RawStore, List[Query]] = {
if (n < 1) throw new IllegalArgumentException
for {
q <- genQuery
qs <- if (n == 1) genQuery.map(List(_)) else genQueries(n - 1)
} yield q :: qs
}
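  // Hedged usage sketch (not part of the original API): genQueries is a StateT over
  // ScalaCheck's Gen, so a hypothetical caller threads a starting RawStore through it
  // and samples, roughly:
  //
  //   import scala.concurrent.ExecutionContext.Implicits.global
  //   val sampled: Option[(RawStore, List[Query])] =
  //     Query.genQueries(5).run(initialStore).sample   // initialStore is assumed to exist
  //
  // and then feeds the generated queries to semQueries to obtain the Semantics expected
  // against that same store.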
}
| PDXostc/rvi_sota_server | external-resolver/src/test/scala/org/genivi/sota/resolver/test/random/Query.scala | Scala | mpl-2.0 | 6,400 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.utils.stats
import org.junit.runner.RunWith
import org.locationtech.geomesa.curve.TimePeriod
import org.locationtech.geomesa.utils.geotools.GeoToolsDateFormat
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class Z3FrequencyTest extends Specification with StatTestHelper {
def createStat(precision: Int, observe: Boolean): Z3Frequency = {
val s = Stat(sft, Stat.Z3Frequency("geom", "dtg", TimePeriod.Week, precision))
if (observe) {
features.foreach { s.observe }
}
s.asInstanceOf[Z3Frequency]
}
def createStat(observe: Boolean = true): Z3Frequency = createStat(25, observe)
def toDate(string: String) = GeoToolsDateFormat.parseDateTime(string).toDate
def toGeom(string: String) = WKTUtils.read(string)
"FrequencyZ3 stat" should {
"work with geometries and dates" >> {
"be empty initially" >> {
val stat = createStat(observe = false)
stat.isEmpty must beTrue
stat.size mustEqual 0
}
"correctly bin values" >> {
val stat = createStat()
stat.isEmpty must beFalse
stat.size mustEqual 100
forall(0 until 100) { i =>
stat.count(toGeom(s"POINT(-$i ${i / 2})"), toDate(f"2012-01-01T${i%24}%02d:00:00.000Z")) must beBetween(1L, 6L)
}
}
"serialize and deserialize" >> {
val stat = createStat()
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Z3Frequency]
unpacked.asInstanceOf[Z3Frequency].geomIndex mustEqual stat.geomIndex
unpacked.asInstanceOf[Z3Frequency].dtgIndex mustEqual stat.dtgIndex
unpacked.asInstanceOf[Z3Frequency].precision mustEqual stat.precision
unpacked.asInstanceOf[Z3Frequency].toJson mustEqual stat.toJson
}
"serialize and deserialize empty stats" >> {
val stat = createStat(observe = false)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Z3Frequency]
unpacked.asInstanceOf[Z3Frequency].geomIndex mustEqual stat.geomIndex
unpacked.asInstanceOf[Z3Frequency].dtgIndex mustEqual stat.dtgIndex
unpacked.asInstanceOf[Z3Frequency].precision mustEqual stat.precision
unpacked.asInstanceOf[Z3Frequency].toJson mustEqual stat.toJson
}
"deserialize as immutable value" >> {
val stat = createStat()
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed, immutable = true)
unpacked must beAnInstanceOf[Z3Frequency]
unpacked.asInstanceOf[Z3Frequency].geomIndex mustEqual stat.geomIndex
unpacked.asInstanceOf[Z3Frequency].dtgIndex mustEqual stat.dtgIndex
unpacked.asInstanceOf[Z3Frequency].precision mustEqual stat.precision
unpacked.asInstanceOf[Z3Frequency].toJson mustEqual stat.toJson
unpacked.clear must throwAn[Exception]
unpacked.+=(stat) must throwAn[Exception]
unpacked.observe(features.head) must throwAn[Exception]
unpacked.unobserve(features.head) must throwAn[Exception]
}
"clear" >> {
val stat = createStat()
stat.clear()
stat.isEmpty must beTrue
stat.size mustEqual 0
forall(0 until 100) { i =>
stat.count(toGeom(s"POINT(-$i ${i / 2})"), toDate(f"2012-01-01T${i%24}%02d:00:00.000Z")) mustEqual 0
}
stat.count(toGeom("POINT(-180 -90)"), toDate("2012-01-01T00:00:00.000Z")) mustEqual 0
}
}
}
}
| nagavallia/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/stats/Z3FrequencyTest.scala | Scala | apache-2.0 | 4,236 |
package codecheck.github
package events
import org.scalatest.FunSpec
import org.scalatest.Inside
import org.scalatest.Matchers
class GitHubEventSpec extends FunSpec with Matchers with Inside
with IssueEventJson
with PullRequestEventJson
with PullRequestReviewEventJson
with PushEventJson {
describe("GitHubEvent(issue, JValue)") {
val event = GitHubEvent("issues", issueEventJson)
it("should yield IssueEvent") {
event shouldBe a [IssueEvent]
}
describe("IssueEvent") {
inside(event) {
case e @ IssueEvent(name, _) =>
it("should have a name") {
assert(name === "issues")
}
it("should have an action") {
assert(e.action === models.IssueAction.opened)
}
it("should have an issue") {
e.issue shouldBe a [models.Issue]
}
describe("Issue") {
val issue = e.issue
it("should have a number") {
assert(issue.number === 2L)
}
it("should have a title") {
assert(issue.title === "Spelling error in the README file")
}
it("should have a state") {
assert(issue.state === models.IssueState.open)
}
it("should have a body") {
val exp = ""
assert(issue.body === exp)
}
}
}
}
}
describe("GitHubEvent(push, JValue)") {
val event = GitHubEvent("push", pushEventJson)
it("should yield PushEvent") {
event shouldBe a [PushEvent]
}
describe("Push") {
inside(event) {
case e @ PushEvent(name, _) =>
it("should have a name") {
assert(name === "push")
}
it("should have a ref") {
assert(e.ref === "refs/heads/changes")
}
it("should have a base ref") {
assert(e.base_ref === None)
}
it("should have a before") {
assert(e.before === "9049f1265b7d61be4a8904a9a27120d2064dab3b")
}
it("should have an after") {
assert(e.after === "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c")
}
it("should have a head commit") {
e.head_commit shouldBe a [PushCommit]
}
describe("PushCommit") {
val commit = e.head_commit
it("should have a id") {
assert(commit.id === "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c")
}
it("should have a message") {
assert(commit.message === "Update README.md")
}
it("should have a timestamp") {
assert(commit.timestamp === "2015-05-05T19:40:15-04:00")
}
it("should have a tree_id") {
assert(commit.tree_id === "f9d2a07e9488b91af2641b26b9407fe22a451433")
}
it("should have a comitter") {
commit.committer shouldBe a [models.User]
}
}
it("should have a repository") {
e.repository shouldBe a [models.Repository]
}
describe("Repository") {
val repo = e.repository
it("should have an id") {
assert(repo.id === 35129377)
}
it("should have a name") {
assert(repo.name === "public-repo")
}
it("should have a full_name") {
assert(repo.full_name === "baxterthehacker/public-repo")
}
it("should have a owner") {
repo.owner shouldBe a [models.User]
}
}
it("should have a pusher") {
e.pusher shouldBe a [models.User]
}
it("should have a sender") {
e.sender shouldBe a [models.User]
}
}
}
}
describe("GitHubEvent(pull_request, JValue)") {
val event = GitHubEvent("pull_request", pullRequestEventJson)
it("should yield PullRequestEvent") {
event shouldBe a [PullRequestEvent]
}
describe("PullRequest") {
inside(event) {
case e @ PullRequestEvent(name, _) =>
it("should have a name") {
assert(name === "pull_request")
}
it("should have a number") {
assert(e.number === 1L)
}
it("should have an action") {
assert(e.action === models.PullRequestAction.opened)
}
it("should have a pull request") {
e.pull_request shouldBe a [models.PullRequest]
}
describe("PullRequest") {
val pr = e.pull_request
it("should have a number") {
assert(pr.number === 1L)
}
it("should have a title") {
assert(pr.title === "Update the README with new information")
}
it("should have a state") {
assert(pr.state === models.IssueState.open)
}
it("should have a body") {
val exp = "This is a pretty simple change that we need to pull into master."
assert(pr.body === exp)
}
it("should have a head") {
pr.head shouldBe a [models.PullRequestRef]
}
describe("PullRequestRef") {
val head = pr.head
it("should have a label") {
assert(head.label === "baxterthehacker:changes")
}
it("should have a ref") {
assert(head.ref === "changes")
}
it("should have a sha") {
assert(head.sha === "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c")
}
it("should have a user") {
head.user shouldBe a [models.User]
}
it("should have a repo") {
head.repo.get shouldBe a [models.Repository]
}
}
it("should have a base") {
pr.base shouldBe a [models.PullRequestRef]
}
}
}
}
}
describe("GitHubEvent(pull_request_review, JValue)") {
val event = GitHubEvent("pull_request_review", pullRequestReviewEventJson)
it("should yield PullRequestReviewEvent") {
event shouldBe a [PullRequestReviewEvent]
}
describe("PullRequestReviewEvent") {
inside(event) {
case e @ PullRequestReviewEvent(name, _) =>
it("should have a name") {
assert(name === "pull_request_review")
}
it("should have an action") {
assert(e.action === models.PullRequestReviewAction.submitted)
}
it("should have a review") {
e.review shouldBe a [models.PullRequestReview]
}
describe("PullRequestReview") {
val review = e.review
it("should have an id") {
assert(review.id === 2626884L)
}
it("should have a state") {
assert(review.state === models.PullRequestReviewState.approved)
}
it("should have a body") {
val exp = "Looks great!"
assert(review.body === exp)
}
}
it("should have a pull request") {
e.pull_request shouldBe a [models.PullRequest]
}
describe("PullRequest") {
val pr = e.pull_request
it("should have a number") {
assert(pr.number === 8L)
}
it("should have a title") {
assert(pr.title === "Add a README description")
}
it("should have a state") {
assert(pr.state === models.IssueState.open)
}
it("should have a body") {
val exp = "Just a few more details"
assert(pr.body === exp)
}
it("should have a head") {
pr.head shouldBe a [models.PullRequestRef]
}
describe("PullRequestRef") {
val head = pr.head
it("should have a label") {
assert(head.label === "skalnik:patch-2")
}
it("should have a ref") {
assert(head.ref === "patch-2")
}
it("should have a sha") {
assert(head.sha === "b7a1f9c27caa4e03c14a88feb56e2d4f7500aa63")
}
it("should have a user") {
head.user shouldBe a [models.User]
}
it("should have a repo") {
head.repo.get shouldBe a [models.Repository]
}
}
it("should have a base") {
pr.base shouldBe a [models.PullRequestRef]
}
}
it("should have a repository") {
e.repository shouldBe a [models.Repository]
}
describe("Repository") {
val repo = e.repository
it("should have an id") {
assert(repo.id === 35129377L)
}
it("should have a name") {
assert(repo.name === "public-repo")
}
it("should have a full_name") {
assert(repo.full_name === "baxterthehacker/public-repo")
}
it("should have a url") {
assert(repo.url === "https://api.github.com/repos/baxterthehacker/public-repo")
}
}
it("should have a sender") {
e.sender shouldBe a [models.User]
}
describe("User") {
val user = e.sender
it("should have an id") {
assert(user.id === 6752317L)
}
it("should have a login") {
assert(user.login === "baxterthehacker")
}
it("should have a name") {
assert(user.name === None)
}
}
}
}
}
}
| code-check/github-api-scala | src/test/scala/events/GitHubEventSpec.scala | Scala | mit | 9,950 |
/*
* Copyright © 2015-2019 the contributors (see Contributors.md).
*
* This file is part of Knora.
*
* Knora is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Knora is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with Knora. If not, see <http://www.gnu.org/licenses/>.
*/
/**
 * To test UsersResponder we need to be able to start it in isolation. Currently UsersResponder
 * extends ResponderADM, which complicates testing because we cannot inject the TestActor system.
*/
package org.knora.webapi.responders.admin
import java.util.UUID
import akka.actor.Status.Failure
import akka.testkit.ImplicitSender
import com.typesafe.config.{Config, ConfigFactory}
import org.knora.webapi._
import org.knora.webapi.messages.admin.responder.groupsmessages.{GroupMembersGetRequestADM, GroupMembersGetResponseADM}
import org.knora.webapi.messages.admin.responder.projectsmessages.{ProjectAdminMembersGetRequestADM, ProjectAdminMembersGetResponseADM, ProjectIdentifierADM, ProjectMembersGetRequestADM, ProjectMembersGetResponseADM}
import org.knora.webapi.messages.admin.responder.usersmessages._
import org.knora.webapi.messages.v2.routing.authenticationmessages.KnoraPasswordCredentialsV2
import org.knora.webapi.routing.Authenticator
import org.knora.webapi.util.StringFormatter
import scala.concurrent.duration._
object UsersResponderADMSpec {
val config: Config = ConfigFactory.parseString(
"""
akka.loglevel = "DEBUG"
akka.stdout-loglevel = "DEBUG"
app.use-redis-cache = true
""".stripMargin)
}
/**
* This spec is used to test the messages received by the [[UsersResponderADM]] actor.
*/
class UsersResponderADMSpec extends CoreSpec(UsersResponderADMSpec.config) with ImplicitSender with Authenticator {
private val timeout = 5.seconds
private val rootUser = SharedTestDataADM.rootUser
private val anythingAdminUser = SharedTestDataADM.anythingAdminUser
private val normalUser = SharedTestDataADM.normalUser
private val incunabulaUser = SharedTestDataADM.incunabulaProjectAdminUser
private val imagesProject = SharedTestDataADM.imagesProject
private val imagesReviewerGroup = SharedTestDataADM.imagesReviewerGroup
implicit val stringFormatter: StringFormatter = StringFormatter.getGeneralInstance
"The UsersResponder " when {
"asked about all users" should {
"return a list if asked by SystemAdmin" in {
responderManager ! UsersGetRequestADM(requestingUser = rootUser)
val response = expectMsgType[UsersGetResponseADM](timeout)
response.users.nonEmpty should be (true)
response.users.size should be (18)
}
"return a list if asked by ProjectAdmin" in {
responderManager ! UsersGetRequestADM(requestingUser = anythingAdminUser)
val response = expectMsgType[UsersGetResponseADM](timeout)
response.users.nonEmpty should be (true)
response.users.size should be (18)
}
"return 'ForbiddenException' if asked by normal user'" in {
responderManager ! UsersGetRequestADM(requestingUser = normalUser)
expectMsg(Failure(ForbiddenException("ProjectAdmin or SystemAdmin permissions are required.")))
}
"not return the system and anonymous users" in {
responderManager ! UsersGetRequestADM(requestingUser = rootUser)
val response = expectMsgType[UsersGetResponseADM](timeout)
response.users.nonEmpty should be (true)
response.users.size should be (18)
response.users.count(_.id == KnoraSystemInstances.Users.AnonymousUser.id) should be (0)
response.users.count(_.id == KnoraSystemInstances.Users.SystemUser.id) should be (0)
}
}
"asked about an user identified by 'iri' " should {
"return a profile if the user (root user) is known" in {
responderManager ! UserGetADM(
identifier = UserIdentifierADM(maybeIri = Some(rootUser.id)),
userInformationTypeADM = UserInformationTypeADM.FULL,
requestingUser = KnoraSystemInstances.Users.SystemUser
)
expectMsg(Some(rootUser.ofType(UserInformationTypeADM.FULL)))
}
"return a profile if the user (incunabula user) is known" in {
responderManager ! UserGetADM(
identifier = UserIdentifierADM(maybeIri = Some(incunabulaUser.id)),
userInformationTypeADM = UserInformationTypeADM.FULL,
requestingUser = KnoraSystemInstances.Users.SystemUser
)
expectMsg(Some(incunabulaUser.ofType(UserInformationTypeADM.FULL)))
}
"return 'NotFoundException' when the user is unknown" in {
responderManager ! UserGetRequestADM(
identifier = UserIdentifierADM(maybeIri = Some("http://rdfh.ch/users/notexisting")),
userInformationTypeADM = UserInformationTypeADM.FULL,
requestingUser = KnoraSystemInstances.Users.SystemUser
)
expectMsg(Failure(NotFoundException(s"User 'http://rdfh.ch/users/notexisting' not found")))
}
"return 'None' when the user is unknown" in {
responderManager ! UserGetADM(
identifier = UserIdentifierADM(maybeIri = Some("http://rdfh.ch/users/notexisting")),
userInformationTypeADM = UserInformationTypeADM.FULL,
requestingUser = KnoraSystemInstances.Users.SystemUser
)
expectMsg(None)
}
}
"asked about an user identified by 'email'" should {
"return a profile if the user (root user) is known" in {
responderManager ! UserGetADM(
identifier = UserIdentifierADM(maybeEmail = Some(rootUser.email)),
userInformationTypeADM = UserInformationTypeADM.FULL,
requestingUser = KnoraSystemInstances.Users.SystemUser
)
expectMsg(Some(rootUser.ofType(UserInformationTypeADM.FULL)))
}
"return a profile if the user (incunabula user) is known" in {
responderManager ! UserGetADM(
identifier = UserIdentifierADM(maybeEmail = Some(incunabulaUser.email)),
userInformationTypeADM = UserInformationTypeADM.FULL,
requestingUser = KnoraSystemInstances.Users.SystemUser
)
expectMsg(Some(incunabulaUser.ofType(UserInformationTypeADM.FULL)))
}
"return 'NotFoundException' when the user is unknown" in {
responderManager ! UserGetRequestADM(
identifier = UserIdentifierADM(maybeEmail = Some("[email protected]")),
userInformationTypeADM = UserInformationTypeADM.FULL,
requestingUser = KnoraSystemInstances.Users.SystemUser
)
expectMsg(Failure(NotFoundException(s"User '[email protected]' not found")))
}
"return 'None' when the user is unknown" in {
responderManager ! UserGetADM(
identifier = UserIdentifierADM(maybeEmail = Some("[email protected]")),
userInformationTypeADM = UserInformationTypeADM.FULL,
requestingUser = KnoraSystemInstances.Users.SystemUser
)
expectMsg(None)
}
}
"asked about an user identified by 'username'" should {
"return a profile if the user (root user) is known" in {
responderManager ! UserGetADM(
identifier = UserIdentifierADM(maybeUsername = Some(rootUser.username)),
userInformationTypeADM = UserInformationTypeADM.FULL,
requestingUser = KnoraSystemInstances.Users.SystemUser
)
expectMsg(Some(rootUser.ofType(UserInformationTypeADM.FULL)))
}
"return a profile if the user (incunabula user) is known" in {
responderManager ! UserGetADM(
identifier = UserIdentifierADM(maybeUsername = Some(incunabulaUser.username)),
userInformationTypeADM = UserInformationTypeADM.FULL,
requestingUser = KnoraSystemInstances.Users.SystemUser
)
expectMsg(Some(incunabulaUser.ofType(UserInformationTypeADM.FULL)))
}
"return 'NotFoundException' when the user is unknown" in {
responderManager ! UserGetRequestADM(
identifier = UserIdentifierADM(maybeUsername = Some("userwrong")),
userInformationTypeADM = UserInformationTypeADM.FULL,
requestingUser = KnoraSystemInstances.Users.SystemUser
)
expectMsg(Failure(NotFoundException(s"User 'userwrong' not found")))
}
"return 'None' when the user is unknown" in {
responderManager ! UserGetADM(
identifier = UserIdentifierADM(maybeUsername = Some("userwrong")),
userInformationTypeADM = UserInformationTypeADM.FULL,
requestingUser = KnoraSystemInstances.Users.SystemUser
)
expectMsg(None)
}
}
"asked to create a new user" should {
"CREATE the user and return it's profile if the supplied email is unique " in {
responderManager ! UserCreateRequestADM(
createRequest = CreateUserApiRequestADM(
username = "donald.duck",
email = "[email protected]",
givenName = "Donald",
familyName = "Duck",
password = "test",
status = true,
lang = "en",
systemAdmin = false
),
requestingUser = SharedTestDataADM.anonymousUser,
apiRequestID = UUID.randomUUID
)
val u = expectMsgType[UserOperationResponseADM](timeout).user
u.username shouldBe "donald.duck"
u.givenName shouldBe "Donald"
u.familyName shouldBe "Duck"
u.email shouldBe "[email protected]"
u.lang shouldBe "en"
}
"return a 'DuplicateValueException' if the supplied 'username' is not unique" in {
responderManager ! UserCreateRequestADM(
createRequest = CreateUserApiRequestADM(
username = "root",
email = "[email protected]",
givenName = "Donal",
familyName = "Duck",
password = "test",
status = true,
lang = "en",
systemAdmin = false
),
SharedTestDataADM.anonymousUser,
UUID.randomUUID
)
expectMsg(Failure(DuplicateValueException(s"User with the username: 'root' already exists")))
}
"return a 'DuplicateValueException' if the supplied 'email' is not unique" in {
responderManager ! UserCreateRequestADM(
createRequest = CreateUserApiRequestADM(
username = "root2",
email = "[email protected]",
givenName = "Donal",
familyName = "Duck",
password = "test",
status = true,
lang = "en",
systemAdmin = false
),
SharedTestDataADM.anonymousUser,
UUID.randomUUID
)
expectMsg(Failure(DuplicateValueException(s"User with the email: '[email protected]' already exists")))
}
}
"asked to update a user" should {
"UPDATE the user's basic information" in {
/* User information is updated by the user */
responderManager ! UserChangeBasicUserInformationRequestADM(
userIri = SharedTestDataADM.normalUser.id,
changeUserRequest = ChangeUserApiRequestADM(
email = None,
givenName = Some("Donald"),
familyName = None,
lang = None
),
requestingUser = SharedTestDataADM.normalUser,
UUID.randomUUID
)
val response1 = expectMsgType[UserOperationResponseADM](timeout)
response1.user.givenName should equal ("Donald")
/* User information is updated by a system admin */
responderManager ! UserChangeBasicUserInformationRequestADM(
userIri = SharedTestDataADM.normalUser.id,
changeUserRequest = ChangeUserApiRequestADM(
email = None,
givenName = None,
familyName = Some("Duck"),
lang = None
),
requestingUser = SharedTestDataADM.superUser,
UUID.randomUUID
)
val response2 = expectMsgType[UserOperationResponseADM](timeout)
response2.user.familyName should equal ("Duck")
/* User information is updated by a system admin */
responderManager ! UserChangeBasicUserInformationRequestADM(
userIri = SharedTestDataADM.normalUser.id,
changeUserRequest = ChangeUserApiRequestADM(
email = None,
givenName = Some(SharedTestDataADM.normalUser.givenName),
familyName = Some(SharedTestDataADM.normalUser.familyName),
lang = None
),
requestingUser = SharedTestDataADM.superUser,
UUID.randomUUID
)
val response3 = expectMsgType[UserOperationResponseADM](timeout)
response3.user.givenName should equal (SharedTestDataADM.normalUser.givenName)
response3.user.familyName should equal (SharedTestDataADM.normalUser.familyName)
}
"UPDATE the user's password (by himself)" in {
responderManager ! UserChangePasswordRequestADM(
userIri = SharedTestDataADM.normalUser.id,
changeUserRequest = ChangeUserApiRequestADM(
requesterPassword = Some("test"), // of the requesting user
newPassword = Some("test123456")
),
requestingUser = SharedTestDataADM.normalUser,
apiRequestID = UUID.randomUUID()
)
expectMsgType[UserOperationResponseADM](timeout)
// need to be able to authenticate credentials with new password
val resF = Authenticator.authenticateCredentialsV2(Some(KnoraPasswordCredentialsV2(UserIdentifierADM(maybeEmail = Some(normalUser.email)), "test123456")))(system, responderManager, executionContext)
resF map { res => assert(res) }
}
"UPDATE the user's password (by a system admin)" in {
responderManager ! UserChangePasswordRequestADM(
userIri = SharedTestDataADM.normalUser.id,
changeUserRequest = ChangeUserApiRequestADM(
requesterPassword = Some("test"), // of the requesting user
newPassword = Some("test654321")
),
requestingUser = SharedTestDataADM.rootUser,
apiRequestID = UUID.randomUUID()
)
expectMsgType[UserOperationResponseADM](timeout)
// need to be able to authenticate credentials with new password
val resF = Authenticator.authenticateCredentialsV2(Some(KnoraPasswordCredentialsV2(UserIdentifierADM(maybeEmail = Some(normalUser.email)), "test654321")))(system, responderManager, executionContext)
resF map { res => assert(res) }
}
"UPDATE the user's status, (deleting) making him inactive " in {
responderManager ! UserChangeStatusRequestADM(
userIri = SharedTestDataADM.normalUser.id,
changeUserRequest = ChangeUserApiRequestADM(status = Some(false)),
requestingUser = SharedTestDataADM.superUser,
UUID.randomUUID()
)
val response1 = expectMsgType[UserOperationResponseADM](timeout)
response1.user.status should equal (false)
responderManager ! UserChangeStatusRequestADM(
userIri = SharedTestDataADM.normalUser.id,
changeUserRequest = ChangeUserApiRequestADM(status = Some(true)),
requestingUser = SharedTestDataADM.superUser,
UUID.randomUUID()
)
val response2 = expectMsgType[UserOperationResponseADM](timeout)
response2.user.status should equal (true)
}
"UPDATE the user's system admin membership" in {
responderManager ! UserChangeSystemAdminMembershipStatusRequestADM(
userIri = SharedTestDataADM.normalUser.id,
changeUserRequest = ChangeUserApiRequestADM(systemAdmin = Some(true)),
requestingUser = SharedTestDataADM.superUser,
UUID.randomUUID()
)
val response1 = expectMsgType[UserOperationResponseADM](timeout)
response1.user.isSystemAdmin should equal (true)
responderManager ! UserChangeSystemAdminMembershipStatusRequestADM(
userIri = SharedTestDataADM.normalUser.id,
changeUserRequest = ChangeUserApiRequestADM(systemAdmin = Some(false)),
requestingUser = SharedTestDataADM.superUser,
UUID.randomUUID()
)
val response2 = expectMsgType[UserOperationResponseADM](timeout)
response2.user.permissions.isSystemAdmin should equal (false)
}
"return a 'ForbiddenException' if the user requesting update is not the user itself or system admin" in {
/* User information is updated by other normal user */
responderManager ! UserChangeBasicUserInformationRequestADM(
userIri = SharedTestDataADM.superUser.id,
changeUserRequest = ChangeUserApiRequestADM(
email = None,
givenName = Some("Donald"),
familyName = None,
lang = None
),
requestingUser = SharedTestDataADM.normalUser,
UUID.randomUUID
)
expectMsg(Failure(ForbiddenException("User information can only be changed by the user itself or a system administrator")))
/* Password is updated by other normal user */
responderManager ! UserChangePasswordRequestADM(
userIri = SharedTestDataADM.superUser.id,
changeUserRequest = ChangeUserApiRequestADM(
requesterPassword = Some("test"),
newPassword = Some("test123456")
),
requestingUser = SharedTestDataADM.normalUser,
UUID.randomUUID
)
expectMsg(Failure(ForbiddenException("User's password can only be changed by the user itself or a system admin.")))
/* Status is updated by other normal user */
responderManager ! UserChangeStatusRequestADM(
userIri = SharedTestDataADM.superUser.id,
changeUserRequest = ChangeUserApiRequestADM(status = Some(false)),
requestingUser = SharedTestDataADM.normalUser,
UUID.randomUUID
)
expectMsg(Failure(ForbiddenException("User's status can only be changed by the user itself or a system administrator")))
/* System admin group membership */
responderManager ! UserChangeSystemAdminMembershipStatusRequestADM(
userIri = SharedTestDataADM.normalUser.id,
changeUserRequest = ChangeUserApiRequestADM(systemAdmin = Some(true)),
requestingUser = SharedTestDataADM.normalUser,
UUID.randomUUID()
)
expectMsg(Failure(ForbiddenException("User's system admin membership can only be changed by a system administrator")))
}
"return 'BadRequest' if system user is requested to change" in {
responderManager ! UserChangeStatusRequestADM(
userIri = KnoraSystemInstances.Users.SystemUser.id,
changeUserRequest = ChangeUserApiRequestADM(status = Some(false)),
requestingUser = SharedTestDataADM.superUser,
UUID.randomUUID()
)
expectMsg(Failure(BadRequestException("Changes to built-in users are not allowed.")))
}
"return 'BadRequest' if anonymous user is requested to change" in {
responderManager ! UserChangeStatusRequestADM(
userIri = KnoraSystemInstances.Users.AnonymousUser.id,
changeUserRequest = ChangeUserApiRequestADM(status = Some(false)),
requestingUser = SharedTestDataADM.superUser,
UUID.randomUUID()
)
expectMsg(Failure(BadRequestException("Changes to built-in users are not allowed.")))
}
"return 'BadRequest' if nothing would be changed during the update" in {
an [BadRequestException] should be thrownBy ChangeUserApiRequestADM(None, None, None, None, None, None, None, None)
}
}
"asked to update the user's project membership" should {
"ADD user to project" in {
responderManager ! UserProjectMembershipsGetRequestADM(normalUser.id, rootUser, UUID.randomUUID())
val membershipsBeforeUpdate = expectMsgType[UserProjectMembershipsGetResponseADM](timeout)
membershipsBeforeUpdate.projects should equal (Seq())
responderManager ! UserProjectMembershipAddRequestADM(normalUser.id, imagesProject.id, rootUser, UUID.randomUUID())
val membershipUpdateResponse = expectMsgType[UserOperationResponseADM](timeout)
responderManager ! UserProjectMembershipsGetRequestADM(normalUser.id, rootUser, UUID.randomUUID())
val membershipsAfterUpdate = expectMsgType[UserProjectMembershipsGetResponseADM](timeout)
membershipsAfterUpdate.projects should equal (Seq(imagesProject))
responderManager ! ProjectMembersGetRequestADM(
ProjectIdentifierADM(
maybeIri = Some(imagesProject.id)),
requestingUser = KnoraSystemInstances.Users.SystemUser
)
val received: ProjectMembersGetResponseADM = expectMsgType[ProjectMembersGetResponseADM](timeout)
received.members.map(_.id) should contain (normalUser.id)
}
"DELETE user from project" in {
responderManager ! UserProjectMembershipsGetRequestADM(normalUser.id, rootUser, UUID.randomUUID())
val membershipsBeforeUpdate = expectMsgType[UserProjectMembershipsGetResponseADM](timeout)
membershipsBeforeUpdate.projects should equal (Seq(imagesProject))
responderManager ! UserProjectMembershipRemoveRequestADM(normalUser.id, imagesProject.id, rootUser, UUID.randomUUID())
expectMsgType[UserOperationResponseADM](timeout)
responderManager ! UserProjectMembershipsGetRequestADM(normalUser.id, rootUser, UUID.randomUUID())
val membershipsAfterUpdate = expectMsgType[UserProjectMembershipsGetResponseADM](timeout)
membershipsAfterUpdate.projects should equal (Seq())
responderManager ! ProjectMembersGetRequestADM(
ProjectIdentifierADM(maybeIri = Some(imagesProject.id)),
requestingUser = rootUser
)
val received: ProjectMembersGetResponseADM = expectMsgType[ProjectMembersGetResponseADM](timeout)
received.members should not contain normalUser.ofType(UserInformationTypeADM.RESTRICTED)
}
"return a 'ForbiddenException' if the user requesting update is not the project or system admin" in {
/* User is added to a project by a normal user */
responderManager ! UserProjectMembershipAddRequestADM(normalUser.id, imagesProject.id, normalUser, UUID.randomUUID())
expectMsg(Failure(ForbiddenException("User's project membership can only be changed by a project or system administrator")))
/* User is removed from a project by a normal user */
responderManager ! UserProjectMembershipRemoveRequestADM(normalUser.id, imagesProject.id, normalUser, UUID.randomUUID())
expectMsg(Failure(ForbiddenException("User's project membership can only be changed by a project or system administrator")))
}
}
"asked to update the user's project admin group membership" should {
"ADD user to project admin group" in {
responderManager ! UserProjectAdminMembershipsGetRequestADM(normalUser.id, rootUser, UUID.randomUUID())
val membershipsBeforeUpdate = expectMsgType[UserProjectAdminMembershipsGetResponseADM](timeout)
membershipsBeforeUpdate.projects should equal (Seq())
responderManager ! UserProjectAdminMembershipAddRequestADM(normalUser.id, imagesProject.id, rootUser, UUID.randomUUID())
expectMsgType[UserOperationResponseADM](timeout)
responderManager ! UserProjectAdminMembershipsGetRequestADM(normalUser.id, rootUser, UUID.randomUUID())
val membershipsAfterUpdate = expectMsgType[UserProjectAdminMembershipsGetResponseADM](timeout)
membershipsAfterUpdate.projects should equal (Seq(imagesProject))
responderManager ! ProjectAdminMembersGetRequestADM(
ProjectIdentifierADM(maybeIri = Some(imagesProject.id)),
requestingUser = rootUser
)
val received: ProjectAdminMembersGetResponseADM = expectMsgType[ProjectAdminMembersGetResponseADM](timeout)
received.members should contain (normalUser.ofType(UserInformationTypeADM.RESTRICTED))
}
"DELETE user from project admin group" in {
responderManager ! UserProjectAdminMembershipsGetRequestADM(normalUser.id, rootUser, UUID.randomUUID())
val membershipsBeforeUpdate = expectMsgType[UserProjectAdminMembershipsGetResponseADM](timeout)
membershipsBeforeUpdate.projects should equal (Seq(imagesProject))
responderManager ! UserProjectAdminMembershipRemoveRequestADM(normalUser.id, imagesProject.id, rootUser, UUID.randomUUID())
expectMsgType[UserOperationResponseADM](timeout)
responderManager ! UserProjectAdminMembershipsGetRequestADM(normalUser.id, rootUser, UUID.randomUUID())
val membershipsAfterUpdate = expectMsgType[UserProjectAdminMembershipsGetResponseADM](timeout)
membershipsAfterUpdate.projects should equal (Seq())
responderManager ! ProjectAdminMembersGetRequestADM(
ProjectIdentifierADM(maybeIri = Some(imagesProject.id)),
requestingUser = rootUser
)
val received: ProjectAdminMembersGetResponseADM = expectMsgType[ProjectAdminMembersGetResponseADM](timeout)
received.members should not contain normalUser.ofType(UserInformationTypeADM.RESTRICTED)
}
"return a 'ForbiddenException' if the user requesting update is not the project or system admin" in {
/* User is added to a project by a normal user */
responderManager ! UserProjectAdminMembershipAddRequestADM(normalUser.id, imagesProject.id, normalUser, UUID.randomUUID())
expectMsg(Failure(ForbiddenException("User's project admin membership can only be changed by a project or system administrator")))
/* User is removed from a project by a normal user */
responderManager ! UserProjectAdminMembershipRemoveRequestADM(normalUser.id, imagesProject.id, normalUser, UUID.randomUUID())
expectMsg(Failure(ForbiddenException("User's project admin membership can only be changed by a project or system administrator")))
}
}
"asked to update the user's group membership" should {
"ADD user to group" in {
responderManager ! UserGroupMembershipsGetRequestADM(normalUser.id, rootUser, UUID.randomUUID())
val membershipsBeforeUpdate = expectMsgType[UserGroupMembershipsGetResponseADM](timeout)
membershipsBeforeUpdate.groups should equal (Seq())
responderManager ! UserGroupMembershipAddRequestADM(normalUser.id, imagesReviewerGroup.id, rootUser, UUID.randomUUID())
expectMsgType[UserOperationResponseADM](timeout)
responderManager ! UserGroupMembershipsGetRequestADM(normalUser.id, rootUser, UUID.randomUUID())
val membershipsAfterUpdate = expectMsgType[UserGroupMembershipsGetResponseADM](timeout)
membershipsAfterUpdate.groups.map(_.id) should equal (Seq(imagesReviewerGroup.id))
responderManager ! GroupMembersGetRequestADM(
groupIri = imagesReviewerGroup.id,
requestingUser = rootUser
)
val received: GroupMembersGetResponseADM = expectMsgType[GroupMembersGetResponseADM](timeout)
received.members.map(_.id) should contain (normalUser.id)
}
"DELETE user from group" in {
responderManager ! UserGroupMembershipsGetRequestADM(normalUser.id, rootUser, UUID.randomUUID())
val membershipsBeforeUpdate = expectMsgType[UserGroupMembershipsGetResponseADM](timeout)
membershipsBeforeUpdate.groups.map(_.id) should equal (Seq(imagesReviewerGroup.id))
responderManager ! UserGroupMembershipRemoveRequestADM(normalUser.id, imagesReviewerGroup.id, rootUser, UUID.randomUUID())
expectMsgType[UserOperationResponseADM](timeout)
responderManager ! UserGroupMembershipsGetRequestADM(normalUser.id, rootUser, UUID.randomUUID())
val membershipsAfterUpdate = expectMsgType[UserGroupMembershipsGetResponseADM](timeout)
membershipsAfterUpdate.groups should equal (Seq())
responderManager ! GroupMembersGetRequestADM(
groupIri = imagesReviewerGroup.id,
requestingUser = rootUser
)
val received: GroupMembersGetResponseADM = expectMsgType[GroupMembersGetResponseADM](timeout)
received.members.map(_.id) should not contain normalUser.id
}
"return a 'ForbiddenException' if the user requesting update is not the project or system admin" in {
/* User is added to a project by a normal user */
responderManager ! UserGroupMembershipAddRequestADM(normalUser.id, imagesReviewerGroup.id, normalUser, UUID.randomUUID())
expectMsg(Failure(ForbiddenException("User's group membership can only be changed by a project or system administrator")))
/* User is removed from a project by a normal user */
responderManager ! UserGroupMembershipRemoveRequestADM(normalUser.id, imagesReviewerGroup.id, normalUser, UUID.randomUUID())
expectMsg(Failure(ForbiddenException("User's group membership can only be changed by a project or system administrator")))
}
}
}
}
| musicEnfanthen/Knora | webapi/src/test/scala/org/knora/webapi/responders/admin/UsersResponderADMSpec.scala | Scala | agpl-3.0 | 33,935 |
package util.exception
case class ContentRenderException(msg: String, e: Option[Exception] = None) extends Exception(msg, e.orNull) | metaxmx/pm15 | app/util/exception/ContentRenderException.scala | Scala | apache-2.0 | 132 |
package epic.logo
import scala.collection.JavaConversions._
import java.util.Arrays
import breeze.linalg._
import breeze.math.MutableInnerProductModule
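/**
 * A weight vector stored as `scale * underlying`, with its squared norm cached in `norm`.
 * Scalar multiplication (`*=`) only touches `scale` and the cached norm, so it is O(1);
 * additive updates (`+=`, `-=`) fold the increment into `underlying` at `1 / scale` and
 * patch the cached norm; `compile` materialises the product.
 *
 * Hedged usage sketch (assumes breeze's DenseVector instances are in scope; not part of
 * the original file):
 * {{{
 *   val w = new Weights(DenseVector.zeros[Double](10))
 *   w += DenseVector.rand(10)   // accumulate a feature vector
 *   w *= 0.5                    // cheap rescale: only the scale factor changes
 *   val dense = w.compile       // DenseVector holding scale * underlying
 * }}}
 */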
class Weights[W](var underlying: W, var scale : Double = 1.0)(implicit space: MutableInnerProductModule[W, Double]) {
import space._
var norm = calcNormSquared
private def calcNormSquared = (underlying dot underlying) * scale * scale
def checkNorm = {
val calcNorm = calcNormSquared
assert(NumUtils.approxEquals(this.`^2`, calcNorm, 1e-5))
}
def approxEquals(w : Weights[W]) = {
breeze.linalg.norm(compile - w.compile) < 1e-5
}
def compile: W = underlying * scale
override def toString() : String = {
scale + "*" + underlying
}
def *=(d : Double) = {
scale *= d
norm *= sq(d)
}
def *(fv : W) = {
scale * (underlying dot fv)
}
def +=(fv : W) = increment(fv, 1.0)
private def increment(fv: W, d: Double) = {
norm += sq(d / scale) * (fv dot fv)
norm += 2 * d * (fv dot underlying) / scale
axpy(d / scale, fv, underlying)
}
private final def sq(d : Double) = d * d
def increment(w : Weights[W], d : Double) = {
underlying += w.underlying * w.scale * d
}
def -=(fv : W) = {
increment(fv, -1.0)
}
def `^2` = norm
def zeroOut() = {
underlying = space.zeroLike.apply(underlying)
}
}
| langkilde/epic | src/main/scala/epic/logo/Weights.scala | Scala | apache-2.0 | 1,346 |
package org.joda.time.format
import java.util.Collection
import org.joda.time.DateTimeFieldType
import scala.collection.JavaConversions._
object ISODateTimeFormat {
def forFields(fields: Collection[DateTimeFieldType],
extended: Boolean,
strictISO: Boolean): DateTimeFormatter = {
if (fields == null || fields.size == 0) {
throw new IllegalArgumentException(
"The fields must not be null or empty")
}
val workingFields = fields.to[collection.mutable.HashSet]
val inputSize = workingFields.size
var reducedPrec = false
val bld = new DateTimeFormatterBuilder()
if (workingFields.contains(DateTimeFieldType.monthOfYear())) {
reducedPrec = dateByMonth(bld, workingFields, extended, strictISO)
} else if (workingFields.contains(DateTimeFieldType.dayOfYear())) {
reducedPrec = dateByOrdinal(bld, workingFields, extended, strictISO)
} else if (workingFields.contains(DateTimeFieldType.weekOfWeekyear())) {
reducedPrec = dateByWeek(bld, workingFields, extended, strictISO)
} else if (workingFields.contains(DateTimeFieldType.dayOfMonth())) {
reducedPrec = dateByMonth(bld, workingFields, extended, strictISO)
} else if (workingFields.contains(DateTimeFieldType.dayOfWeek())) {
reducedPrec = dateByWeek(bld, workingFields, extended, strictISO)
} else if (workingFields.remove(DateTimeFieldType.year())) {
bld.append(Constants.ye)
reducedPrec = true
} else if (workingFields.remove(DateTimeFieldType.weekyear())) {
bld.append(Constants.we)
reducedPrec = true
}
val datePresent = workingFields.size < inputSize
time(bld, workingFields, extended, strictISO, reducedPrec, datePresent)
if (bld.canBuildFormatter() == false) {
throw new IllegalArgumentException(
"No valid format for fields: " + fields)
}
try {
fields.retainAll(workingFields)
} catch {
case ex: UnsupportedOperationException =>
}
bld.toFormatter()
}
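  // Hedged usage sketch (illustrative only; the exact pattern depends on the fields
  // passed in):
  //
  //   val fields = java.util.Arrays.asList(DateTimeFieldType.year(), DateTimeFieldType.monthOfYear())
  //   val fmt = forFields(fields, extended = true, strictISO = false)
  //   // fmt prints a reduced-precision date such as "2016-05"; the UnsupportedOperationException
  //   // from retainAll on the fixed-size list is swallowed by the catch above.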
private def dateByMonth(bld: DateTimeFormatterBuilder,
fields: Collection[DateTimeFieldType],
extended: Boolean,
strictISO: Boolean): Boolean = {
var reducedPrec = false
if (fields.remove(DateTimeFieldType.year())) {
bld.append(Constants.ye)
if (fields.remove(DateTimeFieldType.monthOfYear())) {
if (fields.remove(DateTimeFieldType.dayOfMonth())) {
appendSeparator(bld, extended)
bld.appendMonthOfYear(2)
appendSeparator(bld, extended)
bld.appendDayOfMonth(2)
} else {
bld.appendLiteral('-')
bld.appendMonthOfYear(2)
reducedPrec = true
}
} else {
if (fields.remove(DateTimeFieldType.dayOfMonth())) {
checkNotStrictISO(fields, strictISO)
bld.appendLiteral('-')
bld.appendLiteral('-')
bld.appendDayOfMonth(2)
} else {
reducedPrec = true
}
}
} else if (fields.remove(DateTimeFieldType.monthOfYear())) {
bld.appendLiteral('-')
bld.appendLiteral('-')
bld.appendMonthOfYear(2)
if (fields.remove(DateTimeFieldType.dayOfMonth())) {
appendSeparator(bld, extended)
bld.appendDayOfMonth(2)
} else {
reducedPrec = true
}
} else if (fields.remove(DateTimeFieldType.dayOfMonth())) {
bld.appendLiteral('-')
bld.appendLiteral('-')
bld.appendLiteral('-')
bld.appendDayOfMonth(2)
}
reducedPrec
}
private def dateByOrdinal(bld: DateTimeFormatterBuilder,
fields: Collection[DateTimeFieldType],
extended: Boolean,
strictISO: Boolean): Boolean = {
var reducedPrec = false
if (fields.remove(DateTimeFieldType.year())) {
bld.append(Constants.ye)
if (fields.remove(DateTimeFieldType.dayOfYear())) {
appendSeparator(bld, extended)
bld.appendDayOfYear(3)
} else {
reducedPrec = true
}
} else if (fields.remove(DateTimeFieldType.dayOfYear())) {
bld.appendLiteral('-')
bld.appendDayOfYear(3)
}
reducedPrec
}
private def dateByWeek(bld: DateTimeFormatterBuilder,
fields: Collection[DateTimeFieldType],
extended: Boolean,
strictISO: Boolean): Boolean = {
var reducedPrec = false
if (fields.remove(DateTimeFieldType.weekyear())) {
bld.append(Constants.we)
if (fields.remove(DateTimeFieldType.weekOfWeekyear())) {
appendSeparator(bld, extended)
bld.appendLiteral('W')
bld.appendWeekOfWeekyear(2)
if (fields.remove(DateTimeFieldType.dayOfWeek())) {
appendSeparator(bld, extended)
bld.appendDayOfWeek(1)
} else {
reducedPrec = true
}
} else {
if (fields.remove(DateTimeFieldType.dayOfWeek())) {
checkNotStrictISO(fields, strictISO)
appendSeparator(bld, extended)
bld.appendLiteral('W')
bld.appendLiteral('-')
bld.appendDayOfWeek(1)
} else {
reducedPrec = true
}
}
} else if (fields.remove(DateTimeFieldType.weekOfWeekyear())) {
bld.appendLiteral('-')
bld.appendLiteral('W')
bld.appendWeekOfWeekyear(2)
if (fields.remove(DateTimeFieldType.dayOfWeek())) {
appendSeparator(bld, extended)
bld.appendDayOfWeek(1)
} else {
reducedPrec = true
}
} else if (fields.remove(DateTimeFieldType.dayOfWeek())) {
bld.appendLiteral('-')
bld.appendLiteral('W')
bld.appendLiteral('-')
bld.appendDayOfWeek(1)
}
reducedPrec
}
private def time(bld: DateTimeFormatterBuilder,
fields: Collection[DateTimeFieldType],
extended: Boolean,
strictISO: Boolean,
reducedPrec: Boolean,
datePresent: Boolean) {
val hour = fields.remove(DateTimeFieldType.hourOfDay())
val minute = fields.remove(DateTimeFieldType.minuteOfHour())
val second = fields.remove(DateTimeFieldType.secondOfMinute())
val milli = fields.remove(DateTimeFieldType.millisOfSecond())
if (!hour && !minute && !second && !milli) {
return
}
if (hour || minute || second || milli) {
if (strictISO && reducedPrec) {
throw new IllegalArgumentException(
"No valid ISO8601 format for fields because Date was reduced precision: " +
fields)
}
if (datePresent) {
bld.appendLiteral('T')
}
}
if (hour && minute && second || (hour && !second && !milli)) {} else {
if (strictISO && datePresent) {
throw new IllegalArgumentException(
"No valid ISO8601 format for fields because Time was truncated: " +
fields)
}
if (!hour && (minute && second || (minute && !milli) || second)) {} else {
if (strictISO) {
throw new IllegalArgumentException(
"No valid ISO8601 format for fields: " + fields)
}
}
}
if (hour) {
bld.appendHourOfDay(2)
} else if (minute || second || milli) {
bld.appendLiteral('-')
}
if (extended && hour && minute) {
bld.appendLiteral(':')
}
if (minute) {
bld.appendMinuteOfHour(2)
} else if (second || milli) {
bld.appendLiteral('-')
}
if (extended && minute && second) {
bld.appendLiteral(':')
}
if (second) {
bld.appendSecondOfMinute(2)
} else if (milli) {
bld.appendLiteral('-')
}
if (milli) {
bld.appendLiteral('.')
bld.appendMillisOfSecond(3)
}
}
private def checkNotStrictISO(fields: Collection[DateTimeFieldType],
strictISO: Boolean) {
if (strictISO) {
throw new IllegalArgumentException(
"No valid ISO8601 format for fields: " + fields)
}
}
private def appendSeparator(bld: DateTimeFormatterBuilder,
extended: Boolean) {
if (extended) {
bld.appendLiteral('-')
}
}
def dateParser(): DateTimeFormatter = Constants.dp
def localDateParser(): DateTimeFormatter = Constants.ldp
def dateElementParser(): DateTimeFormatter = Constants.dpe
def timeParser(): DateTimeFormatter = Constants.tp
def localTimeParser(): DateTimeFormatter = Constants.ltp
def timeElementParser(): DateTimeFormatter = Constants.tpe
def dateTimeParser(): DateTimeFormatter = Constants.dtp
def dateOptionalTimeParser(): DateTimeFormatter = Constants.dotp
def localDateOptionalTimeParser(): DateTimeFormatter = Constants.ldotp
def date(): DateTimeFormatter = yearMonthDay()
def time(): DateTimeFormatter = Constants.t
def timeNoMillis(): DateTimeFormatter = Constants.tx
def tTime(): DateTimeFormatter = Constants.tt
def tTimeNoMillis(): DateTimeFormatter = Constants.ttx
def dateTime(): DateTimeFormatter = Constants.dt
def dateTimeNoMillis(): DateTimeFormatter = Constants.dtx
def ordinalDate(): DateTimeFormatter = Constants.od
def ordinalDateTime(): DateTimeFormatter = Constants.odt
def ordinalDateTimeNoMillis(): DateTimeFormatter = Constants.odtx
def weekDate(): DateTimeFormatter = Constants.wwd
def weekDateTime(): DateTimeFormatter = Constants.wdt
def weekDateTimeNoMillis(): DateTimeFormatter = Constants.wdtx
def basicDate(): DateTimeFormatter = Constants.bd
def basicTime(): DateTimeFormatter = Constants.bt
def basicTimeNoMillis(): DateTimeFormatter = Constants.btx
def basicTTime(): DateTimeFormatter = Constants.btt
def basicTTimeNoMillis(): DateTimeFormatter = Constants.bttx
def basicDateTime(): DateTimeFormatter = Constants.bdt
def basicDateTimeNoMillis(): DateTimeFormatter = Constants.bdtx
def basicOrdinalDate(): DateTimeFormatter = Constants.bod
def basicOrdinalDateTime(): DateTimeFormatter = Constants.bodt
def basicOrdinalDateTimeNoMillis(): DateTimeFormatter = Constants.bodtx
def basicWeekDate(): DateTimeFormatter = Constants.bwd
def basicWeekDateTime(): DateTimeFormatter = Constants.bwdt
def basicWeekDateTimeNoMillis(): DateTimeFormatter = Constants.bwdtx
def year(): DateTimeFormatter = Constants.ye
def yearMonth(): DateTimeFormatter = Constants.ym
def yearMonthDay(): DateTimeFormatter = Constants.ymd
def weekyear(): DateTimeFormatter = Constants.we
def weekyearWeek(): DateTimeFormatter = Constants.ww
def weekyearWeekDay(): DateTimeFormatter = Constants.wwd
def hour(): DateTimeFormatter = Constants.hde
def hourMinute(): DateTimeFormatter = Constants.hm
def hourMinuteSecond(): DateTimeFormatter = Constants.hms
def hourMinuteSecondMillis(): DateTimeFormatter = Constants.hmsl
def hourMinuteSecondFraction(): DateTimeFormatter = Constants.hmsf
def dateHour(): DateTimeFormatter = Constants.dh
def dateHourMinute(): DateTimeFormatter = Constants.dhm
def dateHourMinuteSecond(): DateTimeFormatter = Constants.dhms
def dateHourMinuteSecondMillis(): DateTimeFormatter = Constants.dhmsl
def dateHourMinuteSecondFraction(): DateTimeFormatter = Constants.dhmsf
private object Constants {
val ye: DateTimeFormatter = yearElement()
val mye: DateTimeFormatter = monthElement()
val dme: DateTimeFormatter = dayOfMonthElement()
val we: DateTimeFormatter = weekyearElement()
val wwe: DateTimeFormatter = weekElement()
val dwe: DateTimeFormatter = dayOfWeekElement()
val dye: DateTimeFormatter = dayOfYearElement()
val hde: DateTimeFormatter = hourElement()
val mhe: DateTimeFormatter = minuteElement()
val sme: DateTimeFormatter = secondElement()
val fse: DateTimeFormatter = fractionElement()
val ze: DateTimeFormatter = offsetElement()
val lte: DateTimeFormatter = literalTElement()
val ym: DateTimeFormatter = yearMonth()
val ymd: DateTimeFormatter = yearMonthDay()
val ww: DateTimeFormatter = weekyearWeek()
val wwd: DateTimeFormatter = weekyearWeekDay()
val hm: DateTimeFormatter = hourMinute()
val hms: DateTimeFormatter = hourMinuteSecond()
val hmsl: DateTimeFormatter = hourMinuteSecondMillis()
val hmsf: DateTimeFormatter = hourMinuteSecondFraction()
val dh: DateTimeFormatter = dateHour()
val dhm: DateTimeFormatter = dateHourMinute()
val dhms: DateTimeFormatter = dateHourMinuteSecond()
val dhmsl: DateTimeFormatter = dateHourMinuteSecondMillis()
val dhmsf: DateTimeFormatter = dateHourMinuteSecondFraction()
val t: DateTimeFormatter = time()
val tx: DateTimeFormatter = timeNoMillis()
val tt: DateTimeFormatter = tTime()
val ttx: DateTimeFormatter = tTimeNoMillis()
val dt: DateTimeFormatter = dateTime()
val dtx: DateTimeFormatter = dateTimeNoMillis()
val wdt: DateTimeFormatter = weekDateTime()
val wdtx: DateTimeFormatter = weekDateTimeNoMillis()
val od: DateTimeFormatter = ordinalDate()
val odt: DateTimeFormatter = ordinalDateTime()
val odtx: DateTimeFormatter = ordinalDateTimeNoMillis()
val bd: DateTimeFormatter = basicDate()
val bt: DateTimeFormatter = basicTime()
val btx: DateTimeFormatter = basicTimeNoMillis()
val btt: DateTimeFormatter = basicTTime()
val bttx: DateTimeFormatter = basicTTimeNoMillis()
val bdt: DateTimeFormatter = basicDateTime()
val bdtx: DateTimeFormatter = basicDateTimeNoMillis()
val bod: DateTimeFormatter = basicOrdinalDate()
val bodt: DateTimeFormatter = basicOrdinalDateTime()
val bodtx: DateTimeFormatter = basicOrdinalDateTimeNoMillis()
val bwd: DateTimeFormatter = basicWeekDate()
val bwdt: DateTimeFormatter = basicWeekDateTime()
val bwdtx: DateTimeFormatter = basicWeekDateTimeNoMillis()
val dpe: DateTimeFormatter = dateElementParser()
val tpe: DateTimeFormatter = timeElementParser()
val dp: DateTimeFormatter = dateParser()
val ldp: DateTimeFormatter = localDateParser()
val tp: DateTimeFormatter = timeParser()
val ltp: DateTimeFormatter = localTimeParser()
val dtp: DateTimeFormatter = dateTimeParser()
val dotp: DateTimeFormatter = dateOptionalTimeParser()
val ldotp: DateTimeFormatter = localDateOptionalTimeParser()
private def dateParser(): DateTimeFormatter = {
if (dp == null) {
val tOffset = new DateTimeFormatterBuilder()
.appendLiteral('T')
.append(offsetElement())
.toParser()
return new DateTimeFormatterBuilder()
.append(dateElementParser())
.appendOptional(tOffset)
.toFormatter()
}
dp
}
private def localDateParser(): DateTimeFormatter = {
if (ldp == null) {
return dateElementParser().withZoneUTC()
}
ldp
}
private def dateElementParser(): DateTimeFormatter = {
if (dpe == null) {
return new DateTimeFormatterBuilder()
.append(null,
Array(new DateTimeFormatterBuilder()
.append(yearElement())
.appendOptional(
new DateTimeFormatterBuilder()
.append(monthElement())
.appendOptional(dayOfMonthElement().getParser)
.toParser())
.toParser(),
new DateTimeFormatterBuilder()
.append(weekyearElement())
.append(weekElement())
.appendOptional(dayOfWeekElement().getParser)
.toParser(),
new DateTimeFormatterBuilder()
.append(yearElement())
.append(dayOfYearElement())
.toParser()))
.toFormatter()
}
dpe
}
private def timeParser(): DateTimeFormatter = {
if (tp == null) {
return new DateTimeFormatterBuilder()
.appendOptional(literalTElement().getParser)
.append(timeElementParser())
.appendOptional(offsetElement().getParser)
.toFormatter()
}
tp
}
private def localTimeParser(): DateTimeFormatter = {
if (ltp == null) {
return new DateTimeFormatterBuilder()
.appendOptional(literalTElement().getParser)
.append(timeElementParser())
.toFormatter()
.withZoneUTC()
}
ltp
}
private def timeElementParser(): DateTimeFormatter = {
if (tpe == null) {
val decimalPoint = new DateTimeFormatterBuilder()
.append(
null,
Array(
new DateTimeFormatterBuilder().appendLiteral('.').toParser(),
new DateTimeFormatterBuilder().appendLiteral(',').toParser()))
.toParser()
return new DateTimeFormatterBuilder()
.append(hourElement())
.append(null,
Array(new DateTimeFormatterBuilder()
.append(minuteElement())
.append(null,
Array(new DateTimeFormatterBuilder()
.append(secondElement())
.appendOptional(
new DateTimeFormatterBuilder()
.append(decimalPoint)
.appendFractionOfSecond(1, 9)
.toParser())
.toParser(),
new DateTimeFormatterBuilder()
.append(decimalPoint)
.appendFractionOfMinute(1, 9)
.toParser(),
null))
.toParser(),
new DateTimeFormatterBuilder()
.append(decimalPoint)
.appendFractionOfHour(1, 9)
.toParser(),
null))
.toFormatter()
}
tpe
}
private def dateTimeParser(): DateTimeFormatter = {
if (dtp == null) {
val time = new DateTimeFormatterBuilder()
.appendLiteral('T')
.append(timeElementParser())
.appendOptional(offsetElement().getParser)
.toParser()
return new DateTimeFormatterBuilder()
.append(null, Array(time, dateOptionalTimeParser().getParser))
.toFormatter()
}
dtp
}
private def dateOptionalTimeParser(): DateTimeFormatter = {
if (dotp == null) {
val timeOrOffset = new DateTimeFormatterBuilder()
.appendLiteral('T')
.appendOptional(timeElementParser().getParser)
.appendOptional(offsetElement().getParser)
.toParser()
return new DateTimeFormatterBuilder()
.append(dateElementParser())
.appendOptional(timeOrOffset)
.toFormatter()
}
dotp
}
private def localDateOptionalTimeParser(): DateTimeFormatter = {
if (ldotp == null) {
val time = new DateTimeFormatterBuilder()
.appendLiteral('T')
.append(timeElementParser())
.toParser()
return new DateTimeFormatterBuilder()
.append(dateElementParser())
.appendOptional(time)
.toFormatter()
.withZoneUTC()
}
ldotp
}
private def time(): DateTimeFormatter = {
if (t == null) {
return new DateTimeFormatterBuilder()
.append(hourMinuteSecondFraction())
.append(offsetElement())
.toFormatter()
}
t
}
private def timeNoMillis(): DateTimeFormatter = {
if (tx == null) {
return new DateTimeFormatterBuilder()
.append(hourMinuteSecond())
.append(offsetElement())
.toFormatter()
}
tx
}
private def tTime(): DateTimeFormatter = {
if (tt == null) {
return new DateTimeFormatterBuilder()
.append(literalTElement())
.append(time())
.toFormatter()
}
tt
}
private def tTimeNoMillis(): DateTimeFormatter = {
if (ttx == null) {
return new DateTimeFormatterBuilder()
.append(literalTElement())
.append(timeNoMillis())
.toFormatter()
}
ttx
}
private def dateTime(): DateTimeFormatter = {
if (dt == null) {
return new DateTimeFormatterBuilder()
.append(date())
.append(tTime())
.toFormatter()
}
dt
}
private def dateTimeNoMillis(): DateTimeFormatter = {
if (dtx == null) {
return new DateTimeFormatterBuilder()
.append(date())
.append(tTimeNoMillis())
.toFormatter()
}
dtx
}
private def ordinalDate(): DateTimeFormatter = {
if (od == null) {
return new DateTimeFormatterBuilder()
.append(yearElement())
.append(dayOfYearElement())
.toFormatter()
}
od
}
private def ordinalDateTime(): DateTimeFormatter = {
if (odt == null) {
return new DateTimeFormatterBuilder()
.append(ordinalDate())
.append(tTime())
.toFormatter()
}
odt
}
private def ordinalDateTimeNoMillis(): DateTimeFormatter = {
if (odtx == null) {
return new DateTimeFormatterBuilder()
.append(ordinalDate())
.append(tTimeNoMillis())
.toFormatter()
}
odtx
}
private def weekDateTime(): DateTimeFormatter = {
if (wdt == null) {
return new DateTimeFormatterBuilder()
.append(weekDate())
.append(tTime())
.toFormatter()
}
wdt
}
private def weekDateTimeNoMillis(): DateTimeFormatter = {
if (wdtx == null) {
return new DateTimeFormatterBuilder()
.append(weekDate())
.append(tTimeNoMillis())
.toFormatter()
}
wdtx
}
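  // The "basic" ISO-8601 variants below use fixed-width fields with no separators,
  // e.g. yyyyMMdd for dates and HHmmss.SSSZ for times (illustrative patterns matching the
  // fixed-decimal appenders used in these builders).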
private def basicDate(): DateTimeFormatter = {
if (bd == null) {
return new DateTimeFormatterBuilder()
.appendYear(4, 4)
.appendFixedDecimal(DateTimeFieldType.monthOfYear(), 2)
.appendFixedDecimal(DateTimeFieldType.dayOfMonth(), 2)
.toFormatter()
}
bd
}
private def basicTime(): DateTimeFormatter = {
if (bt == null) {
return new DateTimeFormatterBuilder()
.appendFixedDecimal(DateTimeFieldType.hourOfDay(), 2)
.appendFixedDecimal(DateTimeFieldType.minuteOfHour(), 2)
.appendFixedDecimal(DateTimeFieldType.secondOfMinute(), 2)
.appendLiteral('.')
.appendFractionOfSecond(3, 9)
.appendTimeZoneOffset("Z", showSeparators = false, 2, 2)
.toFormatter()
}
bt
}
private def basicTimeNoMillis(): DateTimeFormatter = {
if (btx == null) {
return new DateTimeFormatterBuilder()
.appendFixedDecimal(DateTimeFieldType.hourOfDay(), 2)
.appendFixedDecimal(DateTimeFieldType.minuteOfHour(), 2)
.appendFixedDecimal(DateTimeFieldType.secondOfMinute(), 2)
.appendTimeZoneOffset("Z", showSeparators = false, 2, 2)
.toFormatter()
}
btx
}
private def basicTTime(): DateTimeFormatter = {
if (btt == null) {
return new DateTimeFormatterBuilder()
.append(literalTElement())
.append(basicTime())
.toFormatter()
}
btt
}
private def basicTTimeNoMillis(): DateTimeFormatter = {
if (bttx == null) {
return new DateTimeFormatterBuilder()
.append(literalTElement())
.append(basicTimeNoMillis())
.toFormatter()
}
bttx
}
private def basicDateTime(): DateTimeFormatter = {
if (bdt == null) {
return new DateTimeFormatterBuilder()
.append(basicDate())
.append(basicTTime())
.toFormatter()
}
bdt
}
private def basicDateTimeNoMillis(): DateTimeFormatter = {
if (bdtx == null) {
return new DateTimeFormatterBuilder()
.append(basicDate())
.append(basicTTimeNoMillis())
.toFormatter()
}
bdtx
}
private def basicOrdinalDate(): DateTimeFormatter = {
if (bod == null) {
return new DateTimeFormatterBuilder()
.appendYear(4, 4)
.appendFixedDecimal(DateTimeFieldType.dayOfYear(), 3)
.toFormatter()
}
bod
}
private def basicOrdinalDateTime(): DateTimeFormatter = {
if (bodt == null) {
return new DateTimeFormatterBuilder()
.append(basicOrdinalDate())
.append(basicTTime())
.toFormatter()
}
bodt
}
private def basicOrdinalDateTimeNoMillis(): DateTimeFormatter = {
if (bodtx == null) {
return new DateTimeFormatterBuilder()
.append(basicOrdinalDate())
.append(basicTTimeNoMillis())
.toFormatter()
}
bodtx
}
private def basicWeekDate(): DateTimeFormatter = {
if (bwd == null) {
return new DateTimeFormatterBuilder()
.appendWeekyear(4, 4)
.appendLiteral('W')
.appendFixedDecimal(DateTimeFieldType.weekOfWeekyear(), 2)
.appendFixedDecimal(DateTimeFieldType.dayOfWeek(), 1)
.toFormatter()
}
bwd
}
private def basicWeekDateTime(): DateTimeFormatter = {
if (bwdt == null) {
return new DateTimeFormatterBuilder()
.append(basicWeekDate())
.append(basicTTime())
.toFormatter()
}
bwdt
}
private def basicWeekDateTimeNoMillis(): DateTimeFormatter = {
if (bwdtx == null) {
return new DateTimeFormatterBuilder()
.append(basicWeekDate())
.append(basicTTimeNoMillis())
.toFormatter()
}
bwdtx
}
private def yearMonth(): DateTimeFormatter = {
if (ym == null) {
return new DateTimeFormatterBuilder()
.append(yearElement())
.append(monthElement())
.toFormatter()
}
ym
}
private def yearMonthDay(): DateTimeFormatter = {
if (ymd == null) {
return new DateTimeFormatterBuilder()
.append(yearElement())
.append(monthElement())
.append(dayOfMonthElement())
.toFormatter()
}
ymd
}
private def weekyearWeek(): DateTimeFormatter = {
if (ww == null) {
return new DateTimeFormatterBuilder()
.append(weekyearElement())
.append(weekElement())
.toFormatter()
}
ww
}
private def weekyearWeekDay(): DateTimeFormatter = {
if (wwd == null) {
return new DateTimeFormatterBuilder()
.append(weekyearElement())
.append(weekElement())
.append(dayOfWeekElement())
.toFormatter()
}
wwd
}
private def hourMinute(): DateTimeFormatter = {
if (hm == null) {
return new DateTimeFormatterBuilder()
.append(hourElement())
.append(minuteElement())
.toFormatter()
}
hm
}
private def hourMinuteSecond(): DateTimeFormatter = {
if (hms == null) {
return new DateTimeFormatterBuilder()
.append(hourElement())
.append(minuteElement())
.append(secondElement())
.toFormatter()
}
hms
}
private def hourMinuteSecondMillis(): DateTimeFormatter = {
if (hmsl == null) {
return new DateTimeFormatterBuilder()
.append(hourElement())
.append(minuteElement())
.append(secondElement())
.appendLiteral('.')
.appendFractionOfSecond(3, 3)
.toFormatter()
}
hmsl
}
private def hourMinuteSecondFraction(): DateTimeFormatter = {
if (hmsf == null) {
return new DateTimeFormatterBuilder()
.append(hourElement())
.append(minuteElement())
.append(secondElement())
.append(fractionElement())
.toFormatter()
}
hmsf
}
private def dateHour(): DateTimeFormatter = {
if (dh == null) {
return new DateTimeFormatterBuilder()
.append(date())
.append(literalTElement())
.append(hour())
.toFormatter()
}
dh
}
private def dateHourMinute(): DateTimeFormatter = {
if (dhm == null) {
return new DateTimeFormatterBuilder()
.append(date())
.append(literalTElement())
.append(hourMinute())
.toFormatter()
}
dhm
}
private def dateHourMinuteSecond(): DateTimeFormatter = {
if (dhms == null) {
return new DateTimeFormatterBuilder()
.append(date())
.append(literalTElement())
.append(hourMinuteSecond())
.toFormatter()
}
dhms
}
private def dateHourMinuteSecondMillis(): DateTimeFormatter = {
if (dhmsl == null) {
return new DateTimeFormatterBuilder()
.append(date())
.append(literalTElement())
.append(hourMinuteSecondMillis())
.toFormatter()
}
dhmsl
}
private def dateHourMinuteSecondFraction(): DateTimeFormatter = {
if (dhmsf == null) {
return new DateTimeFormatterBuilder()
.append(date())
.append(literalTElement())
.append(hourMinuteSecondFraction())
.toFormatter()
}
dhmsf
}
private def yearElement(): DateTimeFormatter = {
if (ye == null) {
return new DateTimeFormatterBuilder().appendYear(4, 9).toFormatter()
}
ye
}
private def monthElement(): DateTimeFormatter = {
if (mye == null) {
return new DateTimeFormatterBuilder()
.appendLiteral('-')
.appendMonthOfYear(2)
.toFormatter()
}
mye
}
private def dayOfMonthElement(): DateTimeFormatter = {
if (dme == null) {
return new DateTimeFormatterBuilder()
.appendLiteral('-')
.appendDayOfMonth(2)
.toFormatter()
}
dme
}
private def weekyearElement(): DateTimeFormatter = {
if (we == null) {
return new DateTimeFormatterBuilder()
.appendWeekyear(4, 9)
.toFormatter()
}
we
}
private def weekElement(): DateTimeFormatter = {
if (wwe == null) {
return new DateTimeFormatterBuilder()
.appendLiteral("-W")
.appendWeekOfWeekyear(2)
.toFormatter()
}
wwe
}
private def dayOfWeekElement(): DateTimeFormatter = {
if (dwe == null) {
return new DateTimeFormatterBuilder()
.appendLiteral('-')
.appendDayOfWeek(1)
.toFormatter()
}
dwe
}
private def dayOfYearElement(): DateTimeFormatter = {
if (dye == null) {
return new DateTimeFormatterBuilder()
.appendLiteral('-')
.appendDayOfYear(3)
.toFormatter()
}
dye
}
private def literalTElement(): DateTimeFormatter = {
if (lte == null) {
return new DateTimeFormatterBuilder().appendLiteral('T').toFormatter()
}
lte
}
private def hourElement(): DateTimeFormatter = {
if (hde == null) {
return new DateTimeFormatterBuilder().appendHourOfDay(2).toFormatter()
}
hde
}
private def minuteElement(): DateTimeFormatter = {
if (mhe == null) {
return new DateTimeFormatterBuilder()
.appendLiteral(':')
.appendMinuteOfHour(2)
.toFormatter()
}
mhe
}
private def secondElement(): DateTimeFormatter = {
if (sme == null) {
return new DateTimeFormatterBuilder()
.appendLiteral(':')
.appendSecondOfMinute(2)
.toFormatter()
}
sme
}
private def fractionElement(): DateTimeFormatter = {
if (fse == null) {
return new DateTimeFormatterBuilder()
.appendLiteral('.')
.appendFractionOfSecond(3, 9)
.toFormatter()
}
fse
}
private def offsetElement(): DateTimeFormatter = {
if (ze == null) {
return new DateTimeFormatterBuilder()
.appendTimeZoneOffset("Z", showSeparators = true, 2, 4)
.toFormatter()
}
ze
}
}
}
| mdedetrich/soda-time | js/src/main/scala/org/joda/time/format/ISODateTimeFormat.scala | Scala | bsd-2-clause | 32,942 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.planning.ExtractEquiJoinKeys
import org.apache.spark.sql.catalyst.plans.Inner
import org.apache.spark.sql.catalyst.plans.logical.{Join, JoinHint}
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.exchange.EnsureRequirements
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.{IntegerType, StringType, StructType}
class InnerJoinSuite extends SparkPlanTest with SharedSparkSession {
import testImplicits.newProductEncoder
import testImplicits.localSeqToDatasetHolder
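  // Test-only configuration (summary of the settings below): enables the OAP columnar plugin
  // with off-heap memory, disables several columnar code paths (hash aggregate, whole-stage
  // codegen, window) and sets preferSortMergeJoin to false so hash joins are exercised.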
override def sparkConf: SparkConf =
super.sparkConf
.setAppName("test")
.set("spark.sql.parquet.columnarReaderBatchSize", "4096")
.set("spark.sql.sources.useV1SourceList", "avro")
.set("spark.sql.extensions", "com.intel.oap.ColumnarPlugin")
.set("spark.sql.execution.arrow.maxRecordsPerBatch", "4096")
//.set("spark.shuffle.manager", "org.apache.spark.shuffle.sort.ColumnarShuffleManager")
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "50m")
.set("spark.sql.join.preferSortMergeJoin", "false")
.set("spark.sql.columnar.codegen.hashAggregate", "false")
.set("spark.oap.sql.columnar.wholestagecodegen", "false")
.set("spark.sql.columnar.window", "false")
.set("spark.unsafe.exceptionOnMemoryLeak", "false")
//.set("spark.sql.columnar.tmp_dir", "/codegen/nativesql/")
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
private lazy val myUpperCaseData = spark.createDataFrame(
sparkContext.parallelize(Seq(
Row(1, "A"),
Row(2, "B"),
Row(3, "C"),
Row(4, "D"),
Row(5, "E"),
Row(6, "F"),
Row(null, "G")
)), new StructType().add("N", IntegerType).add("L", StringType))
private lazy val myLowerCaseData = spark.createDataFrame(
sparkContext.parallelize(Seq(
Row(1, "a"),
Row(2, "b"),
Row(3, "c"),
Row(4, "d"),
Row(null, "e")
)), new StructType().add("n", IntegerType).add("l", StringType))
private lazy val myTestData1 = Seq(
(1, 1),
(1, 2),
(2, 1),
(2, 2),
(3, 1),
(3, 2)
).toDF("a", "b")
private lazy val myTestData2 = Seq(
(1, 1),
(1, 2),
(2, 1),
(2, 2),
(3, 1),
(3, 2)
).toDF("a", "b")
// Note: the input dataframes and expression must be evaluated lazily because
// the SQLContext should be used only within a test to keep SQL tests stable
private def testInnerJoin(
testName: String,
leftRows: => DataFrame,
rightRows: => DataFrame,
condition: () => Expression,
expectedAnswer: Seq[Product]): Unit = {
def extractJoinParts(): Option[ExtractEquiJoinKeys.ReturnType] = {
val join = Join(leftRows.logicalPlan, rightRows.logicalPlan,
Inner, Some(condition()), JoinHint.NONE)
ExtractEquiJoinKeys.unapply(join)
}
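    // The helpers below build a physical plan for each join strategy and run EnsureRequirements
    // on it, so that any exchanges and sorts required by the operator are inserted, mirroring
    // what the query planner would do.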
def makeBroadcastHashJoin(
leftKeys: Seq[Expression],
rightKeys: Seq[Expression],
boundCondition: Option[Expression],
leftPlan: SparkPlan,
rightPlan: SparkPlan,
side: BuildSide) = {
val broadcastJoin = joins.BroadcastHashJoinExec(
leftKeys,
rightKeys,
Inner,
side,
boundCondition,
leftPlan,
rightPlan)
EnsureRequirements(spark.sessionState.conf).apply(broadcastJoin)
}
def makeShuffledHashJoin(
leftKeys: Seq[Expression],
rightKeys: Seq[Expression],
boundCondition: Option[Expression],
leftPlan: SparkPlan,
rightPlan: SparkPlan,
side: BuildSide) = {
val shuffledHashJoin = joins.ShuffledHashJoinExec(leftKeys, rightKeys, Inner,
side, None, leftPlan, rightPlan)
val filteredJoin =
boundCondition.map(FilterExec(_, shuffledHashJoin)).getOrElse(shuffledHashJoin)
EnsureRequirements(spark.sessionState.conf).apply(filteredJoin)
}
def makeSortMergeJoin(
leftKeys: Seq[Expression],
rightKeys: Seq[Expression],
boundCondition: Option[Expression],
leftPlan: SparkPlan,
rightPlan: SparkPlan) = {
val sortMergeJoin = joins.SortMergeJoinExec(leftKeys, rightKeys, Inner, boundCondition,
leftPlan, rightPlan)
EnsureRequirements(spark.sessionState.conf).apply(sortMergeJoin)
}
testWithWholeStageCodegenOnAndOff(s"$testName using BroadcastHashJoin (build=left)") { _ =>
extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _, _) =>
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
checkAnswer2(leftRows, rightRows, (leftPlan: SparkPlan, rightPlan: SparkPlan) =>
makeBroadcastHashJoin(
leftKeys, rightKeys, boundCondition, leftPlan, rightPlan, joins.BuildLeft),
expectedAnswer.map(Row.fromTuple),
sortAnswers = true)
}
}
}
testWithWholeStageCodegenOnAndOff(s"$testName using BroadcastHashJoin (build=right)") { _ =>
extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _, _) =>
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
checkAnswer2(leftRows, rightRows, (leftPlan: SparkPlan, rightPlan: SparkPlan) =>
makeBroadcastHashJoin(
leftKeys, rightKeys, boundCondition, leftPlan, rightPlan, joins.BuildRight),
expectedAnswer.map(Row.fromTuple),
sortAnswers = true)
}
}
}
test(s"$testName using ShuffledHashJoin (build=left)") {
extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _, _) =>
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
checkAnswer2(leftRows, rightRows, (leftPlan: SparkPlan, rightPlan: SparkPlan) =>
makeShuffledHashJoin(
leftKeys, rightKeys, boundCondition, leftPlan, rightPlan, joins.BuildLeft),
expectedAnswer.map(Row.fromTuple),
sortAnswers = true)
}
}
}
test(s"$testName using ShuffledHashJoin (build=right)") {
extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _, _) =>
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
checkAnswer2(leftRows, rightRows, (leftPlan: SparkPlan, rightPlan: SparkPlan) =>
makeShuffledHashJoin(
leftKeys, rightKeys, boundCondition, leftPlan, rightPlan, joins.BuildRight),
expectedAnswer.map(Row.fromTuple),
sortAnswers = true)
}
}
}
testWithWholeStageCodegenOnAndOff(s"$testName using SortMergeJoin") { _ =>
extractJoinParts().foreach { case (_, leftKeys, rightKeys, boundCondition, _, _, _) =>
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
checkAnswer2(leftRows, rightRows, (leftPlan: SparkPlan, rightPlan: SparkPlan) =>
makeSortMergeJoin(leftKeys, rightKeys, boundCondition, leftPlan, rightPlan),
expectedAnswer.map(Row.fromTuple),
sortAnswers = true)
}
}
}
test(s"$testName using CartesianProduct") {
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1",
SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
CartesianProductExec(left, right, Some(condition())),
expectedAnswer.map(Row.fromTuple),
sortAnswers = true)
}
}
test(s"$testName using BroadcastNestedLoopJoin build left") {
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
BroadcastNestedLoopJoinExec(left, right, BuildLeft, Inner, Some(condition())),
expectedAnswer.map(Row.fromTuple),
sortAnswers = true)
}
}
test(s"$testName using BroadcastNestedLoopJoin build right") {
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
checkAnswer2(leftRows, rightRows, (left: SparkPlan, right: SparkPlan) =>
BroadcastNestedLoopJoinExec(left, right, BuildRight, Inner, Some(condition())),
expectedAnswer.map(Row.fromTuple),
sortAnswers = true)
}
}
}
/*
testInnerJoin(
"inner join, one match per row",
myUpperCaseData,
myLowerCaseData,
() => (myUpperCaseData.col("N") === myLowerCaseData.col("n")).expr,
Seq(
(1, "A", 1, "a"),
(2, "B", 2, "b"),
(3, "C", 3, "c"),
(4, "D", 4, "d")
)
)
{
lazy val left = myTestData1.where("a = 1")
lazy val right = myTestData2.where("a = 1")
testInnerJoin(
"inner join, multiple matches",
left,
right,
() => (left.col("a") === right.col("a")).expr,
Seq(
(1, 1, 1, 1),
(1, 1, 1, 2),
(1, 2, 1, 1),
(1, 2, 1, 2)
)
)
}
{
lazy val left = myTestData1.where("a = 1")
lazy val right = myTestData2.where("a = 2")
testInnerJoin(
"inner join, no matches",
left,
right,
() => (left.col("a") === right.col("a")).expr,
Seq.empty
)
}
{
lazy val left = Seq((1, Some(0)), (2, None)).toDF("a", "b")
lazy val right = Seq((1, Some(0)), (2, None)).toDF("a", "b")
testInnerJoin(
"inner join, null safe",
left,
right,
() => (left.col("b") <=> right.col("b")).expr,
Seq(
(1, 0, 1, 0),
(2, null, 2, null)
)
)
}
{
def df: DataFrame = spark.range(3).selectExpr("struct(id, id) as key", "id as value")
lazy val left = df.selectExpr("key", "concat('L', value) as value").alias("left")
lazy val right = df.selectExpr("key", "concat('R', value) as value").alias("right")
testInnerJoin(
"SPARK-15822 - test structs as keys",
left,
right,
() => (left.col("key") === right.col("key")).expr,
Seq(
(Row(0, 0), "L0", Row(0, 0), "R0"),
(Row(1, 1), "L1", Row(1, 1), "R1"),
(Row(2, 2), "L2", Row(2, 2), "R2")))
}
*/
}
| Intel-bigdata/OAP | oap-native-sql/core/src/test/scala/org/apache/spark/sql/execution/joins/InnerJoinSuite.scala | Scala | apache-2.0 | 11,170 |
package com.box.castle.router.kafkadispatcher.cache
import com.box.castle.router.mock.MockBatchTools
import kafka.common.TopicAndPartition
import org.specs2.mock.Mockito
import org.specs2.mutable.Specification
class FetchDataProcessorCacheTest extends Specification with Mockito with MockBatchTools {
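  // createBatch(offset, numberOfMessages, sizeInBytes) is provided by MockBatchTools and builds
  // a CastleMessageBatch starting at the given offset (signature inferred from its usage and
  // the expected toString in the first example below).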
"DataFetchProcessorCache" should {
"have a reasonable toString" in {
val topicAndPartition = TopicAndPartition("perf", 1)
val initialBatch = createBatch(20, 3, sizeInBytes=290)
val c = FetchDataProcessorCache(290,290).add(topicAndPartition, initialBatch)
c.toString must_== "Map([perf,1] -> Cache(bufferSize=290,maxSizeInBytes=290,currentSizeInBytes=290," +
"data=LinkedHashMap(20 -> CastleMessageBatch(offset=20,nextOffset=23,size=3,sizeInBytes=290,maxOffset=22))))"
}
"not allow 0 sized max size in bytes" in {
FetchDataProcessorCache(0,10) must throwA(new IllegalArgumentException(
s"requirement failed: Cache must have more than 0 bytes to use"))
FetchDataProcessorCache(-1,10) must throwA(new IllegalArgumentException(
s"requirement failed: Cache must have more than 0 bytes to use"))
}
"not allow bufferSize of 0 bytes" in {
FetchDataProcessorCache(100,0) must throwA(new IllegalArgumentException(
s"requirement failed: Fetch BufferSize must be larger than 0 bytes to fetch from the cache"))
FetchDataProcessorCache(100,-1) must throwA(new IllegalArgumentException(
s"requirement failed: Fetch BufferSize must be larger than 0 bytes to fetch from the cache"))
}
"correctly resize empty FetchDataProcessorCache" in {
val topicAndPartition = TopicAndPartition("perf", 1)
val maxSize = 500
val c = FetchDataProcessorCache(maxSize, maxSize)
val b = createBatch(20, 3, sizeInBytes=maxSize)
// Verify the original one has enough room for this
c.add(topicAndPartition, b).get(topicAndPartition, 20) must_== Some(b)
// The resized one does not have enough room
c.setMaxSizeInBytes(maxSize - 1).add(topicAndPartition, b).get(topicAndPartition, 20) must_== None
}
"return the exact same object when setting the same size in bytes for cache with data" in {
val c = FetchDataProcessorCache(300,100)
val c2 = c.setMaxSizeInBytes(300)
c eq c2 must_== true
}
"return an empty FetchDataProcessorCache cache when kicking out the last batch on a resize" in {
val topicAndPartition = TopicAndPartition("perf", 1)
val maxSize = 500
val initialBatch = createBatch(20, 3, sizeInBytes=maxSize)
val c = FetchDataProcessorCache(maxSize, maxSize).add(topicAndPartition, initialBatch)
c.get(topicAndPartition, 20) must_== Some(initialBatch)
val c2 = c.setMaxSizeInBytes(maxSize - 1)
c2.get(topicAndPartition, 20) must_== None
}
"allow a single topic and partition to take all the space if it's the only one there" in {
val topicAndPartition = TopicAndPartition("perf", 1)
val cache = FetchDataProcessorCache(57 + 79 + 219,219)
.add(topicAndPartition, createBatch(20, 2, 57))
.add(topicAndPartition, createBatch(24, 1, 79))
.add(topicAndPartition, createBatch(26, 7, 219))
// Establish that all 3 batches are in the cache
cache.get(topicAndPartition, 20).get.sizeInBytes must_== 57
cache.get(topicAndPartition, 24).get.sizeInBytes must_== 79
cache.get(topicAndPartition, 26).get.sizeInBytes must_== 219
}
"return None if either the topic and partition or the offset is not there" in {
val topicAndPartition = TopicAndPartition("perf", 1)
val cache = FetchDataProcessorCache(160 + 120 + 419, 419)
.add(topicAndPartition, createBatch(20, 3, 160))
.add(topicAndPartition, createBatch(23, 1, 120))
.add(topicAndPartition, createBatch(24, 7, 419))
cache.get(topicAndPartition, 2355) must_== None
cache.get(TopicAndPartition("x", 9000), 20) must_== None
}
"split the space evenly when adding more topics" in {
val topicAndPartition = TopicAndPartition("perf", 1)
var cache = FetchDataProcessorCache(160 + 120 + 40, 160)
.add(topicAndPartition, createBatch(20, 3, 160))
.add(topicAndPartition, createBatch(24, 1, 120))
.add(topicAndPartition, createBatch(26, 1, 40))
// Establish that all 3 batches are in the cache
cache.get(topicAndPartition, 20).get.sizeInBytes must_== 160
cache.get(topicAndPartition, 24).get.sizeInBytes must_== 120
cache.get(topicAndPartition, 26).get.sizeInBytes must_== 40
val topicAndPartition2 = TopicAndPartition("perf", 2)
cache = cache.add(topicAndPartition2, createBatch(18, 4, 160))
// Now the max for each is 160
cache.get(topicAndPartition, 20) must_== None
cache.get(topicAndPartition, 24).get.sizeInBytes must_== 120
cache.get(topicAndPartition, 26).get.sizeInBytes must_== 40
cache.get(topicAndPartition2, 18).get.sizeInBytes must_== 160
val topicAndPartition3 = TopicAndPartition("perf", 3)
cache = cache.add(topicAndPartition3, createBatch(38, 2, 70))
// Now the max for each is 100
cache.get(topicAndPartition, 20) must_== None
cache.get(topicAndPartition, 24) must_== None
cache.get(topicAndPartition, 26).get.sizeInBytes must_== 40
// No longer fits into 100
cache.get(topicAndPartition2, 18) must_== None
cache.get(topicAndPartition3, 38).get.sizeInBytes must_== 70
// Ensure we replace properly for existing partitions
cache = cache
.add(topicAndPartition, createBatch(323, 3, 100))
.add(topicAndPartition2, createBatch(521, 3, 100))
.add(topicAndPartition3, createBatch(256, 3, 100))
cache.get(topicAndPartition, 20) must_== None
cache.get(topicAndPartition, 24) must_== None
cache.get(topicAndPartition, 26) must_== None
cache.get(topicAndPartition, 323).get.sizeInBytes must_== 100
cache.get(topicAndPartition2, 18) must_== None
cache.get(topicAndPartition2, 521).get.sizeInBytes must_== 100
cache.get(topicAndPartition3, 38) must_== None
cache.get(topicAndPartition3, 256).get.sizeInBytes must_== 100
}
"return all contiguous batches from cache that fit in specified batchSize" in {
val topicAndPartition = TopicAndPartition("perf", 1)
val cache = FetchDataProcessorCache(57 + 79 + 219,300)
.add(topicAndPartition, createBatch(20, 2, 57))
.add(topicAndPartition, createBatch(22, 1, 79))
.add(topicAndPartition, createBatch(23, 7, 219))
// This should return the first 2 batches
val batch = cache.get(topicAndPartition, 20).get
batch.size shouldEqual 3
batch.offset shouldEqual 20
batch.nextOffset shouldEqual 23
batch.sizeInBytes shouldEqual 57 + 79
}
"not return batches that are not contiguous" in {
val topicAndPartition = TopicAndPartition("perf", 1)
val cache = FetchDataProcessorCache(57 + 79 + 219 + 100, 500)
.add(topicAndPartition, createBatch(20, 2, 57))
.add(topicAndPartition, createBatch(22, 1, 79))
.add(topicAndPartition, createBatch(24, 7, 219))
.add(topicAndPartition, createBatch(31, 2, 100))
// This should return only the first two batches as the last two are non-contiguous.
cache.get(topicAndPartition, 20).get.sizeInBytes must_== 57 + 79
}
}
}
| Box-Castle/router | src/test/scala/com/box/castle/router/kafkadispatcher/cache/FetchDataProcessorCacheTest.scala | Scala | apache-2.0 | 7,496 |
package com.sclasen.sprack
import org.scalatest.matchers.MustMatchers
import org.scalatest.WordSpec
import spray.http.{Uri, HttpRequest}
import spray.http.HttpMethods._
class ChunkedSpec extends WordSpec with MustMatchers with SprackSpec {
"Chunked" must {
"do chunks" in {
try {
val app = rackApp("src/test/resources/chunked.ru", 80)
val (resp, chunks) = app.call(HttpRequest(GET, Uri("/"))).left.get
resp.status.intValue must equal(200)
chunks.size must equal("Hello".size)
} catch {
case e: Exception =>
e.printStackTrace()
fail(e)
}
}
}
}
| sclasen/sprack | src/test/scala/com/sclasen/sprack/ChunkedSpec.scala | Scala | mit | 640 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.io.FileNotFoundException
import scala.collection.mutable
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.hadoop.mapred.{FileInputFormat, JobConf}
import org.apache.spark.internal.Logging
import org.apache.spark.metrics.source.HiveCatalogMetrics
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.{expressions, InternalRow}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types.{StringType, StructType}
import org.apache.spark.util.SerializableConfiguration
/**
* An abstract class that represents [[FileIndex]]s that are aware of partitioned tables.
* It provides the necessary methods to parse partition data based on a set of files.
*
 * @param parameters a set of options to control partition discovery
 * @param userPartitionSchema an optional partition schema that will be used to provide types for
* the discovered partitions
*/
abstract class PartitioningAwareFileIndex(
sparkSession: SparkSession,
parameters: Map[String, String],
userPartitionSchema: Option[StructType],
fileStatusCache: FileStatusCache = NoopCache) extends FileIndex with Logging {
import PartitioningAwareFileIndex.BASE_PATH_PARAM
/** Returns the specification of the partitions inferred from the data. */
def partitionSpec(): PartitionSpec
override def partitionSchema: StructType = partitionSpec().partitionColumns
protected val hadoopConf = sparkSession.sessionState.newHadoopConfWithOptions(parameters)
protected def leafFiles: mutable.LinkedHashMap[Path, FileStatus]
protected def leafDirToChildrenFiles: Map[Path, Array[FileStatus]]
override def listFiles(filters: Seq[Expression]): Seq[PartitionDirectory] = {
val selectedPartitions = if (partitionSpec().partitionColumns.isEmpty) {
PartitionDirectory(InternalRow.empty, allFiles().filter(f => isDataPath(f.getPath))) :: Nil
} else {
prunePartitions(filters, partitionSpec()).map {
case PartitionPath(values, path) =>
val files: Seq[FileStatus] = leafDirToChildrenFiles.get(path) match {
case Some(existingDir) =>
// Directory has children files in it, return them
existingDir.filter(f => isDataPath(f.getPath))
case None =>
// Directory does not exist, or has no children files
Nil
}
PartitionDirectory(values, files)
}
}
logTrace("Selected files after partition pruning:\\n\\t" + selectedPartitions.mkString("\\n\\t"))
selectedPartitions
}
/** Returns the list of files that will be read when scanning this relation. */
override def inputFiles: Array[String] =
allFiles().map(_.getPath.toUri.toString).toArray
override def sizeInBytes: Long = allFiles().map(_.getLen).sum
def allFiles(): Seq[FileStatus] = {
if (partitionSpec().partitionColumns.isEmpty) {
// For each of the root input paths, get the list of files inside them
rootPaths.flatMap { path =>
// Make the path qualified (consistent with listLeafFiles and listLeafFilesInParallel).
val fs = path.getFileSystem(hadoopConf)
val qualifiedPathPre = fs.makeQualified(path)
val qualifiedPath: Path = if (qualifiedPathPre.isRoot && !qualifiedPathPre.isAbsolute) {
// SPARK-17613: Always append `Path.SEPARATOR` to the end of parent directories,
// because the `leafFile.getParent` would have returned an absolute path with the
// separator at the end.
new Path(qualifiedPathPre, Path.SEPARATOR)
} else {
qualifiedPathPre
}
// There are three cases possible with each path
// 1. The path is a directory and has children files in it. Then it must be present in
// leafDirToChildrenFiles as those children files will have been found as leaf files.
// Find its children files from leafDirToChildrenFiles and include them.
// 2. The path is a file, then it will be present in leafFiles. Include this path.
// 3. The path is a directory, but has no children files. Do not include this path.
leafDirToChildrenFiles.get(qualifiedPath)
.orElse { leafFiles.get(qualifiedPath).map(Array(_)) }
.getOrElse(Array.empty)
}
} else {
leafFiles.values.toSeq
}
}
protected def inferPartitioning(): PartitionSpec = {
// We use leaf dirs containing data files to discover the schema.
val leafDirs = leafDirToChildrenFiles.filter { case (_, files) =>
files.exists(f => isDataPath(f.getPath))
}.keys.toSeq
userPartitionSchema match {
case Some(userProvidedSchema) if userProvidedSchema.nonEmpty =>
val spec = PartitioningUtils.parsePartitions(
leafDirs,
typeInference = false,
basePaths = basePaths)
// Without auto inference, all of value in the `row` should be null or in StringType,
// we need to cast into the data type that user specified.
def castPartitionValuesToUserSchema(row: InternalRow) = {
InternalRow((0 until row.numFields).map { i =>
Cast(
Literal.create(row.getUTF8String(i), StringType),
userProvidedSchema.fields(i).dataType).eval()
}: _*)
}
PartitionSpec(userProvidedSchema, spec.partitions.map { part =>
part.copy(values = castPartitionValuesToUserSchema(part.values))
})
case _ =>
PartitioningUtils.parsePartitions(
leafDirs,
typeInference = sparkSession.sessionState.conf.partitionColumnTypeInferenceEnabled,
basePaths = basePaths)
}
}
private def prunePartitions(
predicates: Seq[Expression],
partitionSpec: PartitionSpec): Seq[PartitionPath] = {
val PartitionSpec(partitionColumns, partitions) = partitionSpec
val partitionColumnNames = partitionColumns.map(_.name).toSet
val partitionPruningPredicates = predicates.filter {
_.references.map(_.name).toSet.subsetOf(partitionColumnNames)
}
if (partitionPruningPredicates.nonEmpty) {
val predicate = partitionPruningPredicates.reduce(expressions.And)
val boundPredicate = InterpretedPredicate.create(predicate.transform {
case a: AttributeReference =>
val index = partitionColumns.indexWhere(a.name == _.name)
BoundReference(index, partitionColumns(index).dataType, nullable = true)
})
val selected = partitions.filter {
case PartitionPath(values, _) => boundPredicate(values)
}
logInfo {
val total = partitions.length
val selectedSize = selected.length
val percentPruned = (1 - selectedSize.toDouble / total.toDouble) * 100
s"Selected $selectedSize partitions out of $total, pruned $percentPruned% partitions."
}
selected
} else {
partitions
}
}
/**
* Contains a set of paths that are considered as the base dirs of the input datasets.
* The partitioning discovery logic will make sure it will stop when it reaches any
* base path.
*
* By default, the paths of the dataset provided by users will be base paths.
* Below are three typical examples,
* Case 1) `spark.read.parquet("/path/something=true/")`: the base path will be
* `/path/something=true/`, and the returned DataFrame will not contain a column of `something`.
* Case 2) `spark.read.parquet("/path/something=true/a.parquet")`: the base path will be
* still `/path/something=true/`, and the returned DataFrame will also not contain a column of
* `something`.
* Case 3) `spark.read.parquet("/path/")`: the base path will be `/path/`, and the returned
* DataFrame will have the column of `something`.
*
* Users also can override the basePath by setting `basePath` in the options to pass the new base
* path to the data source.
* For example, `spark.read.option("basePath", "/path/").parquet("/path/something=true/")`,
* and the returned DataFrame will have the column of `something`.
*/
private def basePaths: Set[Path] = {
parameters.get(BASE_PATH_PARAM).map(new Path(_)) match {
case Some(userDefinedBasePath) =>
val fs = userDefinedBasePath.getFileSystem(hadoopConf)
if (!fs.isDirectory(userDefinedBasePath)) {
throw new IllegalArgumentException(s"Option '$BASE_PATH_PARAM' must be a directory")
}
Set(fs.makeQualified(userDefinedBasePath))
case None =>
rootPaths.map { path =>
// Make the path qualified (consistent with listLeafFiles and listLeafFilesInParallel).
val qualifiedPath = path.getFileSystem(hadoopConf).makeQualified(path)
if (leafFiles.contains(qualifiedPath)) qualifiedPath.getParent else qualifiedPath }.toSet
}
}
  // SPARK-15895: Metadata files (e.g. Parquet summary files) and temporary files should not be
  // counted as data files, so that they don't participate in partition discovery.
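  // For example (illustrative): "part-00000.parquet" counts as a data path, while "_SUCCESS"
  // and ".hidden" do not; names starting with "_" are still accepted when they contain "=".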
private def isDataPath(path: Path): Boolean = {
val name = path.getName
!((name.startsWith("_") && !name.contains("=")) || name.startsWith("."))
}
/**
* List leaf files of given paths. This method will submit a Spark job to do parallel
   * listing whenever there is a path having more files than the parallel partition
   * discovery threshold.
*
* This is publicly visible for testing.
*/
def listLeafFiles(paths: Seq[Path]): mutable.LinkedHashSet[FileStatus] = {
val output = mutable.LinkedHashSet[FileStatus]()
val pathsToFetch = mutable.ArrayBuffer[Path]()
for (path <- paths) {
fileStatusCache.getLeafFiles(path) match {
case Some(files) =>
HiveCatalogMetrics.incrementFileCacheHits(files.length)
output ++= files
case None =>
pathsToFetch += path
}
}
val filter = FileInputFormat.getInputPathFilter(new JobConf(hadoopConf, this.getClass))
val discovered = PartitioningAwareFileIndex.bulkListLeafFiles(
pathsToFetch, hadoopConf, filter, sparkSession)
discovered.foreach { case (path, leafFiles) =>
HiveCatalogMetrics.incrementFilesDiscovered(leafFiles.size)
fileStatusCache.putLeafFiles(path, leafFiles.toArray)
output ++= leafFiles
}
output
}
}
object PartitioningAwareFileIndex extends Logging {
val BASE_PATH_PARAM = "basePath"
/** A serializable variant of HDFS's BlockLocation. */
private case class SerializableBlockLocation(
names: Array[String],
hosts: Array[String],
offset: Long,
length: Long)
/** A serializable variant of HDFS's FileStatus. */
private case class SerializableFileStatus(
path: String,
length: Long,
isDir: Boolean,
blockReplication: Short,
blockSize: Long,
modificationTime: Long,
accessTime: Long,
blockLocations: Array[SerializableBlockLocation])
/**
* Lists a collection of paths recursively. Picks the listing strategy adaptively depending
* on the number of paths to list.
*
* This may only be called on the driver.
*
* @return for each input path, the set of discovered files for the path
*/
private def bulkListLeafFiles(
paths: Seq[Path],
hadoopConf: Configuration,
filter: PathFilter,
sparkSession: SparkSession): Seq[(Path, Seq[FileStatus])] = {
// Short-circuits parallel listing when serial listing is likely to be faster.
if (paths.size < sparkSession.sessionState.conf.parallelPartitionDiscoveryThreshold) {
return paths.map { path =>
(path, listLeafFiles(path, hadoopConf, filter, Some(sparkSession)))
}
}
logInfo(s"Listing leaf files and directories in parallel under: ${paths.mkString(", ")}")
HiveCatalogMetrics.incrementParallelListingJobCount(1)
val sparkContext = sparkSession.sparkContext
val serializableConfiguration = new SerializableConfiguration(hadoopConf)
val serializedPaths = paths.map(_.toString)
    // Set the degree of parallelism to prevent the following file listing from generating many tasks
// in case of large #defaultParallelism.
val numParallelism = Math.min(paths.size, 10000)
val statusMap = sparkContext
.parallelize(serializedPaths, numParallelism)
.mapPartitions { pathStrings =>
val hadoopConf = serializableConfiguration.value
pathStrings.map(new Path(_)).toSeq.map { path =>
(path, listLeafFiles(path, hadoopConf, filter, None))
}.iterator
}.map { case (path, statuses) =>
val serializableStatuses = statuses.map { status =>
// Turn FileStatus into SerializableFileStatus so we can send it back to the driver
val blockLocations = status match {
case f: LocatedFileStatus =>
f.getBlockLocations.map { loc =>
SerializableBlockLocation(
loc.getNames,
loc.getHosts,
loc.getOffset,
loc.getLength)
}
case _ =>
Array.empty[SerializableBlockLocation]
}
SerializableFileStatus(
status.getPath.toString,
status.getLen,
status.isDirectory,
status.getReplication,
status.getBlockSize,
status.getModificationTime,
status.getAccessTime,
blockLocations)
}
(path.toString, serializableStatuses)
}.collect()
    // turn SerializableFileStatus back to FileStatus
statusMap.map { case (path, serializableStatuses) =>
val statuses = serializableStatuses.map { f =>
val blockLocations = f.blockLocations.map { loc =>
new BlockLocation(loc.names, loc.hosts, loc.offset, loc.length)
}
new LocatedFileStatus(
new FileStatus(
f.length, f.isDir, f.blockReplication, f.blockSize, f.modificationTime,
new Path(f.path)),
blockLocations)
}
(new Path(path), statuses)
}
}
/**
* Lists a single filesystem path recursively. If a SparkSession object is specified, this
* function may launch Spark jobs to parallelize listing.
*
* If sessionOpt is None, this may be called on executors.
*
* @return all children of path that match the specified filter.
*/
private def listLeafFiles(
path: Path,
hadoopConf: Configuration,
filter: PathFilter,
sessionOpt: Option[SparkSession]): Seq[FileStatus] = {
logTrace(s"Listing $path")
val fs = path.getFileSystem(hadoopConf)
val name = path.getName.toLowerCase
if (shouldFilterOut(name)) {
Seq.empty[FileStatus]
} else {
// [SPARK-17599] Prevent InMemoryFileIndex from failing if path doesn't exist
// Note that statuses only include FileStatus for the files and dirs directly under path,
// and does not include anything else recursively.
val statuses = try fs.listStatus(path) catch {
case _: FileNotFoundException =>
logWarning(s"The directory $path was not found. Was it deleted very recently?")
Array.empty[FileStatus]
}
val allLeafStatuses = {
val (dirs, topLevelFiles) = statuses.partition(_.isDirectory)
val nestedFiles: Seq[FileStatus] = sessionOpt match {
case Some(session) =>
bulkListLeafFiles(dirs.map(_.getPath), hadoopConf, filter, session).flatMap(_._2)
case _ =>
dirs.flatMap(dir => listLeafFiles(dir.getPath, hadoopConf, filter, sessionOpt))
}
val allFiles = topLevelFiles ++ nestedFiles
if (filter != null) allFiles.filter(f => filter.accept(f.getPath)) else allFiles
}
allLeafStatuses.filterNot(status => shouldFilterOut(status.getPath.getName)).map {
case f: LocatedFileStatus =>
f
// NOTE:
//
// - Although S3/S3A/S3N file system can be quite slow for remote file metadata
// operations, calling `getFileBlockLocations` does no harm here since these file system
// implementations don't actually issue RPC for this method.
//
// - Here we are calling `getFileBlockLocations` in a sequential manner, but it should not
        //   be a big deal since we always use `listLeafFilesInParallel` when the number of
// paths exceeds threshold.
case f =>
// The other constructor of LocatedFileStatus will call FileStatus.getPermission(),
        // which is very slow on some file systems (RawLocalFileSystem, which launches a
        // subprocess and parses the stdout).
val locations = fs.getFileBlockLocations(f, 0, f.getLen)
val lfs = new LocatedFileStatus(f.getLen, f.isDirectory, f.getReplication, f.getBlockSize,
f.getModificationTime, 0, null, null, null, null, f.getPath, locations)
if (f.isSymlink) {
lfs.setSymlink(f.getSymlink)
}
lfs
}
}
}
/** Checks if we should filter out this path name. */
def shouldFilterOut(pathName: String): Boolean = {
// We filter everything that starts with _ and ., except _common_metadata and _metadata
// because Parquet needs to find those metadata files from leaf files returned by this method.
// We should refactor this logic to not mix metadata files with data files.
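    // For example (illustrative): "_SUCCESS" and ".DS_Store" are filtered out, while
    // "_metadata", "_common_metadata" and "part-00000" are kept.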
((pathName.startsWith("_") && !pathName.contains("=")) || pathName.startsWith(".")) &&
!pathName.startsWith("_common_metadata") && !pathName.startsWith("_metadata")
}
}
| Panos-Bletsos/spark-cost-model-optimizer | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningAwareFileIndex.scala | Scala | apache-2.0 | 18,520 |
/*
Copyright 2013 Stephen K Samuel
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.sksamuel.scrimage.filter
import com.sksamuel.scrimage.BufferedOpFilter
/** @author Stephen Samuel */
class DiffuseFilter(scale: Float) extends BufferedOpFilter {
val op = new thirdparty.jhlabs.image.DiffuseFilter()
op.setScale(scale)
}
object DiffuseFilter {
def apply(): DiffuseFilter = apply(4)
def apply(scale: Float): DiffuseFilter = new DiffuseFilter(scale)
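  // Usage sketch (assumes scrimage's standard Image.filter API): image.filter(DiffuseFilter(8))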
} | carlosFattor/scrimage | scrimage-filters/src/main/scala/com/sksamuel/scrimage/filter/DiffuseFilter.scala | Scala | apache-2.0 | 980 |
package dsmoq.controllers.json
/**
 * JSON case class used for requests to the suggest-style search APIs.
 *
 * Specifically, it is used by the following APIs:
 * GET /api/suggests/users
 * GET /api/suggests/groups
 * GET /api/suggests/attributes
 *
 * @param query search string
 * @param limit maximum number of results
 * @param offset search offset
*/
case class SuggestApiParams(
query: Option[String] = None,
limit: Option[Int] = None,
offset: Option[Int] = None
)
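// Illustrative request shape (assumed, matching the fields above): a JSON object like
//   {"query": "foo", "limit": 10, "offset": 0}
// would be bound to SuggestApiParams(Some("foo"), Some(10), Some(0)).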
/**
 * JSON case class used for requests to GET /api/suggests/users_and_groups.
 *
 * @param query search string
 * @param limit maximum number of results
 * @param offset search offset
 * @param excludeIds user/group IDs to exclude from the search
*/
case class UserAndGroupSuggestApiParams(
query: Option[String] = None,
limit: Option[Int] = None,
offset: Option[Int] = None,
excludeIds: Seq[String] = Seq.empty
)
| nkawa/dsmoq | server/apiServer/src/main/scala/dsmoq/controllers/json/SuggestApiParams.scala | Scala | apache-2.0 | 950 |
package com.betfair.robots.racing
import akka.actor.Actor
import com.betfair.domain.{LimitOrder, PersistenceType, PlaceInstruction, Side}
import com.betfair.robots.{Direction, PriceFormat}
import com.betfair.service.BetfairServiceNG
import org.joda.time
import org.joda.time.{DateTime, DateTimeZone}
import scala.concurrent._
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{Failure, Success}
class MonitorInPlayWinScalp(betfairServiceNG: BetfairServiceNG, sessionToken: String,
marketId: String, marketStartTime: Option[DateTime])
(implicit executionContext: ExecutionContext) extends Actor {
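  // Each tuple below encodes pre-race odds bands plus a scalp factor, read as
  // (favMin, favMax, secondFavMin, secondFavMax, thirdFavMin, thirdFavMax, factor); the factor
  // scales the pre-race odds down to the in-play back price (inferred from how odd._1 .. odd._7
  // are used in the in-play loop further down).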
val oddsBackFav = List[(Double, Double, Double, Double, Double, Double, Double)](
(4.25,5.0,4.75,5.5,5.75,6.5,0.8),
(4.0,4.75,4.75,5.5,5.25,6.0,0.8),
(4.0,4.75,4.75,5.5,5.5,6.25,0.8),
(4.25,5.0,4.75,5.5,5.25,6.0,0.8),
(4.25,5.0,4.75,5.5,6.0,6.75,0.8),
(3.75,4.5,4.5,5.25,5.5,6.25,0.8),
(3.5,4.25,4.5,5.25,5.25,6.0,0.8),
(4.5,5.25,4.75,5.5,5.75,6.5,0.8),
(4.0,4.75,4.25,5.0,5.75,6.5,0.8),
(4.0,4.75,4.5,5.25,6.0,6.75,0.8),
(3.75,4.5,4.75,5.5,5.5,6.25,0.8),
(3.5,4.25,4.5,5.25,5.5,6.25,0.8),
(2.5,3.25,4.25,5.0,4.75,5.5,0.8),
(4.0,4.75,4.75,5.5,5.75,6.5,0.8),
(4.0,4.75,4.75,5.5,6.0,6.75,0.8),
(4.5,5.25,4.75,5.5,5.25,6.0,0.8),
(3.5,4.25,4.25,5.0,5.5,6.25,0.8),
(3.75,4.5,4.5,5.25,5.25,6.0,0.8),
(4.25,5.0,4.5,5.25,5.25,6.0,0.8),
(4.25,5.0,5.0,5.75,5.25,6.0,0.8),
(4.25,5.0,5.0,5.75,5.75,6.5,0.8),
(4.25,5.0,4.75,5.5,5.5,6.25,0.8),
(3.0,3.75,4.25,5.0,4.75,5.5,0.8),
(4.0,4.75,4.5,5.25,5.5,6.25,0.8),
(4.5,5.25,5.0,5.75,5.75,6.5,0.8),
(3.5,4.25,4.25,5.0,5.25,6.0,0.8),
(4.0,4.75,4.5,5.25,5.0,5.75,0.8),
(4.0,4.75,4.5,5.25,5.75,6.5,0.8),
(4.25,5.0,5.0,5.75,6.0,6.75,0.8),
(3.75,4.5,4.75,5.5,5.25,6.0,0.8),
(3.5,4.25,4.5,5.25,5.0,5.75,0.8),
(4.0,4.75,5.0,5.75,5.25,6.0,0.8),
(3.5,4.25,4.5,5.25,4.75,5.5,0.8),
(2.25,3.0,3.0,3.75,5.5,6.25,0.8),
(3.75,4.5,4.25,5.0,5.5,6.25,0.8),
(4.0,4.75,5.0,5.75,5.5,6.25,0.8),
(4.0,4.75,4.5,5.25,4.75,5.5,0.8),
(4.0,4.75,4.5,5.25,5.25,6.0,0.8),
(3.5,4.25,4.75,5.5,5.5,6.25,0.8),
(2.5,3.25,4.25,5.0,5.0,5.75,0.8),
(2.5,3.25,4.5,5.25,4.75,5.5,0.8),
(3.75,4.5,4.5,5.25,5.0,5.75,0.8),
(4.5,5.25,5.0,5.75,6.0,6.75,0.8),
(3.25,4.0,4.5,5.25,5.25,6.0,0.8),
(2.75,3.5,4.25,5.0,4.75,5.5,0.8),
(3.0,3.75,4.25,5.0,5.0,5.75,0.8),
(3.75,4.5,4.5,5.25,4.75,5.5,0.8),
(3.75,4.5,4.25,5.0,6.0,6.75,0.8),
(2.5,3.25,3.0,3.75,5.5,6.25,0.7),
(2.5,3.25,4.5,5.25,5.25,6.0,0.8),
(2.5,3.25,3.0,3.75,5.25,6.0,0.8),
(2.5,3.25,4.25,5.0,5.25,6.0,0.8),
(4.25,5.0,4.5,5.25,5.5,6.25,0.8),
(2.25,3.0,4.25,5.0,5.25,6.0,0.8),
(2.25,3.0,4.25,5.0,5.0,5.75,0.8),
(2.25,3.0,3.0,3.75,5.25,6.0,0.8),
(3.0,3.75,4.5,5.25,5.25,6.0,0.8),
(2.25,3.0,4.5,5.25,5.25,6.0,0.8)
)
val oddsBackSecondFav = List[(Double, Double, Double, Double, Double, Double, Double)](
(3.75,4.5,5.25,6.0,6.25,7.0,0.8),
(3.75,4.5,5.25,6.0,6.0,6.75,0.8),
(3.0,3.75,3.75,4.5,5.25,6.0,0.8),
(3.25,4.0,3.75,4.5,5.25,6.0,0.8),
(4.0,4.75,5.25,6.0,6.0,6.75,0.8),
(4.25,5.0,5.25,6.0,6.0,6.75,0.8),
(2.75,3.5,3.75,4.5,5.25,6.0,0.8),
(3.0,3.75,3.5,4.25,5.25,6.0,0.8),
(3.0,3.75,3.75,4.5,4.0,4.75,0.8),
(3.5,4.25,5.0,5.75,5.25,6.0,0.7),
(2.75,3.5,3.75,4.5,5.5,6.25,0.8),
(4.25,5.0,5.5,6.25,6.0,6.75,0.8),
(2.75,3.5,3.75,4.5,4.0,4.75,0.8),
(2.75,3.5,3.5,4.25,5.25,6.0,0.8),
(3.25,4.0,5.0,5.75,5.25,6.0,0.7),
(3.75,4.5,5.0,5.75,6.25,7.0,0.8),
(2.5,3.25,4.75,5.5,5.5,6.25,0.8),
(4.0,4.75,5.0,5.75,6.0,6.75,0.8),
(3.25,4.0,3.75,4.5,5.0,5.75,0.6),
(3.0,3.75,3.75,4.5,5.5,6.25,0.8),
(2.75,3.5,3.5,4.25,5.5,6.25,0.8),
(3.25,4.0,5.0,5.75,5.5,6.25,0.7),
(3.25,4.0,3.75,4.5,5.5,6.25,0.8),
(2.5,3.25,3.5,4.25,5.25,6.0,0.8),
(3.25,4.0,4.75,5.5,5.5,6.25,0.7)
)
val oddsBackThirdFav = List[(Double, Double, Double, Double, Double, Double, Double)](
)
def receive = {
case _ => {
println((new time.DateTime(DateTimeZone.UTC)) + " - monitoring fav, second and third fav in play starting - " + marketId)
var favSelectionId = 0L
var favOdds = 20.0
var secondFavSelectionId = 0L
var secondFavOdds = 20.0
var thirdFavSelectionId = 0L
var thirdFavOdds = 20.0
var fourthFavSelectionId = 0L
var fourthFavOdds = 20.0
var preRace = true
// monitor market until the off
while (preRace) {
betfairServiceNG.listMarketBook(sessionToken, marketIds = Set(marketId)
) onComplete {
case Success(Some(listMarketBookContainer)) =>
for (marketBook <- listMarketBookContainer.result) {
if (marketBook.status.equals("OPEN") && marketBook.betDelay.equals(0)) {
val orderedRunners =
marketBook.runners.filter(r => r.status == "ACTIVE").toSeq.sortBy(_.lastPriceTraded)
favOdds = orderedRunners.head.lastPriceTraded.get
favSelectionId = orderedRunners.head.selectionId
secondFavOdds = orderedRunners.drop(1).head.lastPriceTraded.get
secondFavSelectionId = orderedRunners.drop(1).head.selectionId
if (orderedRunners.size > 3) {
thirdFavOdds = orderedRunners.drop(2).head.lastPriceTraded.get
thirdFavSelectionId = orderedRunners.drop(2).head.selectionId
fourthFavOdds = orderedRunners.drop(3).head.lastPriceTraded.get
fourthFavSelectionId = orderedRunners.drop(3).head.selectionId
}
println(new time.DateTime(DateTimeZone.UTC) + " - OPEN PRE RACE - "
+ marketId + " - market delay - " + marketBook.betDelay)
} else {
preRace = false
}
}
case Success(None) =>
println("error no result returned")
case Failure(error) =>
println("error " + error)
}
Thread.sleep(150)
}
if (favOdds.equals(secondFavOdds) || secondFavOdds.equals(thirdFavOdds) || thirdFavOdds.equals(fourthFavOdds)) {
println(new time.DateTime(DateTimeZone.UTC) + " - " + marketId + " - no bets placed - same odds")
} else {
var inPlay = true
var suspended = true
// monitor market until no longer suspended
while (suspended) {
betfairServiceNG.listMarketBook(sessionToken, marketIds = Set(marketId)
) onComplete {
case Success(Some(listMarketBookContainer)) =>
for (marketBook <- listMarketBookContainer.result) {
if (marketBook.status.equals("OPEN")) {
suspended = false
println(new time.DateTime(DateTimeZone.UTC) + " - OPEN - "
+ marketId + " - market delay - " + marketBook.betDelay)
if (marketBook.betDelay.equals(0)) {
println(new time.DateTime(DateTimeZone.UTC) + " - NON RUNNER - "
+ marketId + " - market delay - " + marketBook.betDelay)
inPlay = false
}
} else {
println(new time.DateTime(DateTimeZone.UTC) + " - SUSPENDED - "
+ marketId + " - market delay - " + marketBook.betDelay)
}
}
case Success(None) =>
println("error no result returned")
case Failure(error) =>
println("error " + error)
}
Thread.sleep(75)
}
Thread.sleep(500)
println((new time.DateTime(DateTimeZone.UTC)) + " - monitoring in play - "
+ marketId + " - " + favOdds + " - " + secondFavOdds + " - " + thirdFavOdds
)
var oddsBackFavBetPlaced = false
var oddsBackSecondFavBetPlaced = false
var oddsBackThirdFavBetPlaced = false
while (inPlay) {
val priceBoundRunners = betfairServiceNG.getPriceBoundRunners(sessionToken, marketId = marketId,
lowerPrice = 1.00, higherPrice = 8.5
) map { response =>
response match {
case Some(runners) =>
// println((new time.DateTime(DateTimeZone.UTC)) + " - monitoring prices - " + marketId)
if (runners.isEmpty) {
inPlay = false
} else {
runners foreach { runner =>
if (runner.selectionId == favSelectionId) {
for (odd <- oddsBackFav) {
if ((!oddsBackFavBetPlaced) && (favOdds >= odd._1) && (favOdds <= odd._2)
&& (secondFavOdds >= odd._3) && (secondFavOdds <= odd._4)
&& (thirdFavOdds >= odd._5) && (thirdFavOdds <= odd._6)
) {
val inPlayFavOdds = BigDecimal(((favOdds - 1.0) * odd._7) + 1.0).setScale(2, BigDecimal.RoundingMode.HALF_UP).toDouble
val inPlayFavOddsFormatted = PriceFormat.round(Direction.Up, inPlayFavOdds)
val inPlayFavOddsBet = BigDecimal(((favOdds - 1.0) * (odd._7 - 0.05)) + 1.0).setScale(2, BigDecimal.RoundingMode.HALF_UP).toDouble
val inPlayFavOddsFormattedBet = PriceFormat.round(Direction.Up, inPlayFavOddsBet)
if (runner.lastPriceTraded.get <= inPlayFavOddsFormatted) {
val placeInstructionsFav = Set(
PlaceInstruction(
selectionId = favSelectionId,
side = Side.BACK,
limitOrder = Some(LimitOrder(size = 2.50,
price = inPlayFavOddsFormattedBet,
persistenceType = PersistenceType.PERSIST))))
betfairServiceNG.placeOrders(sessionToken,
marketId = marketId,
instructions = placeInstructionsFav) onComplete {
case Success(Some(placeExecutionReportContainer)) =>
println(new time.DateTime(DateTimeZone.UTC) + " - " + marketId + " - " + runner.selectionId + " - bet fav win in play")
case _ =>
println("error no result returned")
}
oddsBackFavBetPlaced = true
}
}
}
} else if (runner.selectionId == secondFavSelectionId) {
for (odd <- oddsBackSecondFav) {
if ((!oddsBackSecondFavBetPlaced) && (favOdds >= odd._1) && (favOdds <= odd._2)
&& (secondFavOdds >= odd._3) && (secondFavOdds <= odd._4)
&& (thirdFavOdds >= odd._5) && (thirdFavOdds <= odd._6)
) {
val inPlayFavOdds = BigDecimal(((secondFavOdds - 1.0) * odd._7) + 1.0).setScale(2, BigDecimal.RoundingMode.HALF_UP).toDouble
val inPlayFavOddsFormatted = PriceFormat.round(Direction.Up, inPlayFavOdds)
val inPlayFavOddsBet = BigDecimal(((secondFavOdds - 1.0) * (odd._7 - 0.05)) + 1.0).setScale(2, BigDecimal.RoundingMode.HALF_UP).toDouble
val inPlayFavOddsFormattedBet = PriceFormat.round(Direction.Up, inPlayFavOddsBet)
if (runner.lastPriceTraded.get <= inPlayFavOddsFormatted) {
val placeInstructionsFav = Set(
PlaceInstruction(
selectionId = secondFavSelectionId,
side = Side.BACK,
limitOrder = Some(LimitOrder(size = 2.25,
price = inPlayFavOddsFormattedBet,
persistenceType = PersistenceType.PERSIST))))
betfairServiceNG.placeOrders(sessionToken,
marketId = marketId,
instructions = placeInstructionsFav) onComplete {
case Success(Some(placeExecutionReportContainer)) =>
println(new time.DateTime(DateTimeZone.UTC) + " - " + marketId + " - " + runner.selectionId + " - bet second fav win in play")
case _ =>
println("error no result returned")
}
oddsBackSecondFavBetPlaced = true
}
}
}
} else if (runner.selectionId == thirdFavSelectionId) {
for (odd <- oddsBackThirdFav) {
if ((!oddsBackThirdFavBetPlaced) && (favOdds >= odd._1) && (favOdds <= odd._2)
&& (secondFavOdds >= odd._3) && (secondFavOdds <= odd._4)
&& (thirdFavOdds >= odd._5) && (thirdFavOdds <= odd._6)
) {
val inPlayFavOdds = BigDecimal(((thirdFavOdds - 1.0) * odd._7) + 1.0).setScale(2, BigDecimal.RoundingMode.HALF_UP).toDouble
val inPlayFavOddsFormatted = PriceFormat.round(Direction.Up, inPlayFavOdds)
val inPlayFavOddsBet = BigDecimal(((thirdFavOdds - 1.0) * (odd._7 - 0.05)) + 1.0).setScale(2, BigDecimal.RoundingMode.HALF_UP).toDouble
val inPlayFavOddsFormattedBet = PriceFormat.round(Direction.Up, inPlayFavOddsBet)
if (runner.lastPriceTraded.get <= inPlayFavOddsFormatted) {
val placeInstructionsFav = Set(
PlaceInstruction(
selectionId = thirdFavSelectionId,
side = Side.BACK,
limitOrder = Some(LimitOrder(size = 2.00,
price = inPlayFavOddsFormattedBet,
persistenceType = PersistenceType.PERSIST))))
betfairServiceNG.placeOrders(sessionToken,
marketId = marketId,
instructions = placeInstructionsFav) onComplete {
case Success(Some(placeExecutionReportContainer)) =>
println(new time.DateTime(DateTimeZone.UTC) + " - " + marketId + " - " + runner.selectionId + " - bet third fav win in play")
case _ =>
println("error no result returned")
}
oddsBackThirdFavBetPlaced = true
}
}
}
}
}
}
case _ =>
println("error no result returned")
inPlay = false
}
}
Await.result(priceBoundRunners, 10 seconds)
Thread.sleep(120)
}
}
println((new time.DateTime(DateTimeZone.UTC)) + " - monitoring fav, second and third fav in play ending - " + marketId)
}
}
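  // Price stepping helpers: move a price one increment down/up the Betfair tick ladder
  // (0.01 steps up to 2.0, 0.02 to 3.0, 0.05 to 4.0, 0.1 to 6.0, 0.2 to 10.0, 0.5 to 20.0,
  // 1.0 to 30.0, 2.0 to 50.0, 5.0 to 100.0, then 10.0), rounding to two decimal places.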
def decrementPrice(price: Double): Double = {
var newPrice: Double = 0.0
if (price <= 2.0) {
newPrice = price - 0.01
}
else if (price <= 3.0) {
newPrice = price - 0.02
}
else if (price <= 4.0) {
newPrice = price - 0.05
}
else if (price <= 6.0) {
newPrice = price - 0.1
}
else if (price <= 10.0) {
newPrice = price - 0.2
}
else if (price <= 20.0) {
newPrice = price - 0.5
}
else if (price <= 30.0) {
newPrice = price - 1.0
}
else if (price <= 50.0) {
newPrice = price - 2.0
}
else if (price <= 100.0) {
newPrice = price - 5.0
}
else {
newPrice = price - 10.0
}
BigDecimal(newPrice).setScale(2, BigDecimal.RoundingMode.HALF_UP).toDouble
}
def incrementPrice(price: Double): Double = {
var newPrice: Double = 0.0
if (price >= 100.0) {
newPrice = price + 10.0
}
else if (price >= 50.0) {
newPrice = price + 5.0
}
else if (price >= 30.0) {
newPrice = price + 2.0
}
else if (price >= 20.0) {
newPrice = price + 1.0
}
else if (price >= 10.0) {
newPrice = price + 0.5
}
else if (price >= 6.0) {
newPrice = price + 0.2
}
else if (price >= 4.0) {
newPrice = price + 0.1
}
else if (price >= 3.0) {
newPrice = price + 0.05
}
else if (price >= 2.0) {
newPrice = price + 0.02
}
else {
newPrice = price + 0.01
}
BigDecimal(newPrice).setScale(2, BigDecimal.RoundingMode.HALF_UP).toDouble
}
} | city81/betfair-service-ng | src/main/scala/com/betfair/robots/racing/MonitorInPlayWinScalp.scala | Scala | bsd-2-clause | 17,276 |